/*
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/thread_defs.h>
#include <kernel/unwind.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

	.section .text.thread_asm
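
/*
 * The SMC entry points below push r0-r7 and hand sp to the C handlers
 * as a struct thread_smc_args *. A sketch of the layout assumed here
 * (the authoritative definition lives in <kernel/thread.h>):
 *
 *	struct thread_smc_args {
 *		uint32_t a0;	SMC function ID on entry, return code on exit
 *		uint32_t a1;
 *		...
 *		uint32_t a7;
 *	};
 *
 * The handlers write their results back into the same struct; those
 * words are then popped into registers again for the return SMC.
 */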

LOCAL_FUNC vector_std_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (an error was detected) it does a
	 * normal "C" return.
	 */
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_fast_smc
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	ldr	lr, =thread_nintr_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_on_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_suspend_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_resume_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_system_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_system_reset_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization. It is also used when compiled with the internal
 * monitor, but then the cpu_*_entry and system_*_entry entries are
 * unused.
 *
 * Note that ARM-TF depends on the layout of this vector table; any
 * change in layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
UNWIND(	.fnend)
END_FUNC thread_vector_table
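
/*
 * The three thread_set_*_sp functions below share one pattern: save
 * the current CPSR, switch to the target exception mode with cps, load
 * that mode's banked sp from r0 and switch back. Their C prototypes
 * are presumably of the form void thread_set_abt_sp(vaddr_t sp), see
 * thread.h.
 */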

FUNC thread_set_abt_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_abt_sp

FUNC thread_set_irq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_fiq_sp

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldm	r12!, {sp, lr}

	cps	#CPSR_MODE_SVC
	ldm	r12!, {r1, sp, lr}
	msr	spsr_fsxc, r1

	cps	#CPSR_MODE_SVC
	ldm	r12, {r1, r2}
	push	{r1, r2}

	ldm	r0, {r0-r12}

	/* Restore CPSR and jump to the instruction to resume at */
	rfefd	sp!
UNWIND(	.fnend)
END_FUNC thread_resume
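
/*
 * thread_resume() above and the thread_save_state*() functions below
 * assume a struct thread_ctx_regs layout roughly like this sketch (the
 * authoritative definition lives in <kernel/thread.h>):
 *
 *	struct thread_ctx_regs {
 *		uint32_t r0;		words 0-12: r0-r12
 *		...
 *		uint32_t r12;
 *		uint32_t usr_sp;	loaded in CPSR_MODE_SYS above
 *		uint32_t usr_lr;
 *		uint32_t svc_spsr;
 *		uint32_t svc_sp;
 *		uint32_t svc_lr;
 *		uint32_t pc;		consumed together with cpsr by
 *		uint32_t cpsr;		the rfefd above
 *	};
 *
 * The add of #(13 * 4) at the top skips r0-r12 so that they can be
 * restored last, once all mode switching is done.
 */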

/*
 * Disables IRQ and FIQ and saves the state of the thread from FIQ
 * mode, which has banked r8-r12 registers; returns the original CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mov	r9, lr

	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps	#CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	stm	r0!, {r1, sp, lr}

	/* back to fiq mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
UNWIND(	.fnend)
END_FUNC thread_save_state_fiq

/*
 * Disables IRQ and FIQ and saves the state of the thread; returns the
 * original CPSR.
 */
LOCAL_FUNC thread_save_state , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	stm	r0!, {r1, sp, lr}

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_save_state
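
/*
 * Both save routines above fill the struct returned by
 * thread_get_ctx_regs() in the same order that thread_resume() reads
 * it back: r0-r12, then usr_sp/usr_lr from SYS mode, then
 * svc_spsr/svc_sp/svc_lr from SVC mode. The trailing pc/cpsr words are
 * filled in by the callers, e.g. through thread_state_suspend(), not
 * here.
 */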

FUNC thread_std_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Pass r0-r7 in a struct thread_smc_args */
	push	{r0-r7}
	mov	r0, sp
	bl	__thread_std_smc_entry
	/*
	 * Load the returned r0-r3 into preserved registers and skip the
	 * "returned" r4-r7 since they will not be returned to normal
	 * world.
	 */
	pop	{r4-r7}
	add	sp, #(4 * 4)

	/* Disable interrupts before switching to temporary stack */
	cpsid	aif
	bl	thread_get_tmp_sp
	mov	sp, r0

	bl	thread_state_free

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	r1, r4
	mov	r2, r5
	mov	r3, r6
	mov	r4, r7
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC thread_std_smc_entry
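
/*
 * __thread_std_smc_entry() presumably takes the struct thread_smc_args *
 * passed in r0 and writes its results back into that struct (see
 * thread.c); the pop above then picks the updated a0-a3 out of the
 * struct while the add discards a4-a7.
 */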

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
/*
 * r0-r2 are used to pass parameters to normal world
 * r0-r5 are used to pass the return value back from normal world
 *
 * Note that r3 is used to pass "resume information", that is, which
 * thread it is that should resume.
 *
 * Since this function follows AAPCS we need to preserve r4-r5, which
 * are otherwise modified when returning from normal world.
 */
UNWIND(	.fnstart)
	push	{r4-r5, lr}
UNWIND(	.save	{r4-r5, lr})
	push	{r0}
UNWIND(	.save	{r0})

	bl	thread_save_state
	mov	r4, r0			/* Save original CPSR */

	/*
	 * Switch to temporary stack and SVC mode. Save CPSR to resume into.
	 */
	bl	thread_get_tmp_sp
	ldr	r5, [sp]		/* Get pointer to rv[] */
	cps	#CPSR_MODE_SVC		/* Change to SVC mode */
	mov	sp, r0			/* Switch to tmp stack */

	mov	r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	mov	r1, r4			/* CPSR to restore */
	ldr	r2, =.thread_rpc_return
	bl	thread_state_suspend
	mov	r4, r0			/* Supply thread index */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldm	r5, {r1-r3}		/* Load rv[] into r1-r3 (normal world's r0-r2) */
	smc	#0
	b	.	/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the
	 * value it had when thread_save_state() was called above.
	 *
	 * Execution jumps here from thread_resume() above when the RPC
	 * has returned. The IRQ and FIQ bits are restored to what they
	 * were when this function was originally entered.
	 */
	pop	{r12}			/* Get pointer to rv[] */
	stm	r12, {r0-r5}		/* Store r0-r5 into rv[] */
	pop	{r4-r5, pc}
UNWIND(	.fnend)
END_FUNC thread_rpc
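
/*
 * A hypothetical call-site sketch (the rv[] contents here are only
 * illustrative, see the callers in thread.c for the real encoding):
 *
 *	uint32_t rv[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
 *	thread_rpc(rv);
 *	now rv[] holds r0-r5 as returned from normal world
 */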

/* The handler of native interrupts. */
.macro	native_intr_handler mode:req
	/*
	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
	 * return address
	 */
	sub	lr, lr, #4

	/*
	 * We're always saving {r0-r3}. In IRQ mode we're also saving
	 * r12. In FIQ mode we're saving the banked FIQ registers
	 * {r8-r12} because the secure monitor doesn't save those. The
	 * treatment of the banked FIQ registers is somewhat analogous
	 * to the lazy save of VFP registers.
	 */
	.ifc	\mode\(),fiq
	push	{r0-r3, r8-r12, lr}
	.else
	push	{r0-r3, r12, lr}
	.endif
	bl	thread_check_canaries
	ldr	lr, =thread_nintr_handler_ptr
	ldr	lr, [lr]
	blx	lr
	.ifc	\mode\(),fiq
	pop	{r0-r3, r8-r12, lr}
	.else
	pop	{r0-r3, r12, lr}
	.endif
	movs	pc, lr
.endm

/* The handler of foreign interrupts. */
.macro foreign_intr_handler mode:req
	.ifc	\mode\(),irq
	/*
	 * Disable FIQ if the foreign interrupt is sent as IRQ.
	 * IRQ mode is set up to use the tmp stack so FIQ has to be
	 * disabled before touching the stack. We can also assign
	 * SVC sp from IRQ sp to get SVC mode into the state we
	 * need when doing the SMC below.
	 * If it is sent as FIQ, the IRQ has already been masked by the
	 * hardware.
	 */
	cpsid	f
	.endif
	sub	lr, lr, #4
	push	{lr}
	push	{r12}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr
	pop	{r12}
	pop	{r2}
	blx	thread_state_suspend
	mov	r4, r0		/* Supply thread index */

	/*
	 * Switch to SVC mode and copy current stack pointer as it already
	 * is the tmp stack.
	 */
	mov	r0, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r0

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	r1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	r2, #0
	mov	r3, #0
	/* r4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
.endm
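
/*
 * thread_state_suspend() above records the interrupted context
 * (r1 = SPSR, r2 = the adjusted return address popped from the stack)
 * so that a later thread_resume() continues at the interrupted
 * instruction once normal world resumes the thread.
 *
 * With CFG_ARM_GICV3 the GIC delivers foreign (normal world)
 * interrupts as FIQ and native (secure) interrupts as IRQ, which is
 * why the two handlers below swap macros depending on the
 * configuration.
 */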

LOCAL_FUNC thread_fiq_handler , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
UNWIND(	.fnend)
END_FUNC thread_fiq_handler

LOCAL_FUNC thread_irq_handler , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
UNWIND(	.fnend)
END_FUNC thread_irq_handler

FUNC thread_init_vbar , :
UNWIND(	.fnstart)
	/* Set vector (VBAR) */
	ldr	r0, =thread_vect_table
	write_vbar r0
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * Below are low level routines handling entry and return from user mode.
 *
 * thread_enter_user_mode() saves all the registers that user mode can
 * change so that kernel mode can restore the needed registers when
 * resuming execution after the call to thread_enter_user_mode() has
 * returned. thread_enter_user_mode() doesn't return directly since it
 * enters user mode instead; it's thread_unwind_user_mode() that does
 * the returning by restoring the registers saved by
 * thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls utee_return, the user TA calls utee_panic,
 * or an abort occurs.
 *
 * Calls to utee_return or utee_panic are handled as:
 * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call() which
 * calls syscall_return() or syscall_panic().
 *
 * These function calls return normally except thread_svc_handler(),
 * which is an exception handling routine and hence reads the return
 * address and SPSR to restore from the stack. syscall_return() and
 * syscall_panic() change the return address and SPSR used by
 * thread_svc_handler() so that, instead of returning into user mode as
 * with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode. When thread_svc_handler() returns, the stack pointer is
 * at the point where thread_enter_user_mode() left it, so this is
 * where thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way, but by thread_abort_handler()
 * instead: when the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
 *               unsigned long user_func, unsigned long spsr,
 *               uint32_t *exit_status0, uint32_t *exit_status1)
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed
	 * thread_unwind_user_mode() has to be updated.
	 */
	push	{r4-r12, lr}

	ldr	r4, [sp, #(10 * 0x4)]	/* user stack pointer */
	ldr	r5, [sp, #(11 * 0x4)]	/* user function */
	ldr	r6, [sp, #(12 * 0x4)]	/* spsr */

	/*
	 * Set the saved Processor Status Register to user mode to allow
	 * entry of user mode through the movs below.
	 */
	msr	spsr_cxsf, r6

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r6, sp
	mov	sp, r4
	cps	#CPSR_MODE_SVC
	push	{r6, r7}

	/*
	 * Don't allow return from this function, return is done through
	 * thread_unwind_user_mode() below.
	 */
	mov	lr, #0
	/* Call the user function with its arguments */
	movs	pc, r5
UNWIND(	.fnend)
END_FUNC __thread_enter_user_mode
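
/*
 * SVC mode stack layout after the two pushes in
 * __thread_enter_user_mode(), derived from the code above (offsets in
 * 32-bit words from sp):
 *
 *	 0	old user sp (r6)
 *	 1	r7 (apparently pushed to keep 8-byte stack alignment)
 *	 2-10	r4-r12
 *	11	lr
 *	12	user_sp argument
 *	13	user_func argument
 *	14	spsr argument
 *	15	exit_status0 argument
 *	16	exit_status1 argument
 *
 * thread_unwind_user_mode() below indexes this frame directly, hence
 * its #(15 * 0x4) and #(16 * 0x4) offsets.
 */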

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	ip, [sp, #(15 * 0x4)]	/* &ctx->panicked */
	str	r1, [ip]
	ldr	ip, [sp, #(16 * 0x4)]	/* &ctx->panic_code */
	str	r2, [ip]

	/* Restore old user sp */
	pop	{r4, r7}
	cps	#CPSR_MODE_SYS
	mov	sp, r4
	cps	#CPSR_MODE_SVC

	pop	{r4-r12, pc}	/* Match the push in thread_enter_user_mode() */
UNWIND(	.fnend)
END_FUNC thread_unwind_user_mode

LOCAL_FUNC thread_abort_handler , :
thread_und_handler:
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/*
	 * Switch to abort mode to use that stack instead.
	 */
	cps	#CPSR_MODE_ABT
	push	{r0-r11, ip}
	cps	#CPSR_MODE_UND
	mrs	r0, spsr
	tst	r0, #CPSR_T
	subne	r1, lr, #2
	subeq	r1, lr, #4
	cps	#CPSR_MODE_ABT
	push	{r0, r1}
	msr	spsr_fsxc, r0	/* In case some code reads spsr directly */
	mov	r0, #ABORT_TYPE_UNDEF
	b	.thread_abort_generic

thread_dabort_handler:
	push	{r0-r11, ip}
	sub	r1, lr, #8
	mrs	r0, spsr
	push	{r0, r1}
	mov	r0, #ABORT_TYPE_DATA
	b	.thread_abort_generic

thread_pabort_handler:
	push	{r0-r11, ip}
	sub	r1, lr, #4
	mrs	r0, spsr
	push	{r0, r1}
	mov	r0, #ABORT_TYPE_PREFETCH
	b	.thread_abort_generic

.thread_abort_generic:
	cps	#CPSR_MODE_SYS
	mov	r1, sp
	mov	r2, lr
	cps	#CPSR_MODE_ABT
	push	{r1-r3}
	mov	r1, sp
	bl	abort_handler
	pop	{r1-r3}
	cps	#CPSR_MODE_SYS
	mov	sp, r1
	mov	lr, r2
	cps	#CPSR_MODE_ABT
	pop	{r0, r1}
	mov	lr, r1
	msr	spsr_fsxc, r0
	pop	{r0-r11, ip}
	movs	pc, lr
UNWIND(	.fnend)
END_FUNC thread_abort_handler
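
/*
 * The pushes above lay out, in ascending memory order: usr_sp, usr_lr
 * and a spare word (from push {r1-r3}), then the SPSR and the adjusted
 * return address (from push {r0, r1}), then r0-r11 and ip. This
 * presumably mirrors the struct thread_abort_regs that abort_handler()
 * expects as its second argument; the authoritative definition lives
 * in <kernel/thread.h>.
 */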

LOCAL_FUNC thread_svc_handler , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	bl	tee_svc_handler
	pop	{r0}
	msr	spsr_fsxc, r0
	pop	{r0-r7, lr}
	movs	pc, lr
UNWIND(	.fnend)
END_FUNC thread_svc_handler
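
/*
 * The frame passed to tee_svc_handler() above starts with the saved
 * SPSR followed by r0-r7 and lr, presumably matching struct
 * thread_svc_regs (see thread.h). As described further up,
 * syscall_return() and syscall_panic() rewrite the saved lr and SPSR
 * words in this frame so that the movs pc, lr above returns into
 * thread_unwind_user_mode() instead of user mode.
 */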

	.align	5
LOCAL_FUNC thread_vect_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.			/* Reset			*/
	b	thread_und_handler	/* Undefined instruction	*/
	b	thread_svc_handler	/* System call			*/
	b	thread_pabort_handler	/* Prefetch abort		*/
	b	thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	thread_irq_handler	/* IRQ				*/
	b	thread_fiq_handler	/* FIQ				*/
UNWIND(	.fnend)
END_FUNC thread_vect_table
734