xref: /optee_os/core/arch/arm/kernel/thread_a32.S (revision 63988d7cb5d649265ec68a2887296e63503da579)
/*
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm.S>
#include <arm.h>
#include <arm32_macros.S>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed_macros.h>
#include <sm/teesmc_opteed.h>
#include <kernel/abort.h>
#include <kernel/thread_defs.h>
#include <kernel/unwind.h>

	.section .text.thread_asm

LOCAL_FUNC vector_std_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() returns via thread_exit() or
	 * thread_rpc(), but if it hasn't switched stack (an error was
	 * detected) it does a normal "C" return.
	 */
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_std_smc_entry
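
/*
 * Both vector_std_smc_entry above and vector_fast_smc_entry below push
 * r0-r7 and pass sp to the handler as a struct thread_smc_args pointer.
 * A minimal sketch of that struct, assuming the layout in
 * <kernel/thread.h> (one 32-bit word per SMC register):
 *
 *	struct thread_smc_args {
 *		uint32_t a0;	// SMC function ID in, return code out
 *		uint32_t a1;	// argument/result
 *		// ... a2-a6 ...
 *		uint32_t a7;	// argument/result
 *	};
 */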

LOCAL_FUNC vector_fast_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_fast_smc
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	ldr	lr, =thread_nintr_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_on_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_suspend_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_resume_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_system_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_system_reset_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.  Also used when compiled with the internal monitor,
 * but the cpu_*_entry and system_*_entry functions are not used then.
 *
 * Note that ARM-TF depends on the layout of this vector table; any
 * change in layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
UNWIND(	.fnend)
END_FUNC thread_vector_table
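
/*
 * Why the layout is fixed: each entry is a single 4-byte branch, so the
 * dispatcher on the other side can enter OP-TEE at
 * thread_vector_table + 4 * index. A hedged sketch of how ARM-TF's
 * opteed dispatcher might model this table (member names are
 * illustrative, not necessarily ARM-TF's actual ones):
 *
 *	struct optee_vectors {
 *		void (*std_smc_entry)(void);
 *		void (*fast_smc_entry)(void);
 *		void (*cpu_on_entry)(void);
 *		// ... remaining entries in the same order as above ...
 *	};
 */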

FUNC thread_set_abt_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_abt_sp

FUNC thread_set_irq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_fiq_sp
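
/*
 * The three functions above briefly enter the respective exception mode
 * to load its banked sp, letting per-CPU init point each mode at a
 * dedicated stack. A hedged usage sketch (the stack symbols are
 * illustrative; see the real callers in thread.c):
 *
 *	thread_set_abt_sp((vaddr_t)abt_stack_top);
 *	thread_set_irq_sp((vaddr_t)tmp_stack_top);
 *	thread_set_fiq_sp((vaddr_t)tmp_stack_top);
 */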

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldm	r12!, {sp, lr}

	cps	#CPSR_MODE_SVC
	ldm	r12!, {r1, sp, lr}
	msr	spsr_fsxc, r1

	cps	#CPSR_MODE_SVC
	ldm	r12, {r1, r2}
	push	{r1, r2}

	ldm	r0, {r0-r12}

	/* Restore CPSR and jump to the instruction to resume at */
	rfefd	sp!
UNWIND(	.fnend)
END_FUNC thread_resume
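
/*
 * The loads above imply the following layout, a sketch assuming struct
 * thread_ctx_regs in <kernel/thread.h> (field names may differ):
 *
 *	struct thread_ctx_regs {
 *		uint32_t r[13];		// r0-r12, restored last
 *		uint32_t usr_sp, usr_lr;	// banked SYS/USR sp and lr
 *		uint32_t svc_spsr, svc_sp, svc_lr;
 *		uint32_t pc, cpsr;	// consumed by rfefd above
 *	};
 */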

/*
 * Disables IRQ and FIQ and saves the state of a thread interrupted in
 * FIQ mode, which has banked r8-r12 registers. Returns the original
 * CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mov	r9, lr

	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps	#CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	stm	r0!, {r1, sp, lr}

	/* Back to FIQ mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
UNWIND(	.fnend)
END_FUNC thread_save_state_fiq

/*
 * Disables IRQ and FIQ and saves state of thread, returns original
 * CPSR.
 */
LOCAL_FUNC thread_save_state , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	stm	r0!, {r1, sp, lr}

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_save_state
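
/*
 * Note that neither save function above stores pc and cpsr, the last
 * two fields of the context. Callers instead pass the CPSR and PC to
 * resume with as arguments to thread_state_suspend(), which is what
 * thread_rpc() and the foreign interrupt handler below do.
 */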

FUNC thread_std_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Pass r0-r7 in a struct thread_smc_args */
	push	{r0-r7}
	mov	r0, sp
	bl	__thread_std_smc_entry
	/*
	 * Load the returned r0-r3 into preserved registers and skip the
	 * "returned" r4-r7 since they will not be returned to normal
	 * world.
	 */
	pop	{r4-r7}
	add	sp, #(4 * 4)

	/* Disable interrupts before switching to temporary stack */
	cpsid	aif
	bl	thread_get_tmp_sp
	mov	sp, r0

	bl	thread_state_free

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	r1, r4
	mov	r2, r5
	mov	r3, r6
	mov	r4, r7
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC thread_std_smc_entry
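
/*
 * On TEESMC_OPTEED_RETURN_CALL_DONE the monitor/dispatcher is assumed
 * to hand r1-r4 back to normal world as the four SMC results a0-a3,
 * which is why the code above shifts the returned r0-r3 into r1-r4
 * before issuing the SMC.
 */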

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
/*
 * r0-r2 are used to pass parameters to normal world
 * r0-r5 are used to pass the return value back from normal world
 *
 * Note that r3 is used to pass "resume information", that is, which
 * thread it is that should resume.
 *
 * Since this function follows the AAPCS we need to preserve r4-r5,
 * which otherwise would be modified when returning from normal world.
 */
UNWIND(	.fnstart)
	push	{r4-r5, lr}
UNWIND(	.save	{r4-r5, lr})
	push	{r0}
UNWIND(	.save	{r0})

	bl	thread_save_state
	mov	r4, r0			/* Save original CPSR */

	/*
	 * Switch to temporary stack and SVC mode. The CPSR to resume
	 * with was saved in r4 above.
	 */
	bl	thread_get_tmp_sp
	ldr	r5, [sp]		/* Get pointer to rv[] */
	cps	#CPSR_MODE_SVC		/* Change to SVC mode */
	mov	sp, r0			/* Switch to tmp stack */

	mov	r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	mov	r1, r4			/* CPSR to restore */
	ldr	r2, =.thread_rpc_return
	bl	thread_state_suspend
	mov	r4, r0			/* Supply thread index */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldm	r5, {r1-r3}		/* Load rv[] into r1-r3 */
	smc	#0
	b	.	/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * it had when thread_save_state() was called above.
	 *
	 * Jumps here from thread_resume above when RPC has returned. The
	 * IRQ and FIQ bits are restored to what they were when this
	 * function was originally entered.
	 */
	pop	{r12}			/* Get pointer to rv[] */
	stm	r12, {r0-r5}		/* Store r0-r5 into rv[] */
	pop	{r4-r5, pc}
UNWIND(	.fnend)
END_FUNC thread_rpc
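
/*
 * A hedged usage sketch from the C side (the exact rv[] contents are
 * illustrative; see the real callers in thread.c):
 *
 *	uint32_t rv[THREAD_RPC_NUM_ARGS] = { 0 };
 *
 *	rv[0] = OPTEE_SMC_RETURN_RPC_CMD;	// RPC type for normal world
 *	thread_rpc(rv);				// suspends, resumes here
 *	// rv[] now holds r0-r5 as passed back from normal world
 */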

/* Handler for native interrupts. */
.macro	native_intr_handler mode:req
	/*
	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
	 * return address
	 */
	sub	lr, lr, #4

	/*
	 * We're saving {r0-r3}. The banked fiq registers {r8-r12} need to be
	 * saved if the native interrupt is sent as FIQ because the secure
	 * monitor doesn't save those. The treatment of the banked fiq
	 * registers is somewhat analogous to the lazy save of VFP registers.
	 */
	.ifc	\mode\(),fiq
	push	{r0-r3, r8-r12, lr}
	.else
	push	{r0-r3, lr}
	.endif
	bl	thread_check_canaries
	ldr	lr, =thread_nintr_handler_ptr
	ldr	lr, [lr]
	blx	lr
	.ifc	\mode\(),fiq
	pop	{r0-r3, r8-r12, lr}
	.else
	pop	{r0-r3, lr}
	.endif
	/* MOVS with pc as destination also copies SPSR to CPSR */
	movs	pc, lr
.endm

/* Handler for foreign interrupts. */
.macro foreign_intr_handler mode:req
	.ifc	\mode\(),irq
	/*
	 * Disable FIQ if the foreign interrupt is sent as IRQ.
	 * IRQ mode is set up to use the tmp stack so FIQ has to be
	 * disabled before touching the stack. We can also assign
	 * SVC sp from IRQ sp to get SVC mode into the state we
	 * need when doing the SMC below.
	 * If it is sent as FIQ, the IRQ has already been masked by hardware.
	 */
	cpsid	f
	.endif
	sub	lr, lr, #4
	push	{lr}
	push	{r12}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr		/* CPSR to resume with */
	pop	{r12}			/* Restore interrupted r12 */
	pop	{r2}			/* PC to resume at */
	blx	thread_state_suspend
	mov	r4, r0		/* Supply thread index */

	/*
	 * Switch to SVC mode and copy current stack pointer as it already
	 * is the tmp stack.
	 */
	mov	r0, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r0

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	r1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	r2, #0
	mov	r3, #0
	/* r4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
.endm

LOCAL_FUNC thread_fiq_handler , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
UNWIND(	.fnend)
END_FUNC thread_fiq_handler

LOCAL_FUNC thread_irq_handler , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
UNWIND(	.fnend)
END_FUNC thread_irq_handler
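
/*
 * The CFG_ARM_GICV3 swap above follows the interrupt grouping: with
 * GICv2, secure (native) interrupts are delivered to secure world as
 * FIQ and normal world (foreign) ones as IRQ, while with GICv3 the
 * roles of FIQ and IRQ in secure world are reversed.
 */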

FUNC thread_init_vbar , :
UNWIND(	.fnstart)
	/* Set vector (VBAR) */
	ldr	r0, =thread_vect_table
	write_vbar r0
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_init_vbar
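
/*
 * write_vbar comes from arm32_macros.S. Assuming it wraps the usual
 * CP15 access, it expands to roughly:
 *
 *	mcr	p15, 0, r0, c12, c0, 0	@ write VBAR
 */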

/*
 * Below are low level routines handling entry and return from user mode.
 *
 * thread_enter_user_mode() saves all registers that user mode can change
 * so kernel mode can restore needed registers when resuming execution
 * after the call to thread_enter_user_mode() has returned.
 * thread_enter_user_mode() doesn't return directly since it enters user
 * mode instead; it's thread_unwind_user_mode() that does the
 * returning by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to caller:
 * user TA calls utee_return, user TA calls utee_panic or through an abort.
 *
 * Calls to utee_return or utee_panic are handled as:
 * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call() which
 * calls syscall_return() or syscall_panic().
 *
 * These function calls return normally except thread_svc_handler(),
 * which is an exception handling routine so it reads the return address
 * and SPSR to restore from the stack. syscall_return() and syscall_panic()
 * change the return address and SPSR used by thread_svc_handler() so that,
 * instead of returning into user mode as with other syscalls, it returns
 * into thread_unwind_user_mode() in kernel mode. When
 * thread_svc_handler() returns, the stack pointer is at the point where
 * thread_enter_user_mode() left it, so this is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way but by thread_abort_handler()
 * instead; when the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
 *               unsigned long user_func, unsigned long spsr,
 *               uint32_t *exit_status0, uint32_t *exit_status1)
 *
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function would have returned. This is also used in
	 * syscall_panic().
	 *
	 * If stack usage of this function is changed
	 * thread_unwind_user_mode() has to be updated.
	 */
	push	{r4-r12, lr}

	ldr	r4, [sp, #(10 * 0x4)]	/* user stack pointer */
	ldr	r5, [sp, #(11 * 0x4)]	/* user function */
	ldr	r6, [sp, #(12 * 0x4)]	/* spsr */

	/*
	 * Set the Saved Program Status Register to user mode to allow
	 * entry of user mode through movs below.
	 */
	msr	spsr_cxsf, r6

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r6, sp
	mov	sp, r4
	cps	#CPSR_MODE_SVC
	push	{r6, r7}	/* r7 only to keep the stack 8-byte aligned */

	/*
	 * Don't allow return from this function, return is done through
	 * thread_unwind_user_mode() below.
	 */
	mov	lr, #0
	/* Call the user function with its arguments */
	movs	pc, r5
UNWIND(	.fnend)
END_FUNC __thread_enter_user_mode
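
/*
 * For reference, the SVC stack as seen by thread_unwind_user_mode()
 * below, derived from the two pushes above (offsets in 32-bit words):
 *
 *	sp + 0..1:	old user sp, r7 (alignment filler)
 *	sp + 2..11:	r4-r12, lr	(push {r4-r12, lr})
 *	sp + 12..14:	user_sp, user_func, spsr (caller's stack args)
 *	sp + 15:	exit_status0 pointer	-> ctx->panicked
 *	sp + 16:	exit_status1 pointer	-> ctx->panic_code
 */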

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	ip, [sp, #(15 * 0x4)]	/* &ctx->panicked */
	str	r1, [ip]
	ldr	ip, [sp, #(16 * 0x4)]	/* &ctx->panic_code */
	str	r2, [ip]

	/* Restore old user sp */
	pop	{r4, r7}
	cps	#CPSR_MODE_SYS
	mov	sp, r4
	cps	#CPSR_MODE_SVC

	pop	{r4-r12, pc}	/* Match the push in __thread_enter_user_mode() */
UNWIND(	.fnend)
END_FUNC thread_unwind_user_mode

LOCAL_FUNC thread_abort_handler , :
thread_abort_handler:
thread_und_handler:
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/*
	 * Switch to abort mode to use that stack instead.
	 */
	cps	#CPSR_MODE_ABT
	push	{r0-r11, ip}
	cps	#CPSR_MODE_UND
	mrs	r0, spsr
	tst	r0, #CPSR_T
	subne	r1, lr, #2	/* Undef exception in Thumb state: lr - 2 */
	subeq	r1, lr, #4	/* Undef exception in ARM state: lr - 4 */
	cps	#CPSR_MODE_ABT
	push	{r0, r1}
	msr	spsr_fsxc, r0	/* In case some code reads spsr directly */
	mov	r0, #ABORT_TYPE_UNDEF
	b	.thread_abort_generic

thread_dabort_handler:
	push	{r0-r11, ip}
	sub	r1, lr, #8	/* Data abort: faulting instruction at lr - 8 */
	mrs	r0, spsr
	push	{r0, r1}
	mov	r0, #ABORT_TYPE_DATA
	b	.thread_abort_generic

thread_pabort_handler:
	push	{r0-r11, ip}
	sub	r1, lr, #4	/* Prefetch abort: faulting instr at lr - 4 */
	mrs	r0, spsr
	push	{r0, r1}
	mov	r0, #ABORT_TYPE_PREFETCH
	b	.thread_abort_generic

.thread_abort_generic:
	cps	#CPSR_MODE_SYS
	mov	r1, sp
	mov	r2, lr
	cps	#CPSR_MODE_ABT
	push	{r1-r3}
	mov	r1, sp
	bl	abort_handler
	pop	{r1-r3}
	cps	#CPSR_MODE_SYS
	mov	sp, r1
	mov	lr, r2
	cps	#CPSR_MODE_ABT
	pop	{r0, r1}
	mov	lr, r1
	msr	spsr_fsxc, r0
	pop	{r0-r11, ip}
	movs	pc, lr
UNWIND(	.fnend)
END_FUNC thread_abort_handler

LOCAL_FUNC thread_svc_handler , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	bl	tee_svc_handler
	pop	{r0}
	msr	spsr_fsxc, r0
	pop	{r0-r7, lr}
	movs	pc, lr
UNWIND(	.fnend)
END_FUNC thread_svc_handler
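
/*
 * tee_svc_handler() receives sp as a pointer to the frame pushed above.
 * A minimal sketch of that frame, assuming the layout of struct
 * thread_svc_regs in <kernel/thread.h> (field names may differ):
 *
 *	struct thread_svc_regs {
 *		uint32_t spsr;	// pushed last, so first in memory
 *		uint32_t r0;	// syscall args in, return value out
 *		// ... r1-r7 ...
 *		uint32_t lr;	// return address, may be redirected
 *	};
 */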

	/* The hardware vector table must be 32-byte aligned for VBAR */
	.align	5
LOCAL_FUNC thread_vect_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.			/* Reset			*/
	b	thread_und_handler	/* Undefined instruction	*/
	b	thread_svc_handler	/* System call			*/
	b	thread_pabort_handler	/* Prefetch abort		*/
	b	thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	thread_irq_handler	/* IRQ				*/
	b	thread_fiq_handler	/* FIQ				*/
UNWIND(	.fnend)
END_FUNC thread_vect_table