xref: /optee_os/core/arch/arm/kernel/thread_a32.S (revision 722b96ee08691556efd96a3a2fe8849ed462e230)
/*
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm-defines.h>
#include <asm.S>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/thread_defs.h>
#include <kernel/unwind.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

#include "thread_private.h"

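/*
 * The SMC entry points below receive the normal world arguments in r0-r7,
 * push them and hand sp to the C handlers as a struct pointer. A minimal
 * sketch of the C-side view, assuming struct thread_smc_args in thread.h
 * matches the push order (one 32-bit word per register):
 *
 *	struct thread_smc_args {
 *		uint32_t a0;	// r0: SMC function ID in, return code out
 *		uint32_t a1;	// r1
 *		uint32_t a2;	// r2
 *		uint32_t a3;	// r3
 *		uint32_t a4;	// r4
 *		uint32_t a5;	// r5
 *		uint32_t a6;	// r6
 *		uint32_t a7;	// r7
 *	};
 *
 * The handlers update the struct in place, which is why the entry points
 * below pop the words back into r1-r8 before issuing the
 * TEESMC_OPTEED_RETURN_CALL_DONE SMC.
 */
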
LOCAL_FUNC vector_std_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (an error was detected) it will do a
	 * normal "C" return.
	 */
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_fast_smc
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	ldr	lr, =thread_nintr_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_on_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_suspend_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_resume_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_system_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_system_reset_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.  Also used when compiled with the internal monitor, but
 * the cpu_*_entry and system_*_entry are not used then.
 *
 * Note that ARM-TF depends on the layout of this vector table, any change
 * in layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
UNWIND(	.fnend)
END_FUNC thread_vector_table
KEEP_PAGER thread_vector_table
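
/*
 * A sketch of how ARM-TF consumes the table above, assuming the ARM-TF
 * side declares a matching struct along these lines (each entry is a
 * single 4-byte branch instruction, so entry N lives at offset 4 * N):
 *
 *	struct optee_vectors {
 *		uint32_t std_smc_entry;
 *		uint32_t fast_smc_entry;
 *		uint32_t cpu_on_entry;
 *		uint32_t cpu_off_entry;
 *		uint32_t cpu_resume_entry;
 *		uint32_t cpu_suspend_entry;
 *		uint32_t fiq_entry;
 *		uint32_t system_off_entry;
 *		uint32_t system_reset_entry;
 *	};
 *
 * ARM-TF branches to the entry matching the event it dispatches, which is
 * why reordering the branches above requires a matching change in ARM-TF.
 */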

FUNC thread_set_abt_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_abt_sp

FUNC thread_set_und_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_UND
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_und_sp

FUNC thread_set_irq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_fiq_sp

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldm	r12!, {sp, lr}

	cps	#CPSR_MODE_SVC
	ldm	r12!, {r1, sp, lr}
	msr	spsr_fsxc, r1

	cps	#CPSR_MODE_SVC
	ldm	r12, {r1, r2}
	push	{r1, r2}

	ldm	r0, {r0-r12}

	/* Restore CPSR and jump to the instruction to resume at */
	rfefd	sp!
UNWIND(	.fnend)
END_FUNC thread_resume
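
/*
 * The offsets used by thread_resume() above and the save-state helpers
 * below assume a register frame laid out as consecutive 32-bit words. A
 * minimal sketch, assuming struct thread_ctx_regs in thread.h follows the
 * same order as the loads and stores in this file:
 *
 *	struct thread_ctx_regs {
 *		uint32_t r[13];		// r0-r12, skipped by add r12, r0, #(13 * 4)
 *		uint32_t usr_sp;	// accessed from CPSR_MODE_SYS
 *		uint32_t usr_lr;
 *		uint32_t svc_spsr;	// accessed from CPSR_MODE_SVC
 *		uint32_t svc_sp;
 *		uint32_t svc_lr;
 *		uint32_t pc;		// consumed by rfefd sp!
 *		uint32_t cpsr;
 *	};
 */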

/*
 * Disables IRQ and FIQ and saves the state of a thread that is in FIQ
 * mode, which has banked r8-r12 registers. Returns the original CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mov	r9, lr

	/*
	 * Uses the stack for temporary storage while storing the needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps	#CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	stm	r0!, {r1, sp, lr}

	/* Back to FIQ mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
UNWIND(	.fnend)
END_FUNC thread_save_state_fiq

/*
 * Disables IRQ and FIQ and saves the state of the thread, returns the
 * original CPSR.
 */
LOCAL_FUNC thread_save_state , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses the stack for temporary storage while storing the needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	stm	r0!, {r1, sp, lr}

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_save_state

FUNC thread_std_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Pass r0-r7 in a struct thread_smc_args */
	push	{r0-r7}
	mov	r0, sp
	bl	__thread_std_smc_entry
	/*
	 * Load the returned r0-r3 into preserved registers and skip the
	 * "returned" r4-r7 since they will not be returned to normal
	 * world.
	 */
	pop	{r4-r7}
	add	sp, #(4 * 4)

	/* Disable interrupts before switching to temporary stack */
	cpsid	aif
	bl	thread_get_tmp_sp
	mov	sp, r0

	bl	thread_state_free

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	r1, r4
	mov	r2, r5
	mov	r3, r6
	mov	r4, r7
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC thread_std_smc_entry
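
/*
 * A rough sketch of the sequence above, under the assumption that
 * __thread_std_smc_entry() treats its argument as a struct thread_smc_args *
 * and writes the values destined for normal world back into that struct:
 *
 *	push {r0-r7}			// build the argument struct on the stack
 *	__thread_std_smc_entry(args)	// args->a0..a3 updated with the result
 *	pop {r4-r7}			// keep a0..a3 in callee-saved registers
 *	sp = thread_get_tmp_sp()	// leave the thread stack ...
 *	thread_state_free()		// ... before it is freed here
 *	smc TEESMC_OPTEED_RETURN_CALL_DONE with r1-r4 = a0..a3
 */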

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
/*
 * r0-r2 are used to pass parameters to normal world
 * r0-r5 are used to pass return values back from normal world
 *
 * Note that r3 is used to pass "resume information", that is, which
 * thread it is that should resume.
 *
 * Since this function follows AAPCS we need to preserve r4-r5, which
 * are otherwise modified when returning back from normal world.
 */
UNWIND(	.fnstart)
	push	{r4-r5, lr}
UNWIND(	.save	{r4-r5, lr})
	push	{r0}
UNWIND(	.save	{r0})

	bl	thread_save_state
	mov	r4, r0			/* Save original CPSR */

	/*
	 * Switch to temporary stack and SVC mode. Save CPSR to resume into.
	 */
	bl	thread_get_tmp_sp
	ldr	r5, [sp]		/* Get pointer to rv[] */
	cps	#CPSR_MODE_SVC		/* Change to SVC mode */
	mov	sp, r0			/* Switch to tmp stack */

	mov	r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	mov	r1, r4			/* CPSR to restore */
	ldr	r2, =.thread_rpc_return
	bl	thread_state_suspend
	mov	r4, r0			/* Supply thread index */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldm	r5, {r1-r3}		/* Load rv[] into r1-r3 */
	smc	#0
	b	.	/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * it had when thread_save_state() was called above.
	 *
	 * Jumps here from thread_resume above when RPC has returned. The
	 * IRQ and FIQ bits are restored to what they were when this
	 * function was originally entered.
	 */
	pop	{r12}			/* Get pointer to rv[] */
	stm	r12, {r0-r5}		/* Store r0-r5 into rv[] */
	pop	{r4-r5, pc}
UNWIND(	.fnend)
END_FUNC thread_rpc
KEEP_PAGER thread_rpc
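
/*
 * A hypothetical caller-side sketch (not taken from thread.c), only to
 * illustrate the register contract documented above:
 *
 *	uint32_t rv[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
 *
 *	thread_rpc(rv);		// rv[0..2] are passed to normal world
 *	// Here the RPC has returned and rv[0..5] hold the values normal
 *	// world handed back in r0-r5.
 */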

/* The handler of native interrupt. */
.macro	native_intr_handler mode:req
	.ifc	\mode\(),irq
	/*
	 * Foreign interrupts should be masked.
	 * For GICv2, IRQ is for foreign interrupts and is already masked by
	 * hardware in FIQ mode, which is used for native interrupts.
	 * For GICv3, FIQ is for foreign interrupts. It's not masked by
	 * hardware in IRQ mode, which is used for native interrupts.
	 */
	cpsid	f
	.endif
	/*
	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
	 * return address
	 */
	sub	lr, lr, #4

	/*
	 * We're always saving {r0-r3}. In IRQ mode we're also saving r12.
	 * In FIQ mode we're saving the banked FIQ registers {r8-r12}
	 * because the secure monitor doesn't save those. The treatment of
	 * the banked FIQ registers is somewhat analogous to the lazy save
	 * of VFP registers.
	 */
	.ifc	\mode\(),fiq
	push	{r0-r3, r8-r12, lr}
	.else
	push	{r0-r3, r12, lr}
	.endif
	bl	thread_check_canaries
	ldr	lr, =thread_nintr_handler_ptr
	ldr	lr, [lr]
	blx	lr
	.ifc	\mode\(),fiq
	pop	{r0-r3, r8-r12, lr}
	.else
	pop	{r0-r3, r12, lr}
	.endif
	movs	pc, lr
.endm

/* The handler of foreign interrupt. */
.macro foreign_intr_handler mode:req
	.ifc	\mode\(),irq
	/*
	 * Disable FIQ if the foreign interrupt is sent as IRQ.
	 * IRQ mode is set up to use the tmp stack so FIQ has to be
	 * disabled before touching the stack. We can also assign
	 * SVC sp from IRQ sp to get SVC mode into the state we
	 * need when doing the SMC below.
	 * If it is sent as FIQ, the IRQ has already been masked by hardware.
	 */
	cpsid	f
	.endif
	sub	lr, lr, #4
	push	{lr}
	push	{r12}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr
	pop	{r12}
	pop	{r2}
	blx	thread_state_suspend
	mov	r4, r0		/* Supply thread index */

	/*
	 * Switch to SVC mode and copy the current stack pointer as it
	 * already is the tmp stack.
	 */
	mov	r0, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r0

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	r1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	r2, #0
	mov	r3, #0
	/* r4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
.endm
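
/*
 * Summary of the mapping encoded by the two macros above and the
 * CFG_ARM_GICV3 selection at the end of this file:
 *
 *			native interrupt	foreign interrupt
 *	GICv2		FIQ			IRQ
 *	GICv3		IRQ			FIQ
 *
 * native_intr_handler services the interrupt in place via
 * thread_nintr_handler_ptr, while foreign_intr_handler suspends the
 * thread and returns to normal world with OPTEE_SMC_RETURN_RPC_FOREIGN_INTR.
 */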

FUNC thread_init_vbar , :
UNWIND(	.fnstart)
	/* Set vector (VBAR) */
	ldr	r0, =thread_vect_table
	write_vbar r0
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * Below are low level routines handling entry and return from user mode.
 *
 * thread_enter_user_mode() saves all registers that user mode can change
 * so kernel mode can restore needed registers when resuming execution
 * after the call to thread_enter_user_mode() has returned.
 * thread_enter_user_mode() doesn't return directly since it enters user
 * mode instead; it's thread_unwind_user_mode() that does the
 * returning by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls utee_return, the user TA calls utee_panic,
 * or an abort occurs.
 *
 * Calls to utee_return or utee_panic are handled as:
 * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call() which
 * calls syscall_return() or syscall_panic().
 *
 * These function calls return normally except thread_svc_handler(),
 * which is an exception handling routine and therefore reads the return
 * address and SPSR to restore from the stack. syscall_return() and
 * syscall_panic() change the return address and SPSR used by
 * thread_svc_handler() so that, instead of returning into user mode as
 * with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode. When thread_svc_handler() returns, the stack pointer is
 * at the point where thread_enter_user_mode() left it, so this is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way but by thread_abort_handler()
 * instead: when the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() so that it returns into
 * thread_unwind_user_mode() instead.
 */
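
/*
 * A condensed sketch of the round trip described above, assuming the
 * C-side wrapper (thread_enter_user_mode() in thread.c) essentially does
 * the following (the exact signature and SPSR computation live in the C
 * code):
 *
 *	uint32_t thread_enter_user_mode(...)
 *	{
 *		// build a user mode SPSR, then:
 *		return __thread_enter_user_mode(a0, a1, a2, a3, user_sp,
 *						user_func, spsr,
 *						exit_status0, exit_status1);
 *	}
 *
 * That "return" only completes once syscall_return(), syscall_panic() or
 * the abort path has redirected execution into thread_unwind_user_mode(),
 * which unwinds the frame set up by __thread_enter_user_mode() below.
 */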

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
 *               unsigned long user_func, unsigned long spsr,
 *               uint32_t *exit_status0, uint32_t *exit_status1)
 *
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed
	 * thread_unwind_user_mode() has to be updated.
	 */
	push	{r4-r12,lr}

	ldr	r4, [sp, #(10 * 0x4)]	/* user stack pointer */
	ldr	r5, [sp, #(11 * 0x4)]	/* user function */
	ldr	r6, [sp, #(12 * 0x4)]	/* spsr */

	/*
	 * Set the Saved Program Status Register (SPSR) to user mode to
	 * allow entry of user mode through the movs below.
	 */
	msr	spsr_cxsf, r6

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r6, sp
	mov	sp, r4
	cps	#CPSR_MODE_SVC
	push	{r6,r7}

	/*
	 * Don't allow return from this function; the return is done through
	 * thread_unwind_user_mode() below.
	 */
	mov	lr, #0
	/* Call the user function with its arguments */
	movs	pc, r5
UNWIND(	.fnend)
END_FUNC __thread_enter_user_mode

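/*
 * Sketch of the SVC stack as __thread_enter_user_mode() leaves it, which is
 * what the hard-coded offsets in thread_unwind_user_mode() below rely on
 * (32-bit word indices relative to sp, assuming the caller pushed the stack
 * arguments user_sp, user_func, spsr, exit_status0, exit_status1 in that
 * order):
 *
 *	sp + 0..1:	old user sp, caller's r7 (kept for 8-byte alignment)
 *	sp + 2..11:	saved {r4-r12, lr} of the caller
 *	sp + 12..14:	stack arguments user_sp, user_func, spsr
 *	sp + 15:	exit_status0, i.e. &ctx->panicked
 *	sp + 16:	exit_status1, i.e. &ctx->panic_code
 */
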
/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	ip, [sp, #(15 * 0x4)]	/* &ctx->panicked */
	str	r1, [ip]
	ldr	ip, [sp, #(16 * 0x4)]	/* &ctx->panic_code */
	str	r2, [ip]

	/* Restore old user sp */
	pop	{r4,r7}
	cps	#CPSR_MODE_SYS
	mov	sp, r4
	cps	#CPSR_MODE_SVC

	pop	{r4-r12,pc}	/* Match the push in __thread_enter_user_mode() */
UNWIND(	.fnend)
END_FUNC thread_unwind_user_mode

	.section .text.thread_vect_table
	.align	5
FUNC thread_vect_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.			/* Reset			*/
	b	thread_und_handler	/* Undefined instruction	*/
	b	thread_svc_handler	/* System call			*/
	b	thread_pabort_handler	/* Prefetch abort		*/
	b	thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	thread_irq_handler	/* IRQ				*/
	b	thread_fiq_handler	/* FIQ				*/

thread_und_handler:
	/*
	 * Disable both foreign and native interrupts in the thread handlers.
	 * The tee handlers can decide when the native interrupts should
	 * be enabled.
	 */
	cpsid	f	/* IRQ is already masked by the hardware */
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	mrs	r1, spsr
	tst	r1, #CPSR_T
	subne	lr, lr, #2
	subeq	lr, lr, #4
	mov	r0, #ABORT_TYPE_UNDEF
	b	thread_abort_common

thread_dabort_handler:
	/*
	 * Disable both foreign and native interrupts in the thread handlers.
	 * The tee handlers can decide when the native interrupts should
	 * be enabled.
	 */
	cpsid	f	/* IRQ is already masked by the hardware */
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #8
	mov	r0, #ABORT_TYPE_DATA
	b	thread_abort_common

thread_pabort_handler:
	/*
	 * Disable both foreign and native interrupts in the thread handlers.
	 * The tee handlers can decide when the native interrupts should
	 * be enabled.
	 */
	cpsid	f	/* IRQ is already masked by the hardware */
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #4
	mov	r0, #ABORT_TYPE_PREFETCH
thread_abort_common:
	/*
	 * At this label:
	 * cpsr is in mode undef or abort
	 * sp is still pointing to struct thread_core_local belonging to
	 * this core.
	 * {r0, r1} are saved in struct thread_core_local pointed to by sp
	 * {r2-r11, ip} are untouched.
	 * r0 holds the first argument for abort_handler()
	 */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_ABORT

	/*
	 * Select stack and update flags accordingly
	 *
	 * Normal case:
	 * If the abort stack is unused select that.
	 *
	 * Fatal error handling:
	 * If we're already using the abort stack, as noted by bit
	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
	 * field, we're selecting the temporary stack instead to be able to
	 * make a stack trace of the abort in abort mode.
	 *
	 * r1 is initialized as a temporary stack pointer until we've
	 * switched to system mode.
	 */
	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	orrne	r1, r1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]

	/*
	 * Store registers on the stack fitting struct thread_abort_regs,
	 * starting from the end of the struct:
	 * {r2-r11, ip}
	 * Load the content of the previously saved {r0-r1} and store it
	 * up to the pad field.
	 * After this only {usr_sp, usr_lr} are missing in the struct.
	 */
	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
	/* Push the original {r0-r1} on the selected stack */
	stmdb	r1!, {r2-r3}
	mrs	r3, spsr
	/* Push {pad, spsr, elr} on the selected stack */
	stmdb	r1!, {r2, r3, lr}

	cps	#CPSR_MODE_SYS
	str	lr, [r1, #-4]!
	str	sp, [r1, #-4]!
	mov	sp, r1

	bl	abort_handler

	mov	ip, sp
	ldr	sp, [ip], #4
	ldr	lr, [ip], #4

	/*
	 * Even if we entered via CPSR_MODE_UND, we are returning via
	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
	 * here.
	 */
	cps	#CPSR_MODE_ABT
	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
	msr	spsr_fsxc, r1

	/* Update core local flags */
	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	ldm	ip, {r0-r11, ip}

	movs	pc, lr
	/* end thread_abort_common */

thread_svc_handler:
	/*
	 * Disable both foreign and native interrupts in the thread handlers.
	 * The tee handlers can decide when the native interrupts should
	 * be enabled.
	 */
	cpsid	f	/* IRQ is already masked by the hardware */
	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	bl	tee_svc_handler
	pop	{r0}
	msr	spsr_fsxc, r0
	pop	{r0-r7, lr}
	movs	pc, lr
	/* end thread_svc_handler */

thread_fiq_handler:
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
	/* end thread_fiq_handler */

thread_irq_handler:
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
	/* end thread_irq_handler */
UNWIND(	.fnend)
END_FUNC thread_vect_table
822