/*
 * Copyright (c) 2015-2017, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm64.h>
#include <arm64_macros.S>
#include <asm-defines.h>
#include <asm.S>
#include <keep.h>
#include <kernel/thread_defs.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

#include "thread_private.h"

	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldr	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		adr	x\res, threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm
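
	/*
	 * Rough C equivalent of get_thread_ctx above (a sketch for
	 * orientation only; the authoritative offsets come from
	 * asm-defines):
	 *
	 * res = (vaddr_t)threads + core_local->curr_thread * THREAD_CTX_SIZE;
	 *
	 * i.e. the address of threads[curr_thread], the context of the
	 * thread currently running on this core.
	 */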

	.section .text.thread_asm
LOCAL_FUNC vector_std_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (an error was detected) it does a normal
	 * "C" return.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_fast_smc
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16
	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
	adr	x16, thread_cpu_on_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
	adr	x16, thread_cpu_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
	adr	x16, thread_cpu_suspend_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
	adr	x16, thread_cpu_resume_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
	adr	x16, thread_system_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
	adr	x16, thread_system_reset_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.
 *
 * Note that ARM-TF depends on the layout of this vector table; any
 * change in layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table
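
/*
 * The address of this table is what OP-TEE hands over to ARM-TF during
 * boot (with the TEESMC_OPTEED_RETURN_ENTRY_DONE SMC issued from the
 * entry code). ARM-TF then branches to fixed offsets into the table,
 * which is why the order of the entries above must not change.
 */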

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3
	load_xregs x0, THREAD_CTX_REGS_X1, 1, 30
	ldr	x0, [x0, THREAD_CTX_REGS_X0]
	eret
END_FUNC thread_resume
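
/*
 * Sketch of the thread_ctx_regs layout consumed above (for orientation
 * only; the field order mirrors the offsets used here and the
 * authoritative definition lives in thread.h/asm-defines):
 *
 * struct thread_ctx_regs {
 * 	uint64_t sp;	restored into SP
 * 	uint64_t pc;	restored via ELR_EL1
 * 	uint64_t cpsr;	restored via SPSR_EL1
 * 	uint64_t x[31];	x0..x30
 * };
 */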

FUNC thread_std_smc_entry , :
	/* pass x0-x7 in a struct thread_smc_args */
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp

	/* Call the registered handler */
	bl	__thread_std_smc_entry

	/*
	 * Load the returned x0-x3 into preserved registers and skip the
	 * "returned" x4-x7 since they will not be returned to normal
	 * world.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
	add	sp, sp, #THREAD_SMC_ARGS_SIZE

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	bl	thread_get_tmp_sp
	mov	sp, x0

	bl	thread_state_free

	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, x23
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry
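
/*
 * Register convention at the final SMC above: w0 carries
 * TEESMC_OPTEED_RETURN_CALL_DONE to tell ARM-TF that the call has
 * completed, and x1-x4 carry the result words that ARM-TF forwards to
 * the normal world.
 */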

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
	/* Read daif and create an SPSR */
	mrs	x1, daif
	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	push	x0, xzr
	push	x1, x30
	bl	thread_get_ctx_regs
	ldr	x30, [sp, #8]
	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
	mov	x19, x0

	bl	thread_get_tmp_sp
	pop	x1, xzr		/* Match "push x1, x30" above */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP]
	ldr	x20, [sp]	/* Get pointer to rv[] */
	mov	sp, x0		/* Switch to tmp stack */

	adr	x2, .thread_rpc_return
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	mov	x4, x0		/* Supply thread index */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	load_wregs x20, 0, 1, 3	/* Load rv[0..2] into w1-w3 */
	smc	#0
	b	.		/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * stored in THREAD_CTX above.
	 *
	 * Execution jumps here from thread_resume above when the RPC has
	 * returned. The IRQ and FIQ bits are restored to what they were
	 * when this function was originally entered.
	 */
	pop	x16, xzr	/* Get pointer to rv[] */
	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
	ret
END_FUNC thread_rpc
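
/*
 * The RPC round trip in brief: thread_state_suspend() records
 * .thread_rpc_return as the resume PC, the SMC exits to the normal
 * world through ARM-TF, and once the normal world returns from the RPC
 * the thread is resumed via thread_resume() which lands on
 * .thread_rpc_return with the stack pointer saved above.
 */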

FUNC thread_init_vbar , :
	adr	x0, thread_vect_table
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
 *               unsigned long user_func, unsigned long spsr,
 *               uint32_t *exit_status0, uint32_t *exit_status1)
 */
FUNC __thread_enter_user_mode , :
	ldr	x8, [sp]
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Disable exceptions, save the kernel sp in x19, and switch to
	 * SP_EL1
	 */
	msr	daifset, #DAIFBIT_ALL
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save kernel stack pointer to ensure that el0_svc() uses the
	 * correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	msr	spsr_el1, x6
	/* Set user sp */
	mov	x13, x4		/* Used when running TA in AArch32 */
	msr	sp_el0, x4	/* Used when running TA in AArch64 */
	/* Set user function */
	msr	elr_el1, x5
	/* Set frame pointer (user stack can't be unwound past this point) */
	mov	x29, #0

	/* Jump into user mode */
	eret
END_FUNC __thread_enter_user_mode
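
/*
 * Note that __thread_enter_user_mode() does not return through a plain
 * "ret": the eret above enters user mode, and control reaches the saved
 * x30 again only when thread_unwind_user_mode() below restores the
 * registers stored in the thread_user_mode_rec. The spsr argument (x6)
 * selects the mode to enter, so the same path serves both AArch64 and
 * AArch32 TAs.
 */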

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 * 		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
	str	w1, [x3]
	str	w2, [x4]
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes
	 * the vector entry point as its parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm
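
	/*
	 * Layout constraints for the vector table below: VBAR_EL1
	 * requires 2KiB alignment (hence ".align 11") and each of the
	 * 16 entries is 128 bytes, i.e. 32 A64 instructions, apart
	 * (hence ".align 7" before each entry and the size check above).
	 */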

	.align	11
LOCAL_FUNC thread_vect_table , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7
sync_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size sync_el1_sp0

	.align	7
irq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size irq_el1_sp0

	.align	7
fiq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size fiq_el1_sp0

	.align	7
SErrorSP0:
	b	SErrorSP0
	check_vector_size SErrorSP0

	/* -----------------------------------------------------
	 * Current EL with SPx: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionSPx:
	b	SynchronousExceptionSPx
	check_vector_size SynchronousExceptionSPx

	.align	7
IrqSPx:
	b	IrqSPx
	check_vector_size IrqSPx

	.align	7
FiqSPx:
	b	FiqSPx
	check_vector_size FiqSPx

	.align	7
SErrorSPx:
	b	SErrorSPx
	check_vector_size SErrorSPx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a64

	.align	7
el0_irq_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el0_irq_a64

	.align	7
el0_fiq_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el0_fiq_a64

	.align	7
SErrorA64:
	b	SErrorA64
	check_vector_size SErrorA64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7
el0_irq_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7
el0_fiq_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7
SErrorA32:
	b	SErrorA32
	check_vector_size SErrorA32

END_FUNC thread_vect_table

LOCAL_FUNC el0_svc , :
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	mov	x0, sp

	/*
	 * Unmask native interrupts, SError, and debug exceptions since we
	 * have nothing left in sp_el1. Note that the SVC handler is
	 * expected to re-enable foreign interrupts by itself.
	 */
#if defined(CFG_ARM_GICV3)
	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
#else
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
#endif

	/* Call the handler */
	bl	tee_svc_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This matters when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in
	 * this field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
	mov	sp, x0
	ldr	x0, [x30, #THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	eret
END_FUNC el0_svc
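
/*
 * Rough sketch of the thread_svc_regs record built on the stack above
 * (the field order is illustrative; the THREAD_SVC_REG_* offsets from
 * asm-defines are authoritative):
 *
 * struct thread_svc_regs {
 * 	uint64_t elr;
 * 	uint64_t spsr;
 * 	uint64_t x[15];		x0..x14
 * 	uint64_t x30;
 * 	uint64_t sp_el0;
 * };
 */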

LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el1_sync_abort

	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el0_sync_abort

/* Handler for foreign interrupts */
.macro foreign_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend
	mov	w4, w0		/* Supply thread index */

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * functions expect to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	w2, #0
	mov	w3, #0
	/* w4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
.endm
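
/*
 * After the SMC above the suspended thread is eventually resumed the
 * same way as after an RPC: the normal world returns through ARM-TF,
 * thread_resume() reloads the context saved in thread_ctx_regs and
 * execution continues at the interrupted elr_el1 recorded by
 * thread_state_suspend().
 */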

/*
 * This struct is never used from C; it's only here to visualize the
 * layout.
 *
 * struct elx_nintr_rec {
 * 	uint64_t x[19 - 4]; x4..x18
 * 	uint64_t lr;
 * 	uint64_t sp_el0;
 * };
 */
#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)
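
/*
 * Expanding the offsets above: x4..x18 occupy bytes 0-119, lr lands at
 * offset 128 and sp_el0 at 136, giving ELX_NINTR_REC_SIZE = 144. The
 * eight unused bytes after x18 keep the record size a multiple of 16,
 * as AArch64 requires for the stack pointer.
 */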

/* Handler for native interrupts */
.macro native_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save registers on stack that can be corrupted by a call to
	 * a C function
	 */
	/* Make room for struct elx_nintr_rec */
	sub	sp, sp, #ELX_NINTR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]

	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
.endm

LOCAL_FUNC elx_irq , :
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
END_FUNC elx_fiq

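/*
 * Summary of the two stubs above: with CFG_ARM_GICV3, native (secure)
 * interrupts are delivered as IRQ and foreign (normal world) interrupts
 * as FIQ, so elx_irq handles interrupts natively while elx_fiq forwards
 * them. Without CFG_ARM_GICV3 the roles are swapped: FIQ is native and
 * IRQ is foreign.
 */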