/*
 * Copyright (c) 2015-2017, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm64.h>
#include <arm64_macros.S>
#include <asm-defines.h>
#include <asm.S>
#include <keep.h>
#include <kernel/thread_defs.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

#include "thread_private.h"

	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldr	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		adr	x\res, threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm
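
/*
 * Illustration only (not used by the build): the macro above is roughly
 * equivalent to the C expression
 *	res = (vaddr_t)threads +
 *	      core_local->curr_thread * sizeof(struct thread_ctx);
 * i.e. a pointer to the current entry of the threads[] array. For
 * example, "get_thread_ctx sp, 0, 1, 2" (as used in el0_svc below)
 * leaves the pointer in x0 and clobbers x1 and x2.
 */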

	.section .text.thread_asm
LOCAL_FUNC vector_std_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (an error was detected) it does a normal
	 * "C" return.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_fast_smc
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry
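
/*
 * In both SMC entries above the handler updates the struct
 * thread_smc_args stored on the stack. The resulting a0..a7 are
 * reloaded into x1-x8 and handed back to the secure monitor together
 * with TEESMC_OPTEED_RETURN_CALL_DONE in x0.
 */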

LOCAL_FUNC vector_fiq_entry , :
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16
	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
	adr	x16, thread_cpu_on_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
	adr	x16, thread_cpu_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
	adr	x16, thread_cpu_suspend_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
	adr	x16, thread_cpu_resume_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
	adr	x16, thread_system_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
	adr	x16, thread_system_reset_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry
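
/*
 * The cpu_on/off/suspend/resume and system_off/reset entries above all
 * follow the same pattern: the registered handler is called through its
 * function pointer, its return value is moved to x1, and the matching
 * TEESMC_OPTEED_RETURN_*_DONE code is loaded into x0 before the SMC
 * back to the secure monitor.
 */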

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.
 *
 * Note that ARM-TF depends on the layout of this vector table; any
 * change in layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table
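
/*
 * Each entry above is a single branch instruction, so ARM-TF can reach
 * a given service at a fixed offset from the table address it was
 * handed. The order is assumed to match the vector structure used by
 * the OP-TEE dispatcher (opteed) in ARM-TF: std SMC, fast SMC,
 * cpu on/off/resume/suspend, FIQ, system off and system reset.
 */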


/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3
	load_xregs x0, THREAD_CTX_REGS_X1, 1, 30
	ldr	x0, [x0, #THREAD_CTX_REGS_X0]
	eret
END_FUNC thread_resume

FUNC thread_std_smc_entry , :
	/* pass x0-x7 in a struct thread_smc_args */
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp

	/* Call the registered handler */
	bl	__thread_std_smc_entry

	/*
	 * Load the returned x0-x3 into preserved registers and skip the
	 * "returned" x4-x7 since they will not be returned to normal
	 * world.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
	add	sp, sp, #THREAD_SMC_ARGS_SIZE

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	bl	thread_get_tmp_sp
	mov	sp, x0

	bl	thread_state_free

	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, x23
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
	/* Read daif and create an SPSR */
	mrs	x1, daif
	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	push	x0, xzr
	push	x1, x30
	bl	thread_get_ctx_regs
	ldr	x30, [sp, #8]
	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
	mov	x19, x0

	bl	thread_get_tmp_sp
	pop	x1, xzr		/* Match "push x1, x30" above */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP]
	ldr	x20, [sp]	/* Get pointer to rv[] */
	mov	sp, x0		/* Switch to tmp stack */

	adr	x2, .thread_rpc_return
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	mov	x4, x0		/* Supply thread index */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	load_wregs x20, 0, 1, 3	/* Load rv[] into w1-w3 */
	smc	#0
	b	.		/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * saved in THREAD_CTX_REGS_SP above.
	 *
	 * Jumps here from thread_resume above when RPC has returned. The
	 * IRQ and FIQ bits are restored to what they were when this
	 * function was originally entered.
	 */
	pop	x16, xzr	/* Get pointer to rv[] */
	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
	ret
END_FUNC thread_rpc
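
/*
 * Rough usage sketch (illustration only, see the callers in thread.c):
 * the caller fills rv[] with an OPTEE_SMC_RETURN_RPC_* code and its
 * arguments, calls thread_rpc(), and once normal world has serviced the
 * request and resumed this thread, the six words returned in w0-w5 have
 * been written back into rv[]. For example:
 *
 *	uint32_t rv[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
 *
 *	thread_rpc(rv);	// returns when normal world resumes the thread
 */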

FUNC thread_init_vbar , :
	adr	x0, thread_vect_table
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
 *               unsigned long user_func, unsigned long spsr,
 *               uint32_t *exit_status0, uint32_t *exit_status1)
 */
FUNC __thread_enter_user_mode , :
	ldr	x8, [sp]
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Switch to SP_EL1
	 * Disable exceptions
	 * Save kern sp in x19
	 */
	msr	daifset, #DAIFBIT_ALL
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save kernel stack pointer to ensure that el0_svc() uses
	 * correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	msr	spsr_el1, x6
	/* Set user sp */
	mov	x13, x4		/* Used when running TA in Aarch32 */
	msr	sp_el0, x4	/* Used when running TA in Aarch64 */
	/* Set user function */
	msr	elr_el1, x5
	/* Set frame pointer (user stack can't be unwound past this point) */
	mov	x29, #0

	/* Jump into user mode */
	eret
END_FUNC __thread_enter_user_mode
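
/*
 * __thread_enter_user_mode() does not return directly: execution
 * continues in user mode after the eret above. Control comes back to
 * the caller when user mode raises an exception (an SVC or an abort)
 * and the kernel eventually calls thread_unwind_user_mode() below,
 * which restores the x19-x30 saved in the struct thread_user_mode_rec
 * and returns with the value passed in w0 as the function result.
 */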

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 * 		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
	str	w1, [x3]
	str	w2, [x4]
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes
	 * the vector entry point as the parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

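
/*
 * Layout of the exception vector table below: each entry may hold at
 * most 32 instructions (32 * 4 = 128 = 0x80 bytes), which is what
 * check_vector_size enforces and why every entry starts with
 * ".align 7". The four architecturally defined groups start at offset
 * 0x000 (current EL with SP_EL0), 0x200 (current EL with SP_ELx),
 * 0x400 (lower EL using AArch64) and 0x600 (lower EL using AArch32)
 * from VBAR_EL1.
 */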
	.align	11
LOCAL_FUNC thread_vect_table , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7
sync_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size sync_el1_sp0

	.align	7
irq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size irq_el1_sp0

	.align	7
fiq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size fiq_el1_sp0

	.align	7
SErrorSP0:
	b	SErrorSP0
	check_vector_size SErrorSP0

	/* -----------------------------------------------------
	 * Current EL with SPx : 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionSPx:
	b	SynchronousExceptionSPx
	check_vector_size SynchronousExceptionSPx

	.align	7
IrqSPx:
	b	IrqSPx
	check_vector_size IrqSPx

	.align	7
FiqSPx:
	b	FiqSPx
	check_vector_size FiqSPx

	.align	7
SErrorSPx:
	b	SErrorSPx
	check_vector_size SErrorSPx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a64

	.align	7
el0_irq_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el0_irq_a64

	.align	7
el0_fiq_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el0_fiq_a64

	.align	7
SErrorA64:
	b	SErrorA64
	check_vector_size SErrorA64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7
el0_irq_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7
el0_fiq_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7
SErrorA32:
	b	SErrorA32
	check_vector_size SErrorA32

END_FUNC thread_vect_table

LOCAL_FUNC el0_svc , :
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	mov	x0, sp

	/*
	 * Unmask native interrupts, SError, and debug exceptions since we
	 * have nothing left in sp_el1. Note that the SVC handler is
	 * expected to re-enable foreign interrupts by itself.
	 */
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)

	/* Call the handler */
	bl	tee_svc_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This is needed when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
	mov	sp, x0
	ldr	x0, [x30, #THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	eret
END_FUNC el0_svc

LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp
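
	/*
	 * The tbnz above tests the ABORT bit of the flags that were just
	 * shifted into the "saved" position: if it is set we are already
	 * handling an abort (a nested abort), so the tmp stack is used
	 * instead of the abort stack.
	 */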

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el1_sync_abort

	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el0_sync_abort

/* The handler of foreign interrupts. */
.macro foreign_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend
	mov	w4, w0		/* Supply thread index */

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * functions expect to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	w2, #0
	mov	w3, #0
	/* w4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
.endm
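
/*
 * A foreign interrupt thus suspends the current thread and returns to
 * normal world with an OPTEE_SMC_RETURN_RPC_FOREIGN_INTR request,
 * carrying the thread index in w4. Normal world is expected to handle
 * the interrupt and later resume the suspended thread with a
 * return-from-RPC SMC.
 */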

/*
 * This struct is never used from C, it's only here to visualize the
 * layout.
 *
 * struct elx_nintr_rec {
 * 	uint64_t x[19 - 4]; x4..x18
 * 	uint64_t lr;
 * 	uint64_t sp_el0;
 * };
 */
#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)
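
/*
 * With the defines above the offsets work out to: x4..x18 at 0..112,
 * lr at 128, sp_el0 at 136 and ELX_NINTR_REC_SIZE = 144. The size is a
 * multiple of 16, which keeps the stack pointer 16-byte aligned when
 * the record is allocated below.
 */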

/* The handler of native interrupts. */
.macro native_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save registers on stack that can be corrupted by a call to
	 * a C function
	 */
	/* Make room for struct elx_nintr_rec */
	sub	sp, sp, #ELX_NINTR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]

	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
.endm
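
/*
 * Unlike a foreign interrupt, a native interrupt is handled entirely in
 * secure world without suspending the thread: the registers a C call
 * may clobber are stashed (x0..x3 already sit in the core local record,
 * x4..x18, lr and sp_el0 go in a struct elx_nintr_rec on the tmp
 * stack), the registered handler is called, and the interrupted context
 * is resumed with eret.
 */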

LOCAL_FUNC elx_irq , :
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
END_FUNC elx_fiq
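
/*
 * Summary of the mapping above: with CFG_ARM_GICV3 enabled, IRQs are
 * treated as native (secure) interrupts and FIQs as foreign ones;
 * without GICv3 the roles are swapped, FIQ is native and IRQ is
 * foreign.
 */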