xref: /optee_os/core/arch/arm/kernel/thread_a64.S (revision ed17deb1c66aa1f02391f86f0558fd244cd4277e)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2015-2017, Linaro Limited
4 */
5
6#include <arm64.h>
7#include <arm64_macros.S>
8#include <asm-defines.h>
9#include <asm.S>
10#include <keep.h>
11#include <kernel/thread_defs.h>
12#include <mm/core_mmu.h>
13#include <sm/optee_smc.h>
14#include <sm/teesmc_opteed.h>
15#include <sm/teesmc_opteed_macros.h>
16
17#include "thread_private.h"
18
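	/*
	 * Load a pointer to the struct thread_ctx of the thread currently
	 * running on this core into x<res>. The thread index is read from
	 * the THREAD_CORE_LOCAL_CURR_THREAD field of the thread_core_local
	 * struct at <core_local> (SP_EL1 at all call sites in this file)
	 * and used to index the global threads[] array. x<tmp0> and
	 * x<tmp1> are clobbered.
	 */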
19	.macro get_thread_ctx core_local, res, tmp0, tmp1
20		ldr	w\tmp0, [\core_local, \
21				#THREAD_CORE_LOCAL_CURR_THREAD]
22		adr	x\res, threads
23		mov	x\tmp1, #THREAD_CTX_SIZE
24		madd	x\res, x\tmp0, x\tmp1, x\res
25	.endm
26
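	/*
	 * Branch to <label> if the SPSR value in <reg> describes an
	 * exception return to EL0, that is, either an AArch32 mode or
	 * AArch64 with the EL field set to EL0. Fall through otherwise.
	 */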
27	.macro b_if_spsr_is_el0 reg, label
28		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
29		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
30		b.eq	\label
31	.endm
32
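/*
 * The vector_*_entry functions below are entered from the secure monitor
 * via thread_vector_table with the SMC arguments still in x0-x7.
 * vector_std_smc_entry and vector_fast_smc_entry pack x0-x7 into a
 * struct thread_smc_args on the stack before calling the C handler and
 * report the (possibly updated) arguments back to the monitor with a
 * TEESMC_OPTEED_RETURN_CALL_DONE SMC. As a rough sketch (assumed layout,
 * see the struct thread_smc_args definition in the C headers):
 *
 * struct thread_smc_args {
 *	uint64_t a0;	SMC function ID on entry, first result on return
 *	...
 *	uint64_t a7;
 * };
 */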
33LOCAL_FUNC vector_std_smc_entry , :
34	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
35	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
36	mov	x0, sp
37	bl	thread_handle_std_smc
38	/*
39	 * Normally thread_handle_std_smc() should return via
40	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
41	 * hasn't switched stack (an error was detected) it will do a
42	 * normal "C" return.
43	 */
44	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
45	add	sp, sp, #THREAD_SMC_ARGS_SIZE
46	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
47	smc	#0
48	b	.	/* SMC should not return */
49END_FUNC vector_std_smc_entry
50
51LOCAL_FUNC vector_fast_smc_entry , :
52	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
53	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
54	mov	x0, sp
55	bl	thread_handle_fast_smc
56	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
57	add	sp, sp, #THREAD_SMC_ARGS_SIZE
58	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
59	smc	#0
60	b	.	/* SMC should not return */
61END_FUNC vector_fast_smc_entry
62
63LOCAL_FUNC vector_fiq_entry , :
64	/* Secure Monitor received a FIQ and passed control to us. */
65	bl	thread_check_canaries
66	adr	x16, thread_nintr_handler_ptr
67	ldr	x16, [x16]
68	blr	x16
69	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
70	smc	#0
71	b	.	/* SMC should not return */
72END_FUNC vector_fiq_entry
73
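/*
 * The PSCI and system related entries below (cpu_on/off/suspend/resume,
 * system_off/reset) all follow the same pattern: call the registered
 * handler through the corresponding thread_*_handler_ptr function
 * pointer, pass the handler's return value to the monitor in x1 and
 * report completion with the matching TEESMC_OPTEED_RETURN_*_DONE SMC.
 */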
74LOCAL_FUNC vector_cpu_on_entry , :
75	adr	x16, thread_cpu_on_handler_ptr
76	ldr	x16, [x16]
77	blr	x16
78	mov	x1, x0
79	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
80	smc	#0
81	b	.	/* SMC should not return */
82END_FUNC vector_cpu_on_entry
83
84LOCAL_FUNC vector_cpu_off_entry , :
85	adr	x16, thread_cpu_off_handler_ptr
86	ldr	x16, [x16]
87	blr	x16
88	mov	x1, x0
89	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
90	smc	#0
91	b	.	/* SMC should not return */
92END_FUNC vector_cpu_off_entry
93
94LOCAL_FUNC vector_cpu_suspend_entry , :
95	adr	x16, thread_cpu_suspend_handler_ptr
96	ldr	x16, [x16]
97	blr	x16
98	mov	x1, x0
99	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
100	smc	#0
101	b	.	/* SMC should not return */
102END_FUNC vector_cpu_suspend_entry
103
104LOCAL_FUNC vector_cpu_resume_entry , :
105	adr	x16, thread_cpu_resume_handler_ptr
106	ldr	x16, [x16]
107	blr	x16
108	mov	x1, x0
109	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
110	smc	#0
111	b	.	/* SMC should not return */
112END_FUNC vector_cpu_resume_entry
113
114LOCAL_FUNC vector_system_off_entry , :
115	adr	x16, thread_system_off_handler_ptr
116	ldr	x16, [x16]
117	blr	x16
118	mov	x1, x0
119	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
120	smc	#0
121	b	.	/* SMC should not return */
122END_FUNC vector_system_off_entry
123
124LOCAL_FUNC vector_system_reset_entry , :
125	adr	x16, thread_system_reset_handler_ptr
126	ldr	x16, [x16]
127	blr	x16
128	mov	x1, x0
129	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
130	smc	#0
131	b	.	/* SMC should not return */
132END_FUNC vector_system_reset_entry
133
134/*
135 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
136 * initialization.
137 *
138 * Note that ARM-TF depends on the layout of this vector table; any change
139 * in the layout has to be synced with ARM-TF.
140 */
141FUNC thread_vector_table , :
142	b	vector_std_smc_entry
143	b	vector_fast_smc_entry
144	b	vector_cpu_on_entry
145	b	vector_cpu_off_entry
146	b	vector_cpu_resume_entry
147	b	vector_cpu_suspend_entry
148	b	vector_fiq_entry
149	b	vector_system_off_entry
150	b	vector_system_reset_entry
151END_FUNC thread_vector_table
152KEEP_PAGER thread_vector_table
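/*
 * Each entry above is a single branch instruction at a fixed offset from
 * the start of the table; the OP-TEE dispatcher in ARM-TF enters at the
 * offset matching the event (std/fast SMC, PSCI event or FIQ), which is
 * why the order of the entries must not change without a corresponding
 * change on the ARM-TF side.
 */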
153
154
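/*
 * Restores a thread context saved in a struct thread_ctx_regs: sp,
 * elr_el1 and spsr_el1 are restored first, then the general purpose
 * registers, and the function exits with an eret, either directly
 * (return to EL1) or via eret_to_el0 when the saved SPSR indicates a
 * return to user mode.
 */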
155/* void thread_resume(struct thread_ctx_regs *regs) */
156FUNC thread_resume , :
157	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
158	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
159	mov	sp, x1
160	msr	elr_el1, x2
161	msr	spsr_el1, x3
162
163	b_if_spsr_is_el0 w3, 1f
164
165	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
166	ldr	x0, [x0, THREAD_CTX_REGS_X0]
167	eret
168
1691:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
170	ldr	x0, [x0, THREAD_CTX_REGS_X0]
171
172	msr	spsel, #1
173	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
174	b	eret_to_el0
175END_FUNC thread_resume
176
177FUNC thread_std_smc_entry , :
178	/* pass x0-x7 in a struct thread_smc_args */
179	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
180	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
181	mov	x0, sp
182
183	/* Call the registered handler */
184	bl	__thread_std_smc_entry
185
186	/*
187	 * Load the returned x0-x3 into preserved registers and skip the
188	 * "returned" x4-x7 since they will not be returned to normal
189	 * world.
190	 */
191	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
192	add	sp, sp, #THREAD_SMC_ARGS_SIZE
193
194	/* Mask all maskable exceptions before switching to temporary stack */
195	msr	daifset, #DAIFBIT_ALL
196	bl	thread_get_tmp_sp
197	mov	sp, x0
198
199	bl	thread_state_free
200
201	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
202	mov	x1, x20
203	mov	x2, x21
204	mov	x3, x22
205	mov	x4, x23
206	smc	#0
207	b	.	/* SMC should not return */
208END_FUNC thread_std_smc_entry
209
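/*
 * Issues an RPC to normal world: the callee-saved registers and the
 * stack pointer are saved in the current struct thread_ctx_regs, the
 * thread is suspended with .thread_rpc_return as resume address, and
 * the core switches to the temporary stack before doing the
 * TEESMC_OPTEED_RETURN_CALL_DONE SMC with rv[0..2] as RPC arguments.
 * When the thread is later resumed, execution continues at
 * .thread_rpc_return where the values returned by normal world (w0-w5)
 * are copied back into rv[].
 */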
210/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
211FUNC thread_rpc , :
212	/* Read daif and create an SPSR */
213	mrs	x1, daif
214	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)
215
216	/* Mask all maskable exceptions before switching to temporary stack */
217	msr	daifset, #DAIFBIT_ALL
218	push	x0, xzr
219	push	x1, x30
220	bl	thread_get_ctx_regs
221	ldr	x30, [sp, #8]
222	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
223	mov	x19, x0
224
225	bl	thread_get_tmp_sp
226	pop	x1, xzr		/* Match "push x1, x30" above */
227	mov	x2, sp
228	str	x2, [x19, #THREAD_CTX_REGS_SP]
229	ldr	x20, [sp]	/* Get pointer to rv[] */
230	mov	sp, x0		/* Switch to tmp stack */
231
232	adr	x2, .thread_rpc_return
233	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
234	bl	thread_state_suspend
235	mov	x4, x0		/* Supply thread index */
236	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
237	load_wregs x20, 0, 1, 3	/* Load rv[] into w1-w3 */
238	smc	#0
239	b	.		/* SMC should not return */
240
241.thread_rpc_return:
242	/*
243	 * At this point the stack pointer has been restored to the value
244	 * saved in THREAD_CTX above.
245	 *
246	 * Execution resumes here from thread_resume() above when the RPC
247	 * has returned. The IRQ and FIQ bits are restored to what they
248	 * were when this function was originally entered.
249	 */
250	pop	x16, xzr	/* Get pointer to rv[] */
251	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
252	ret
253END_FUNC thread_rpc
254KEEP_PAGER thread_rpc
255
256FUNC thread_init_vbar , :
257	adr	x0, thread_vect_table
258	msr	vbar_el1, x0
259	ret
260END_FUNC thread_init_vbar
261KEEP_PAGER thread_init_vbar
262
263/*
264 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
265 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
266 *               unsigned long user_func, unsigned long spsr,
267 *               uint32_t *exit_status0, uint32_t *exit_status1)
268 *
269 */
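/*
 * Following AAPCS64, the first eight arguments arrive in x0-x7 and the
 * ninth (exit_status1) is read from the caller's stack below. The
 * layout assumed for struct thread_user_mode_rec is roughly (see the
 * definition in the C headers):
 *
 * struct thread_user_mode_rec {
 *	uint64_t exit_status0_ptr;
 *	uint64_t exit_status1_ptr;
 *	uint64_t x[12];		x19..x30
 * };
 */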
270FUNC __thread_enter_user_mode , :
271	ldr	x8, [sp]
272	/*
273	 * Create and fill in the struct thread_user_mode_rec
274	 */
275	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
276	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
277	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
278
279	/*
280	 * Switch to SP_EL1
281	 * Disable exceptions
282	 * Save kern sp in x19
283	 */
284	msr	daifset, #DAIFBIT_ALL
285	mov	x19, sp
286	msr	spsel, #1
287
288	/*
289	 * Save the kernel stack pointer in the thread context
290	 */
291	/* get pointer to current thread context */
292	get_thread_ctx sp, 21, 20, 22
293	/*
294	 * Save the kernel stack pointer to ensure that el0_svc() uses the
295	 * correct stack pointer
296	 */
297	str	x19, [x21, #THREAD_CTX_KERN_SP]
298
299	/*
300	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
301	 */
302	msr	spsr_el1, x6
303	/* Set user sp */
304	mov	x13, x4		/* Used when running TA in AArch32 */
305	msr	sp_el0, x4	/* Used when running TA in AArch64 */
306	/* Set user function */
307	msr	elr_el1, x5
308	/* Set frame pointer (user stack can't be unwound past this point) */
309	mov x29, #0
310
311	/* Jump into user mode */
312	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
313	b eret_to_el0
314END_FUNC __thread_enter_user_mode
315
316/*
317 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
318 * 		uint32_t exit_status1);
319 * See description in thread.h
320 */
321FUNC thread_unwind_user_mode , :
322	/* Store the exit status */
323	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
324	str	w1, [x3]
325	str	w2, [x4]
326	/* Restore x19..x30 */
327	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
328	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
329	/* Return from the call of thread_enter_user_mode() */
330	ret
331END_FUNC thread_unwind_user_mode
332
333	/*
334	 * This macro verifies that a given vector doesn't exceed the
335	 * architectural limit of 32 instructions. It is meant to be placed
336	 * immediately after the last instruction in the vector and takes the
337	 * vector entry as its parameter.
338	 */
339	.macro check_vector_size since
340	  .if (. - \since) > (32 * 4)
341	    .error "Vector exceeds 32 instructions"
342	  .endif
343	.endm
344
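	/*
	 * Executed at the start of each exception vector taken from EL0.
	 * With CFG_CORE_UNMAP_CORE_AT_EL0 the vectors run in the reduced
	 * (user) mapping, so the macro first switches TTBR0_EL1 back to
	 * the full kernel L1 table and the kernel ASID, re-points
	 * VBAR_EL1 at the vector table in the full mapping and then saves
	 * x0-x3 in the core-local area. Without that configuration it
	 * only saves x0-x3 and switches back to the kernel ASID.
	 */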
345	.macro restore_mapping
346#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
347		/* Temporarily save x0 */
348		msr	tpidr_el1, x0
349
350		/* Update the mapping to use the full kernel mapping */
351		mrs	x0, ttbr0_el1
352		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
353		/* switch to kernel mode ASID */
354		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
355		msr	ttbr0_el1, x0
356		isb
357
358		/* Jump into the full mapping and continue execution */
359		ldr	x0, =1f
360		br	x0
361	1:
362
363		/* Point VBAR_EL1 at the vector table in the full mapping */
364		adr	x0, thread_vect_table
365		msr	vbar_el1, x0
366		isb
367
368		/* Restore x0 */
369		mrs	x0, tpidr_el1
370		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
371#else
372		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
373		mrs	x0, ttbr0_el1
374		/* switch to kernel mode ASID */
375		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
376		msr	ttbr0_el1, x0
377		isb
378#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
379	.endm
380
381#define INV_INSN	0
382	.section .text.thread_vect_table
383	.align	11, INV_INSN
384FUNC thread_vect_table , :
385	/* -----------------------------------------------------
386	 * EL1 with SP0 : 0x0 - 0x180
387	 * -----------------------------------------------------
388	 */
389	.align	7, INV_INSN
390el1_sync_sp0:
391	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
392	b	el1_sync_abort
393	check_vector_size el1_sync_sp0
394
395	.align	7, INV_INSN
396el1_irq_sp0:
397	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
398	b	elx_irq
399	check_vector_size el1_irq_sp0
400
401	.align	7, INV_INSN
402el1_fiq_sp0:
403	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
404	b	elx_fiq
405	check_vector_size el1_fiq_sp0
406
407	.align	7, INV_INSN
408el1_serror_sp0:
409	b	el1_serror_sp0
410	check_vector_size el1_serror_sp0
411
412	/* -----------------------------------------------------
413	 * Current EL with SP1: 0x200 - 0x380
414	 * -----------------------------------------------------
415	 */
416	.align	7, INV_INSN
417el1_sync_sp1:
418	b	el1_sync_sp1
419	check_vector_size el1_sync_sp1
420
421	.align	7, INV_INSN
422el1_irq_sp1:
423	b	el1_irq_sp1
424	check_vector_size el1_irq_sp1
425
426	.align	7, INV_INSN
427el1_fiq_sp1:
428	b	el1_fiq_sp1
429	check_vector_size el1_fiq_sp1
430
431	.align	7, INV_INSN
432el1_serror_sp1:
433	b	el1_serror_sp1
434	check_vector_size el1_serror_sp1
435
436	/* -----------------------------------------------------
437	 * Lower EL using AArch64 : 0x400 - 0x580
438	 * -----------------------------------------------------
439	 */
440	.align	7, INV_INSN
441el0_sync_a64:
442	restore_mapping
443
444	mrs	x2, esr_el1
445	mrs	x3, sp_el0
446	lsr	x2, x2, #ESR_EC_SHIFT
447	cmp	x2, #ESR_EC_AARCH64_SVC
448	b.eq	el0_svc
449	b	el0_sync_abort
450	check_vector_size el0_sync_a64
451
452	.align	7, INV_INSN
453el0_irq_a64:
454	restore_mapping
455
456	b	elx_irq
457	check_vector_size el0_irq_a64
458
459	.align	7, INV_INSN
460el0_fiq_a64:
461	restore_mapping
462
463	b	elx_fiq
464	check_vector_size el0_fiq_a64
465
466	.align	7, INV_INSN
467el0_serror_a64:
468	b	el0_serror_a64
469	check_vector_size el0_serror_a64
470
471	/* -----------------------------------------------------
472	 * Lower EL using AArch32 : 0x600 - 0x780
473	 * -----------------------------------------------------
474	 */
475	.align	7, INV_INSN
476el0_sync_a32:
477	restore_mapping
478
479	mrs	x2, esr_el1
480	mrs	x3, sp_el0
481	lsr	x2, x2, #ESR_EC_SHIFT
482	cmp	x2, #ESR_EC_AARCH32_SVC
483	b.eq	el0_svc
484	b	el0_sync_abort
485	check_vector_size el0_sync_a32
486
487	.align	7, INV_INSN
488el0_irq_a32:
489	restore_mapping
490
491	b	elx_irq
492	check_vector_size el0_irq_a32
493
494	.align	7, INV_INSN
495el0_fiq_a32:
496	restore_mapping
497
498	b	elx_fiq
499	check_vector_size el0_fiq_a32
500
501	.align	7, INV_INSN
502el0_serror_a32:
503	b	el0_serror_a32
504	check_vector_size el0_serror_a32
505
506/*
507 * We're keeping this code in the same section as the vector table to
508 * make sure that it's always available.
509 */
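/*
 * eret_to_el0 expects x0 and x1 to have been saved in the core-local
 * area (THREAD_CORE_LOCAL_X0/X1) and SP_EL1 to be selected. With
 * CFG_CORE_UNMAP_CORE_AT_EL0 it switches VBAR_EL1 and TTBR0_EL1 to the
 * reduced mapping and the user ASID before the final eret; otherwise it
 * only switches to the user ASID.
 */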
510eret_to_el0:
511
512#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
513	/* Point VBAR_EL1 at the vector table in the reduced mapping */
514	adr	x0, thread_user_kcode_offset
515	ldr	x0, [x0]
516	adr	x1, thread_vect_table
517	sub	x1, x1, x0
518	msr	vbar_el1, x1
519	isb
520
521	/* Jump into the reduced mapping and continue execution */
522	ldr	x1, =1f
523	sub	x1, x1, x0
524	br	x1
5251:
526
527	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
528	msr	tpidr_el1, x0
529
530	/* Update the mapping to exclude the full kernel mapping */
531	mrs	x0, ttbr0_el1
532	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
533	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
534	msr	ttbr0_el1, x0
535	isb
536
537	mrs	x0, tpidr_el1
538#else
539	mrs	x0, ttbr0_el1
540	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
541	msr	ttbr0_el1, x0
542	isb
543	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
544#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
545
546	eret
547
548END_FUNC thread_vect_table
549
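/*
 * Handles an SVC from user mode: the kernel stack saved in the thread
 * context is restored, a struct thread_svc_regs is built on it and
 * passed to tee_svc_handler(), and the (possibly modified) register
 * state is reloaded before returning to the caller. Judging by the
 * offsets used below, the struct is assumed to look roughly like (see
 * the real definition in the C headers):
 *
 * struct thread_svc_regs {
 *	uint64_t elr;
 *	uint64_t spsr;
 *	uint64_t x0;	.. x14
 *	uint64_t x30;
 *	uint64_t sp_el0;
 * };
 */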
550LOCAL_FUNC el0_svc , :
551	/* get pointer to current thread context in x0 */
552	get_thread_ctx sp, 0, 1, 2
553	/* load saved kernel sp */
554	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
555	/* Keep pointer to initial record in x1 */
556	mov	x1, sp
557	/* Switch to SP_EL0 and restore kernel sp */
558	msr	spsel, #0
559	mov	x2, sp	/* Save SP_EL0 */
560	mov	sp, x0
561
562	/* Make room for struct thread_svc_regs */
563	sub	sp, sp, #THREAD_SVC_REG_SIZE
564	stp	x30,x2, [sp, #THREAD_SVC_REG_X30]
565
566	/* Restore x0-x3 */
567	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
568	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]
569
570	/* Prepare the argument for the handler */
571	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
572	mrs	x0, elr_el1
573	mrs	x1, spsr_el1
574	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
575	mov	x0, sp
576
577	/*
578	 * Unmask native interrupts, SError, and debug exceptions since we
579	 * have nothing left in sp_el1. Note that the SVC handler is expected
580	 * to re-enable foreign interrupts by itself.
581	 */
582#if defined(CFG_ARM_GICV3)
583	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
584#else
585	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
586#endif
587
588	/* Call the handler */
589	bl	tee_svc_handler
590
591	/* Mask all maskable exceptions since we're switching back to sp_el1 */
592	msr	daifset, #DAIFBIT_ALL
593
594	/*
595	 * Save the kernel sp we had at the beginning of this function.
596	 * This is needed when this TA has called another TA, because
597	 * __thread_enter_user_mode() also saves the stack pointer in this
598	 * field.
599	 */
600	msr	spsel, #1
601	get_thread_ctx sp, 0, 1, 2
602	msr	spsel, #0
603	add	x1, sp, #THREAD_SVC_REG_SIZE
604	str	x1, [x0, #THREAD_CTX_KERN_SP]
605
606	/* Restore registers to the required state and return */
607	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
608	msr	elr_el1, x0
609	msr	spsr_el1, x1
610	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
611	mov	x30, sp
612	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
613	mov	sp, x0
614	b_if_spsr_is_el0 w1, 1f
615	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
616	ldr	x30, [x30, #THREAD_SVC_REG_X30]
617
618	eret
619
6201:	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
621	ldr	x30, [x30, #THREAD_SVC_REG_X30]
622
623	msr	spsel, #1
624	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
625	b	eret_to_el0
626END_FUNC el0_svc
627
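/*
 * Handles a synchronous exception taken from EL1. The handler selects
 * the abort stack, or the temporary stack if this is a nested abort,
 * saves the interrupted context in a struct thread_abort_regs and calls
 * abort_handler(). From the offsets used below the struct is assumed to
 * contain x0..x30, elr, spsr and sp_el0 (see the struct thread_abort_regs
 * definition in the C headers).
 */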
628LOCAL_FUNC el1_sync_abort , :
629	mov	x0, sp
630	msr	spsel, #0
631	mov	x3, sp		/* Save original sp */
632
633	/*
634	 * Update core local flags.
635	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
636	 */
637	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
638	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
639	orr	w1, w1, #THREAD_CLF_ABORT
640	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
641			.Lsel_tmp_sp
642
643	/* Select abort stack */
644	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
645	b	.Lset_sp
646
647.Lsel_tmp_sp:
648	/* Select tmp stack */
649	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
650	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */
651
652.Lset_sp:
653	mov	sp, x2
654	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
655
656	/*
657	 * Save state on stack
658	 */
659	sub	sp, sp, #THREAD_ABT_REGS_SIZE
660	mrs	x2, spsr_el1
661	/* Store spsr, sp_el0 */
662	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
663	/* Store original x0, x1 */
664	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
665	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
666	/* Store original x2, x3 and x4 to x29 */
667	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
668	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
669	/* Store x30, elr_el1 */
670	mrs	x0, elr_el1
671	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
672
673	/*
674	 * Call handler
675	 */
676	mov	x0, #0
677	mov	x1, sp
678	bl	abort_handler
679
680	/*
681	 * Restore state from stack
682	 */
683	/* Load x30, elr_el1 */
684	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
685	msr	elr_el1, x0
686	/* Load x0 to x29 */
687	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
688	/* Switch to SP_EL1 */
689	msr	spsel, #1
690	/* Save x0 to x3 in CORE_LOCAL */
691	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
692	/* Restore spsr_el1 and sp_el0 */
693	mrs	x3, sp_el0
694	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
695	msr	spsr_el1, x0
696	msr	sp_el0, x1
697
698	/* Update core local flags */
699	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
700	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
701	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
702
703	/* Restore x0 to x3 */
704	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
705
706	/* Return from exception */
707	eret
708END_FUNC el1_sync_abort
709
710	/* sp_el0 in x3 */
711LOCAL_FUNC el0_sync_abort , :
712	/*
713	 * Update core local flags
714	 */
715	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
716	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
717	orr	w1, w1, #THREAD_CLF_ABORT
718	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
719
720	/*
721	 * Save state on stack
722	 */
723
724	/* load abt_stack_va_end */
725	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
726	/* Keep pointer to initial record in x0 */
727	mov	x0, sp
728	/* Switch to SP_EL0 */
729	msr	spsel, #0
730	mov	sp, x1
731	sub	sp, sp, #THREAD_ABT_REGS_SIZE
732	mrs	x2, spsr_el1
733	/* Store spsr, sp_el0 */
734	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
735	/* Store original x0, x1 */
736	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
737	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
738	/* Store original x2, x3 and x4 to x29 */
739	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
740	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
741	/* Store x30, elr_el1 */
742	mrs	x0, elr_el1
743	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
744
745	/*
746	 * Call handler
747	 */
748	mov	x0, #0
749	mov	x1, sp
750	bl	abort_handler
751
752	/*
753	 * Restore state from stack
754	 */
755
756	/* Load x30, elr_el1 */
757	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
758	msr	elr_el1, x0
759	/* Load x0 to x29 */
760	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
761	/* Switch to SP_EL1 */
762	msr	spsel, #1
763	/* Save x0 to x3 in EL1_REC */
764	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
765	/* Restore spsr_el1 and sp_el0 */
766	mrs	x3, sp_el0
767	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
768	msr	spsr_el1, x0
769	msr	sp_el0, x1
770
771	/* Update core local flags */
772	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
773	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
774	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
775
776	/* Restore x2 to x3 */
777	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
778
779	b_if_spsr_is_el0 w0, 1f
780
781	/* Restore x0 to x1 */
782	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
783
784	/* Return from exception */
785	eret
7861:	b	eret_to_el0
787END_FUNC el0_sync_abort
788
789/* The handler of foreign interrupts. */
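/*
 * A foreign interrupt is an interrupt that has to be serviced by the
 * normal world. The handler below saves the thread context, suspends
 * the thread with THREAD_FLAGS_EXIT_ON_FOREIGN_INTR, switches to the
 * temporary stack and issues a TEESMC_OPTEED_RETURN_CALL_DONE SMC with
 * OPTEE_SMC_RETURN_RPC_FOREIGN_INTR so that normal world can service
 * the interrupt. Which of IRQ/FIQ is foreign depends on CFG_ARM_GICV3,
 * see elx_irq and elx_fiq at the end of this file.
 */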
790.macro foreign_intr_handler mode:req
791	/*
792	 * Update core local flags
793	 */
794	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
795	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
796	orr	w1, w1, #THREAD_CLF_TMP
797	.ifc	\mode\(),fiq
798	orr	w1, w1, #THREAD_CLF_FIQ
799	.else
800	orr	w1, w1, #THREAD_CLF_IRQ
801	.endif
802	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
803
804	/* get pointer to current thread context in x0 */
805	get_thread_ctx sp, 0, 1, 2
806	/* Keep original SP_EL0 */
807	mrs	x2, sp_el0
808
809	/* Store original sp_el0 */
810	str	x2, [x0, #THREAD_CTX_REGS_SP]
811	/* store x4..x30 */
812	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
813	/* Load original x0..x3 into x10..x13 */
814	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
815	/* Save original x0..x3 */
816	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13
817
818	/* load tmp_stack_va_end */
819	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
820	/* Switch to SP_EL0 */
821	msr	spsel, #0
822	mov	sp, x1
823
824	/*
825	 * Mark current thread as suspended
826	 */
827	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
828	mrs	x1, spsr_el1
829	mrs	x2, elr_el1
830	bl	thread_state_suspend
831	mov	w4, w0		/* Supply thread index */
832
833	/* Update core local flags */
834	/* Switch to SP_EL1 */
835	msr	spsel, #1
836	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
837	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
838	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
839	msr	spsel, #0
840
841	/*
842	 * Note that we're exiting with SP_EL0 selected since the entry
843	 * functions expect to have SP_EL0 selected with the tmp stack
844	 * set.
845	 */
846
847	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
848	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
849	mov	w2, #0
850	mov	w3, #0
851	/* w4 is already filled in above */
852	smc	#0
853	b	.	/* SMC should not return */
854.endm
855
856/*
857 * This struct is never used from C; it's only here to visualize the
858 * layout.
859 *
860 * struct elx_nintr_rec {
861 * 	uint64_t x[19 - 4]; x4..x18
862 * 	uint64_t lr;
863 * 	uint64_t sp_el0;
864 * };
865 */
866#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
867#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
868#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
869#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)
870
871/* The handler of native interrupts. */
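/*
 * A native interrupt is an interrupt that is handled by OP-TEE itself.
 * The handler below switches to the temporary stack, saves the
 * registers that a C function may clobber in a struct elx_nintr_rec
 * (see above), calls the registered handler through
 * thread_nintr_handler_ptr and then returns to the interrupted context,
 * via eret_to_el0 if the exception was taken from user mode.
 */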
872.macro native_intr_handler mode:req
873	/*
874	 * Update core local flags
875	 */
876	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
877	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
878	.ifc	\mode\(),fiq
879	orr	w1, w1, #THREAD_CLF_FIQ
880	.else
881	orr	w1, w1, #THREAD_CLF_IRQ
882	.endif
883	orr	w1, w1, #THREAD_CLF_TMP
884	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
885
886	/* load tmp_stack_va_end */
887	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
888	/* Keep original SP_EL0 */
889	mrs	x2, sp_el0
890	/* Switch to SP_EL0 */
891	msr	spsel, #0
892	mov	sp, x1
893
894	/*
895	 * Save registers on stack that can be corrupted by a call to
896	 * a C function
897	 */
898	/* Make room for struct elx_nintr_rec */
899	sub	sp, sp, #ELX_NINTR_REC_SIZE
900	/* Store x4..x18 */
901	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
902	/* Store lr and original sp_el0 */
903	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]
904
905	bl	thread_check_canaries
906	adr	x16, thread_nintr_handler_ptr
907	ldr	x16, [x16]
908	blr	x16
909
910	/*
911	 * Restore registers
912	 */
913	/* Restore x4..x18 */
914	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
915	/* Load lr and original sp_el0 */
916	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
917	/* Restore SP_EL0 */
918	mov	sp, x2
919	/* Switch back to SP_EL1 */
920	msr	spsel, #1
921
922	/* Update core local flags */
923	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
924	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
925	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
926
927	mrs	x0, spsr_el1
928	/* Restore x2..x3 */
929	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
930	b_if_spsr_is_el0 w0, 1f
931
932	/* Restore x0..x1 */
933	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
934
935	/* Return from exception */
936	eret
9371:	b	eret_to_el0
938.endm
939
940LOCAL_FUNC elx_irq , :
941#if defined(CFG_ARM_GICV3)
942	native_intr_handler	irq
943#else
944	foreign_intr_handler	irq
945#endif
946END_FUNC elx_irq
947
948LOCAL_FUNC elx_fiq , :
949#if defined(CFG_ARM_GICV3)
950	foreign_intr_handler	fiq
951#else
952	native_intr_handler	fiq
953#endif
954END_FUNC elx_fiq
955