xref: /optee_os/core/arch/arm/kernel/thread_a64.S (revision fb7ef469dfeb735e60383ad0e7410fe62dd97eb1)
/*
 * Copyright (c) 2015-2017, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm64.h>
#include <arm64_macros.S>
#include <asm-defines.h>
#include <asm.S>
#include <keep.h>
#include <kernel/thread_defs.h>
#include <mm/core_mmu.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

#include "thread_private.h"

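	/*
	 * Computes the address of the current thread context:
	 * x<res> = threads + core_local->curr_thread * THREAD_CTX_SIZE
	 */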
	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldr	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		adr	x\res, threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm

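	/*
	 * Branches to \label if the SPSR value in \reg describes an entry
	 * from AArch32 or from AArch64 EL0, that is, if the eventual eret
	 * will return to user mode.
	 */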
	.macro b_if_spsr_is_el0 reg, label
		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
		b.eq	\label
	.endm

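/*
 * The vector_*_entry functions below are the targets of
 * thread_vector_table further down. Each one ends by returning to the
 * secure monitor with an SMC carrying one of the TEESMC_OPTEED_RETURN_*
 * codes in x0.
 */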
LOCAL_FUNC vector_std_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (error detected) it will do a normal "C"
	 * return.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_fast_smc
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16
	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
	adr	x16, thread_cpu_on_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
	adr	x16, thread_cpu_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
	adr	x16, thread_cpu_suspend_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
	adr	x16, thread_cpu_resume_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
	adr	x16, thread_system_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
	adr	x16, thread_system_reset_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.
 *
 * Note that ARM-TF depends on the layout of this vector table; any change
 * in layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table
KEEP_PAGER thread_vector_table


/* void thread_resume(struct thread_ctx_regs *regs) */
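/*
 * Reloads sp, elr_el1, spsr_el1 and x0..x30 from @regs and resumes the
 * thread with an eret, going via eret_to_el0 below when the saved SPSR
 * targets user mode.
 */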
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3

	b_if_spsr_is_el0 w3, 1f

	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]
	eret

1:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC thread_resume

FUNC thread_std_smc_entry , :
	/* pass x0-x7 in a struct thread_smc_args */
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp

	/* Call the registered handler */
	bl	__thread_std_smc_entry

	/*
	 * Load the returned x0-x3 into preserved registers and skip the
	 * "returned" x4-x7 since they will not be returned to normal
	 * world.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
	add	sp, sp, #THREAD_SMC_ARGS_SIZE

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	bl	thread_get_tmp_sp
	mov	sp, x0

	bl	thread_state_free

	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, x23
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
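/*
 * Saves x19..x30 and the stack pointer in the current thread context,
 * suspends the thread with thread_state_suspend() and exits to the
 * normal world through the secure monitor with rv[0..2] in w1-w3 and
 * the thread index in x4. When the RPC has been serviced the thread is
 * resumed at .thread_rpc_return below and the returned w0-w5 are
 * copied back into rv[].
 */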
FUNC thread_rpc , :
	/* Read daif and create an SPSR */
	mrs	x1, daif
	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	push	x0, xzr
	push	x1, x30
	bl	thread_get_ctx_regs
	ldr	x30, [sp, #8]
	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
	mov	x19, x0

	bl	thread_get_tmp_sp
	pop	x1, xzr		/* Match "push x1, x30" above */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP]
	ldr	x20, [sp]	/* Get pointer to rv[] */
	mov	sp, x0		/* Switch to tmp stack */

	adr	x2, .thread_rpc_return
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	mov	x4, x0		/* Supply thread index */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	load_wregs x20, 0, 1, 3	/* Load rv[] into w1-w3 */
	smc	#0
	b	.		/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * stored in THREAD_CTX above.
	 *
	 * Jumps here from thread_resume above when RPC has returned. The
	 * IRQ and FIQ bits are restored to what they were when this
	 * function was originally entered.
	 */
	pop	x16, xzr	/* Get pointer to rv[] */
	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
	ret
END_FUNC thread_rpc
KEEP_PAGER thread_rpc

FUNC thread_init_vbar , :
	adr	x0, thread_vect_table
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
 *               unsigned long user_func, unsigned long spsr,
 *               uint32_t *exit_status0, uint32_t *exit_status1)
 *
 */
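/*
 * Saves the callee-saved registers and the exit status pointers in a
 * struct thread_user_mode_rec on the kernel stack, saves the kernel
 * stack pointer in the thread context (where el0_svc picks it up) and
 * enters user mode through eret_to_el0 with SPSR, ELR_EL1 and SP_EL0
 * taken from the spsr, user_func and user_sp arguments.
 *
 * The offsets used below assume a record laid out roughly as (see
 * thread_private.h for the real definition):
 *
 * struct thread_user_mode_rec {
 * 	uint64_t exit_status0_ptr;
 * 	uint64_t exit_status1_ptr;
 * 	uint64_t x[31 - 19]; x19..x30
 * };
 */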
FUNC __thread_enter_user_mode , :
	ldr	x8, [sp]
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Disable exceptions
	 * Save kern sp in x19
	 * Switch to SP_EL1
	 */
	msr	daifset, #DAIFBIT_ALL
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save kernel stack pointer to ensure that el0_svc() uses the
	 * correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	msr	spsr_el1, x6
	/* Set user sp */
	mov	x13, x4		/* Used when running TA in AArch32 */
	msr	sp_el0, x4	/* Used when running TA in AArch64 */
	/* Set user function */
	msr	elr_el1, x5
	/* Set frame pointer (user stack can't be unwound past this point) */
	mov x29, #0

	/* Jump into user mode */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b eret_to_el0
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 * 		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
	str	w1, [x3]
	str	w2, [x4]
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. This is meant to be placed
	 * immediately after the last instruction in the vector. It takes the
	 * vector entry as the parameter
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

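	/*
	 * Used at exception entry from a lower EL: switches ttbr0_el1 back
	 * to the kernel mode ASID (and, with CFG_CORE_UNMAP_CORE_AT_EL0,
	 * from the reduced EL0 mapping back to the full kernel mapping and
	 * its vector table) and leaves x0..x3 saved in the core local
	 * record on SP_EL1. This is the counterpart of eret_to_el0 below.
	 */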
	.macro restore_mapping
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/* Temporarily save x0 */
		msr	tpidr_el1, x0

		/* Update the mapping to use the full kernel mapping */
		mrs	x0, ttbr0_el1
		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb

		/* Jump into the full mapping and continue execution */
		ldr	x0, =1f
		br	x0
	1:

		/* Point to the vector into the full mapping */
		adr	x0, thread_vect_table
		msr	vbar_el1, x0
		isb

		/* Restore x0 */
		mrs	x0, tpidr_el1
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
#else
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mrs	x0, ttbr0_el1
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	.endm

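/*
 * Exception vector table for EL1, installed in VBAR_EL1 by
 * thread_init_vbar() above. The table has to be 2 KiB aligned
 * (.align 11) and each entry is limited to 32 instructions, which is
 * what check_vector_size verifies.
 */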
	.section .text.thread_vect_table
	.align	11
FUNC thread_vect_table , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7
sync_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size sync_el1_sp0

	.align	7
irq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size irq_el1_sp0

	.align	7
fiq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size fiq_el1_sp0

	.align	7
SErrorSP0:
	b	SErrorSP0
	check_vector_size SErrorSP0

	/* -----------------------------------------------------
	 * Current EL with SPx: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionSPx:
	b	SynchronousExceptionSPx
	check_vector_size SynchronousExceptionSPx

	.align	7
IrqSPx:
	b	IrqSPx
	check_vector_size IrqSPx

	.align	7
FiqSPx:
	b	FiqSPx
	check_vector_size FiqSPx

	.align	7
SErrorSPx:
	b	SErrorSPx
	check_vector_size SErrorSPx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a64:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a64

	.align	7
el0_irq_a64:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a64

	.align	7
el0_fiq_a64:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a64

	.align	7
SErrorA64:
	b	SErrorA64
	check_vector_size SErrorA64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a32:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7
el0_irq_a32:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7
el0_fiq_a32:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7
SErrorA32:
	b	SErrorA32
	check_vector_size SErrorA32

/*
 * We're keeping this code in the same section as the vector table to
 * make sure that it's always available.
 */
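/*
 * eret_to_el0 expects the x0 and x1 to return to user mode in
 * THREAD_CORE_LOCAL_X0/X1 and SPSR_EL1, ELR_EL1 and SP_EL0 already set
 * up. It switches ttbr0_el1 to the user mode ASID (and, with
 * CFG_CORE_UNMAP_CORE_AT_EL0, to the reduced mapping and its vector
 * table) before the final eret.
 */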
eret_to_el0:

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point to the vector into the reduced mapping */
	adr	x0, thread_user_kcode_offset
	ldr	x0, [x0]
	adr	x1, thread_vect_table
	sub	x1, x1, x0
	msr	vbar_el1, x1
	isb

	/* Jump into the reduced mapping and continue execution */
	ldr	x1, =1f
	sub	x1, x1, x0
	br	x1
1:

	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	msr	tpidr_el1, x0

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x0, ttbr0_el1
	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb

	mrs	x0, tpidr_el1
#else
	mrs	x0, ttbr0_el1
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	eret

END_FUNC thread_vect_table

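/*
 * SVC from user mode: switches to the kernel stack saved in the thread
 * context, builds a struct thread_svc_regs there, calls
 * tee_svc_handler() and returns with the possibly updated ELR/SPSR,
 * going through eret_to_el0 when the SPSR still targets user mode.
 */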
LOCAL_FUNC el0_svc , :
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	mov	x0, sp

	/*
	 * Unmask native interrupts, SError, and debug exceptions since we
	 * have nothing left in sp_el1. Note that the SVC handler is expected
	 * to re-enable foreign interrupts by itself.
	 */
#if defined(CFG_ARM_GICV3)
	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
#else
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
#endif

	/* Call the handler */
	bl	tee_svc_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This is needed when this TA has called another TA because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
	mov	sp, x0
	b_if_spsr_is_el0 w1, 1f
	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	eret

1:	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC el0_svc

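/*
 * Synchronous exception taken from EL1: saves the state in a struct
 * thread_abt_regs on the abort stack (or on the tmp stack if the abort
 * happened while already handling an abort), calls abort_handler() and
 * restores the state before returning with eret.
 */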
LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el1_sync_abort

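/*
 * Synchronous exception taken from a lower EL that isn't an SVC: saves
 * the state in a struct thread_abt_regs on the abort stack, calls
 * abort_handler() and restores the state, returning through
 * eret_to_el0 when the saved SPSR still targets user mode.
 */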
	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x2 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3

	b_if_spsr_is_el0 w0, eret_to_el0

	/* Restore x0 to x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	eret
END_FUNC el0_sync_abort

/* The handler of foreign interrupts. */
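/*
 * A foreign interrupt is handed over to the normal world: the
 * interrupted state is saved in the thread context, the thread is
 * suspended with thread_state_suspend() and an
 * OPTEE_SMC_RETURN_RPC_FOREIGN_INTR RPC is issued from the tmp stack.
 * The suspended thread is later resumed via thread_resume() above.
 */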
.macro foreign_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend
	mov	w4, w0		/* Supply thread index */

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * functions expect to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	w2, #0
	mov	w3, #0
	/* w4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
.endm

/*
 * This struct is never used from C; it's only here to visualize the
 * layout.
 *
 * struct elx_nintr_rec {
 * 	uint64_t x[19 - 4]; x4..x18
 * 	uint64_t lr;
 * 	uint64_t sp_el0;
 * };
 */
#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)

/* The handler of native interrupts. */
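/*
 * A native interrupt is handled by OP-TEE itself: the registers that a
 * called C function may clobber are saved in a struct elx_nintr_rec on
 * the tmp stack, the handler registered in thread_nintr_handler_ptr is
 * invoked, and the interrupted context is then restored and resumed
 * with eret (through eret_to_el0 if the interrupt was taken from user
 * mode).
 */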
.macro native_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save on the stack the registers that can be corrupted by a
	 * call to a C function
	 */
	/* Make room for struct elx_nintr_rec */
	sub	sp, sp, #ELX_NINTR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]

	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	mrs	x0, spsr_el1
	/* Restore x2..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
	b_if_spsr_is_el0 w0, eret_to_el0

	/* Restore x0..x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	eret
.endm

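/*
 * With CFG_ARM_GICV3 native interrupts are delivered as IRQ and
 * foreign interrupts as FIQ, without it the roles are swapped. elx_irq
 * and elx_fiq dispatch to the matching handler macro accordingly.
 */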
LOCAL_FUNC elx_irq , :
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
END_FUNC elx_fiq
974