xref: /optee_os/core/arch/arm/kernel/thread_a64.S (revision 1bb929836182ecb96d2d9d268daa807c67596396)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2015-2017, Linaro Limited
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
11 *
12 * 2. Redistributions in binary form must reproduce the above copyright notice,
13 * this list of conditions and the following disclaimer in the documentation
14 * and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <arm64.h>
30#include <arm64_macros.S>
31#include <asm-defines.h>
32#include <asm.S>
33#include <keep.h>
34#include <kernel/thread_defs.h>
35#include <mm/core_mmu.h>
36#include <sm/optee_smc.h>
37#include <sm/teesmc_opteed.h>
38#include <sm/teesmc_opteed_macros.h>
39
40#include "thread_private.h"
41
42	.macro get_thread_ctx core_local, res, tmp0, tmp1
43		ldr	w\tmp0, [\core_local, \
44				#THREAD_CORE_LOCAL_CURR_THREAD]
45		adr	x\res, threads
46		mov	x\tmp1, #THREAD_CTX_SIZE
47		madd	x\res, x\tmp0, x\tmp1, x\res
48	.endm
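	/*
	 * Roughly equivalent C for get_thread_ctx (illustrative sketch only,
	 * "threads" and THREAD_CTX_SIZE are the symbols used above):
	 *
	 *   struct thread_ctx *ctx = (struct thread_ctx *)((vaddr_t)threads +
	 *			curr_thread * THREAD_CTX_SIZE);
	 */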
49
50	.macro b_if_spsr_is_el0 reg, label
51		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
52		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
53		b.eq	\label
54	.endm
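	/*
	 * b_if_spsr_is_el0 branches to \label when the SPSR value in \reg
	 * describes either AArch32 state or AArch64 EL0, that is, whenever
	 * an eret with that SPSR would end up in user mode.
	 */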
55
56LOCAL_FUNC vector_std_smc_entry , :
57	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
58	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
59	mov	x0, sp
60	bl	thread_handle_std_smc
61	/*
62	 * Normally thread_handle_std_smc() should return via
63	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
64	 * hasn't switched stack (an error was detected) it will do a normal
65	 * "C" return.
66	 */
67	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
68	add	sp, sp, #THREAD_SMC_ARGS_SIZE
69	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
70	smc	#0
71	b	.	/* SMC should not return */
72END_FUNC vector_std_smc_entry
73
74LOCAL_FUNC vector_fast_smc_entry , :
75	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
76	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
77	mov	x0, sp
78	bl	thread_handle_fast_smc
79	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
80	add	sp, sp, #THREAD_SMC_ARGS_SIZE
81	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
82	smc	#0
83	b	.	/* SMC should not return */
84END_FUNC vector_fast_smc_entry
85
86LOCAL_FUNC vector_fiq_entry , :
87	/* Secure Monitor received a FIQ and passed control to us. */
88	bl	thread_check_canaries
89	adr	x16, thread_nintr_handler_ptr
90	ldr	x16, [x16]
91	blr	x16
92	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
93	smc	#0
94	b	.	/* SMC should not return */
95END_FUNC vector_fiq_entry
96
97LOCAL_FUNC vector_cpu_on_entry , :
98	adr	x16, thread_cpu_on_handler_ptr
99	ldr	x16, [x16]
100	blr	x16
101	mov	x1, x0
102	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
103	smc	#0
104	b	.	/* SMC should not return */
105END_FUNC vector_cpu_on_entry
106
107LOCAL_FUNC vector_cpu_off_entry , :
108	adr	x16, thread_cpu_off_handler_ptr
109	ldr	x16, [x16]
110	blr	x16
111	mov	x1, x0
112	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
113	smc	#0
114	b	.	/* SMC should not return */
115END_FUNC vector_cpu_off_entry
116
117LOCAL_FUNC vector_cpu_suspend_entry , :
118	adr	x16, thread_cpu_suspend_handler_ptr
119	ldr	x16, [x16]
120	blr	x16
121	mov	x1, x0
122	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
123	smc	#0
124	b	.	/* SMC should not return */
125END_FUNC vector_cpu_suspend_entry
126
127LOCAL_FUNC vector_cpu_resume_entry , :
128	adr	x16, thread_cpu_resume_handler_ptr
129	ldr	x16, [x16]
130	blr	x16
131	mov	x1, x0
132	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
133	smc	#0
134	b	.	/* SMC should not return */
135END_FUNC vector_cpu_resume_entry
136
137LOCAL_FUNC vector_system_off_entry , :
138	adr	x16, thread_system_off_handler_ptr
139	ldr	x16, [x16]
140	blr	x16
141	mov	x1, x0
142	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
143	smc	#0
144	b	.	/* SMC should not return */
145END_FUNC vector_system_off_entry
146
147LOCAL_FUNC vector_system_reset_entry , :
148	adr	x16, thread_system_reset_handler_ptr
149	ldr	x16, [x16]
150	blr	x16
151	mov	x1, x0
152	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
153	smc	#0
154	b	.	/* SMC should not return */
155END_FUNC vector_system_reset_entry
156
157/*
158 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
159 * initialization.
160 *
161 * Note that ARM-TF depends on the layout of this vector table; any change
162 * in layout has to be synced with ARM-TF.
163 */
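/*
 * Each entry is a single 4-byte branch instruction, so ARM-TF can treat
 * the table as an array of handlers at fixed offsets:
 *
 *   0x00 std_smc      0x04 fast_smc     0x08 cpu_on       0x0c cpu_off
 *   0x10 cpu_resume   0x14 cpu_suspend  0x18 fiq          0x1c system_off
 *   0x20 system_reset
 */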
164FUNC thread_vector_table , :
165	b	vector_std_smc_entry
166	b	vector_fast_smc_entry
167	b	vector_cpu_on_entry
168	b	vector_cpu_off_entry
169	b	vector_cpu_resume_entry
170	b	vector_cpu_suspend_entry
171	b	vector_fiq_entry
172	b	vector_system_off_entry
173	b	vector_system_reset_entry
174END_FUNC thread_vector_table
175KEEP_PAGER thread_vector_table
176
177
178/* void thread_resume(struct thread_ctx_regs *regs) */
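/*
 * thread_resume() never returns: it loads sp, elr_el1, spsr_el1 and
 * x0..x30 from *regs and erets into the saved state. When the saved SPSR
 * describes user mode it goes through eret_to_el0 below so the user mode
 * ASID (and, with CFG_CORE_UNMAP_CORE_AT_EL0, the reduced mapping) is
 * installed first.
 */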
179FUNC thread_resume , :
180	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
181	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
182	mov	sp, x1
183	msr	elr_el1, x2
184	msr	spsr_el1, x3
185
186	b_if_spsr_is_el0 w3, 1f
187
188	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
189	ldr	x0, [x0, THREAD_CTX_REGS_X0]
190	eret
191
1921:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
193	ldr	x0, [x0, THREAD_CTX_REGS_X0]
194
195	msr	spsel, #1
196	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
197	b	eret_to_el0
198END_FUNC thread_resume
199
200FUNC thread_std_smc_entry , :
201	/* pass x0-x7 in a struct thread_smc_args */
202	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
203	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
204	mov	x0, sp
205
206	/* Call the registered handler */
207	bl	__thread_std_smc_entry
208
209	/*
210	 * Load the returned x0-x3 into preserved registers and skip the
211	 * "returned" x4-x7 since they will not be returned to normal
212	 * world.
213	 */
214	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
215	add	sp, sp, #THREAD_SMC_ARGS_SIZE
216
217	/* Mask all maskable exceptions before switching to temporary stack */
218	msr	daifset, #DAIFBIT_ALL
219	bl	thread_get_tmp_sp
220	mov	sp, x0
221
222	bl	thread_state_free
223
224	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
225	mov	x1, x20
226	mov	x2, x21
227	mov	x3, x22
228	mov	x4, x23
229	smc	#0
230	b	.	/* SMC should not return */
231END_FUNC thread_std_smc_entry
232
233/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
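/*
 * Illustrative use from C (simplified; the real callers in thread.c fill
 * rv[] with an OPTEE_SMC_RETURN_RPC_* code and its arguments):
 *
 *   uint32_t rv[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
 *
 *   thread_rpc(rv);
 *
 * thread_rpc() suspends the thread and issues an SMC to normal world.
 * When normal world resumes the thread, execution continues at
 * .thread_rpc_return below and rv[] holds the values passed back.
 */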
234FUNC thread_rpc , :
235	/* Read daif and create an SPSR */
236	mrs	x1, daif
237	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)
238
239	/* Mask all maskable exceptions before switching to temporary stack */
240	msr	daifset, #DAIFBIT_ALL
241	push	x0, xzr
242	push	x1, x30
243	bl	thread_get_ctx_regs
244	ldr	x30, [sp, #8]
245	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
246	mov	x19, x0
247
248	bl	thread_get_tmp_sp
249	pop	x1, xzr		/* Match "push x1, x30" above */
250	mov	x2, sp
251	str	x2, [x19, #THREAD_CTX_REGS_SP]
252	ldr	x20, [sp]	/* Get pointer to rv[] */
253	mov	sp, x0		/* Switch to tmp stack */
254
255	adr	x2, .thread_rpc_return
256	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
257	bl	thread_state_suspend
258	mov	x4, x0		/* Supply thread index */
259	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
260	load_wregs x20, 0, 1, 3	/* Load rv[] into w0-w2 */
261	smc	#0
262	b	.		/* SMC should not return */
263
264.thread_rpc_return:
265	/*
266	 * At this point the stack pointer has been restored to the value
267	 * saved in THREAD_CTX_REGS_SP above.
268	 *
269	 * Execution jumps here from thread_resume above when the RPC has
270	 * returned. The IRQ and FIQ bits are restored to what they were
271	 * when this function was originally entered.
272	 */
273	pop	x16, xzr	/* Get pointer to rv[] */
274	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
275	ret
276END_FUNC thread_rpc
277KEEP_PAGER thread_rpc
278
279FUNC thread_init_vbar , :
280	adr	x0, thread_vect_table
281	msr	vbar_el1, x0
282	ret
283END_FUNC thread_init_vbar
284KEEP_PAGER thread_init_vbar
285
286/*
287 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
288 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
289 *               unsigned long user_func, unsigned long spsr,
290 *               uint32_t *exit_status0, uint32_t *exit_status1)
291 *
292 */
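/*
 * a0..a3 are delivered to user mode in x0..x3 (x2 and x3 simply pass
 * through untouched), user_sp is set as SP_EL0 (and x13 for AArch32 TAs),
 * user_func becomes ELR_EL1 and spsr becomes SPSR_EL1. The call "returns"
 * only when thread_unwind_user_mode() is invoked; the ret value passed
 * there becomes the return value of this function.
 */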
293FUNC __thread_enter_user_mode , :
294	ldr	x8, [sp]
295	/*
296	 * Create and fill in the struct thread_user_mode_rec
297	 */
298	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
299	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
300	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
301
302	/*
303	 * Switch to SP_EL1
304	 * Disable exceptions
305	 * Save kern sp in x19
306	 */
307	msr	daifset, #DAIFBIT_ALL
308	mov	x19, sp
309	msr	spsel, #1
310
311	/*
312	 * Save the kernel stack pointer in the thread context
313	 */
314	/* get pointer to current thread context */
315	get_thread_ctx sp, 21, 20, 22
316	/*
317	 * Save the kernel stack pointer to ensure that el0_svc() uses the
318	 * correct stack pointer
319	 */
320	str	x19, [x21, #THREAD_CTX_KERN_SP]
321
322	/*
323	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
324	 */
325	msr	spsr_el1, x6
326	/* Set user sp */
327	mov	x13, x4		/* Used when running TA in AArch32 */
328	msr	sp_el0, x4	/* Used when running TA in AArch64 */
329	/* Set user function */
330	msr	elr_el1, x5
331	/* Set frame pointer (user stack can't be unwound past this point) */
332	mov x29, #0
333
334	/* Jump into user mode */
335	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
336	b eret_to_el0
337END_FUNC __thread_enter_user_mode
338
339/*
340 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
341 * 		uint32_t exit_status1);
342 * See description in thread.h
343 */
344FUNC thread_unwind_user_mode , :
345	/* Store the exit status */
346	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
347	str	w1, [x3]
348	str	w2, [x4]
349	/* Restore x19..x30 */
350	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
351	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
352	/* Return from the call of thread_enter_user_mode() */
353	ret
354END_FUNC thread_unwind_user_mode
355
356	/*
357	 * This macro verifies that a given vector doesn't exceed the
358	 * architectural limit of 32 instructions. It is meant to be placed
359	 * immediately after the last instruction in the vector and takes the
360	 * vector entry label as its parameter.
361	 */
362	.macro check_vector_size since
363	  .if (. - \since) > (32 * 4)
364	    .error "Vector exceeds 32 instructions"
365	  .endif
366	.endm
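	/*
	 * 32 instructions of 4 bytes each matches the 0x80 byte spacing
	 * between the ".align 7" (128 byte aligned) vector slots below.
	 */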
367
368	.macro restore_mapping
369#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
370		/* Temporarily save x0 */
371		msr	tpidr_el1, x0
372
373		/* Update the mapping to use the full kernel mapping */
374		mrs	x0, ttbr0_el1
375		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
376		/* switch to kernel mode ASID */
377		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
378		msr	ttbr0_el1, x0
379		isb
380
381		/* Jump into the full mapping and continue execution */
382		ldr	x0, =1f
383		br	x0
384	1:
385
386		/* Point VBAR_EL1 at the vector table in the full mapping */
387		adr	x0, thread_vect_table
388		msr	vbar_el1, x0
389		isb
390
391		/* Restore x0 */
392		mrs	x0, tpidr_el1
393		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
394#else
395		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
396		mrs	x0, ttbr0_el1
397		/* switch to kernel mode ASID */
398		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
399		msr	ttbr0_el1, x0
400		isb
401#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
402	.endm
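	/*
	 * In C-like terms restore_mapping does (illustrative only):
	 *
	 *   ttbr0_el1 &= ~BIT(TTBR_ASID_SHIFT);	   switch to kernel ASID
	 *
	 * and additionally, with CFG_CORE_UNMAP_CORE_AT_EL0:
	 *
	 *   ttbr0_el1 -= CORE_MMU_L1_TBL_OFFSET;  back to the full mapping
	 *
	 * undoing what eret_to_el0 below set up before entering user mode.
	 */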
403
404	.section .text.thread_vect_table
405	.align	11
406FUNC thread_vect_table , :
407	/* -----------------------------------------------------
408	 * EL1 with SP0 : 0x0 - 0x180
409	 * -----------------------------------------------------
410	 */
411	.align	7
412sync_el1_sp0:
413	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
414	b	el1_sync_abort
415	check_vector_size sync_el1_sp0
416
417	.align	7
418irq_el1_sp0:
419	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
420	b	elx_irq
421	check_vector_size irq_el1_sp0
422
423	.align	7
424fiq_el1_sp0:
425	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
426	b	elx_fiq
427	check_vector_size fiq_el1_sp0
428
429	.align	7
430SErrorSP0:
431	b	SErrorSP0
432	check_vector_size SErrorSP0
433
434	/* -----------------------------------------------------
435	 * Current EL with SPx: 0x200 - 0x380
436	 * -----------------------------------------------------
437	 */
438	.align	7
439SynchronousExceptionSPx:
440	b	SynchronousExceptionSPx
441	check_vector_size SynchronousExceptionSPx
442
443	.align	7
444IrqSPx:
445	b	IrqSPx
446	check_vector_size IrqSPx
447
448	.align	7
449FiqSPx:
450	b	FiqSPx
451	check_vector_size FiqSPx
452
453	.align	7
454SErrorSPx:
455	b	SErrorSPx
456	check_vector_size SErrorSPx
457
458	/* -----------------------------------------------------
459	 * Lower EL using AArch64 : 0x400 - 0x580
460	 * -----------------------------------------------------
461	 */
462	.align	7
463el0_sync_a64:
464	restore_mapping
465
466	mrs	x2, esr_el1
467	mrs	x3, sp_el0
468	lsr	x2, x2, #ESR_EC_SHIFT
469	cmp	x2, #ESR_EC_AARCH64_SVC
470	b.eq	el0_svc
471	b	el0_sync_abort
472	check_vector_size el0_sync_a64
473
474	.align	7
475el0_irq_a64:
476	restore_mapping
477
478	b	elx_irq
479	check_vector_size el0_irq_a64
480
481	.align	7
482el0_fiq_a64:
483	restore_mapping
484
485	b	elx_fiq
486	check_vector_size el0_fiq_a64
487
488	.align	7
489SErrorA64:
490	b	SErrorA64
491	check_vector_size SErrorA64
492
493	/* -----------------------------------------------------
494	 * Lower EL using AArch32 : 0x600 - 0x780
495	 * -----------------------------------------------------
496	 */
497	.align	7
498el0_sync_a32:
499	restore_mapping
500
501	mrs	x2, esr_el1
502	mrs	x3, sp_el0
503	lsr	x2, x2, #ESR_EC_SHIFT
504	cmp	x2, #ESR_EC_AARCH32_SVC
505	b.eq	el0_svc
506	b	el0_sync_abort
507	check_vector_size el0_sync_a32
508
509	.align	7
510el0_irq_a32:
511	restore_mapping
512
513	b	elx_irq
514	check_vector_size el0_irq_a32
515
516	.align	7
517el0_fiq_a32:
518	restore_mapping
519
520	b	elx_fiq
521	check_vector_size el0_fiq_a32
522
523	.align	7
524SErrorA32:
525	b	SErrorA32
526	check_vector_size SErrorA32
527
528/*
529 * We're keeping this code in the same section as the vector table to
530 * make sure that it's always available.
531 */
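/*
 * eret_to_el0 is the inverse of restore_mapping above: it sets the user
 * mode ASID bit in TTBR0_EL1 and, with CFG_CORE_UNMAP_CORE_AT_EL0, adds
 * CORE_MMU_L1_TBL_OFFSET and points VBAR_EL1 at the alias of
 * thread_vect_table that is visible in the reduced mapping, before
 * finally doing the eret with x0/x1 taken from THREAD_CORE_LOCAL_X0.
 */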
532eret_to_el0:
533
534#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
535	/* Point VBAR_EL1 at the vector table in the reduced mapping */
536	adr	x0, thread_user_kcode_offset
537	ldr	x0, [x0]
538	adr	x1, thread_vect_table
539	sub	x1, x1, x0
540	msr	vbar_el1, x1
541	isb
542
543	/* Jump into the reduced mapping and continue execution */
544	ldr	x1, =1f
545	sub	x1, x1, x0
546	br	x1
5471:
548
549	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
550	msr	tpidr_el1, x0
551
552	/* Update the mapping to exclude the full kernel mapping */
553	mrs	x0, ttbr0_el1
554	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
555	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
556	msr	ttbr0_el1, x0
557	isb
558
559	mrs	x0, tpidr_el1
560#else
561	mrs	x0, ttbr0_el1
562	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
563	msr	ttbr0_el1, x0
564	isb
565	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
566#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
567
568	eret
569
570END_FUNC thread_vect_table
571
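/*
 * el0_svc builds a struct thread_svc_regs on the kernel stack of the
 * current thread and hands it to the C dispatcher, roughly:
 *
 *   void tee_svc_handler(struct thread_svc_regs *regs);
 *
 * The handler may update the saved registers; the saved x0..x3 are what
 * user mode sees when the SVC returns.
 */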
572LOCAL_FUNC el0_svc , :
573	/* get pointer to current thread context in x0 */
574	get_thread_ctx sp, 0, 1, 2
575	/* load saved kernel sp */
576	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
577	/* Keep pointer to initial record in x1 */
578	mov	x1, sp
579	/* Switch to SP_EL0 and restore kernel sp */
580	msr	spsel, #0
581	mov	x2, sp	/* Save SP_EL0 */
582	mov	sp, x0
583
584	/* Make room for struct thread_svc_regs */
585	sub	sp, sp, #THREAD_SVC_REG_SIZE
586	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]
587
588	/* Restore x0-x3 */
589	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
590	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]
591
592	/* Prepare the argument for the handler */
593	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
594	mrs	x0, elr_el1
595	mrs	x1, spsr_el1
596	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
597	mov	x0, sp
598
599	/*
600	 * Unmask native interrupts, SError, and debug exceptions since we
601	 * have nothing left in sp_el1. Note that the SVC handler is expected
602	 * to re-enable foreign interrupts by itself.
603	 */
604#if defined(CFG_ARM_GICV3)
605	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
606#else
607	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
608#endif
609
610	/* Call the handler */
611	bl	tee_svc_handler
612
613	/* Mask all maskable exceptions since we're switching back to sp_el1 */
614	msr	daifset, #DAIFBIT_ALL
615
616	/*
617	 * Save the kernel sp we had at the beginning of this function.
618	 * This is needed when this TA has called another TA, because
619	 * __thread_enter_user_mode() also saves the stack pointer in this
620	 * field.
621	 */
622	msr	spsel, #1
623	get_thread_ctx sp, 0, 1, 2
624	msr	spsel, #0
625	add	x1, sp, #THREAD_SVC_REG_SIZE
626	str	x1, [x0, #THREAD_CTX_KERN_SP]
627
628	/* Restore registers to the required state and return */
629	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
630	msr	elr_el1, x0
631	msr	spsr_el1, x1
632	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
633	mov	x30, sp
634	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
635	mov	sp, x0
636	b_if_spsr_is_el0 w1, 1f
637	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
638	ldr	x30, [x30, #THREAD_SVC_REG_X30]
639
640	eret
641
6421:	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
643	ldr	x30, [x30, #THREAD_SVC_REG_X30]
644
645	msr	spsel, #1
646	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
647	b	eret_to_el0
648END_FUNC el0_svc
649
650LOCAL_FUNC el1_sync_abort , :
651	mov	x0, sp
652	msr	spsel, #0
653	mov	x3, sp		/* Save original sp */
654
655	/*
656	 * Update core local flags.
657	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
658	 */
659	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
660	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
661	orr	w1, w1, #THREAD_CLF_ABORT
662	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
663			.Lsel_tmp_sp
664
665	/* Select abort stack */
666	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
667	b	.Lset_sp
668
669.Lsel_tmp_sp:
670	/* Select tmp stack */
671	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
672	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */
673
674.Lset_sp:
675	mov	sp, x2
676	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
677
678	/*
679	 * Save state on stack
680	 */
681	sub	sp, sp, #THREAD_ABT_REGS_SIZE
682	mrs	x2, spsr_el1
683	/* Store spsr, sp_el0 */
684	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
685	/* Store original x0, x1 */
686	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
687	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
688	/* Store original x2, x3 and x4 to x29 */
689	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
690	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
691	/* Store x30, elr_el1 */
692	mrs	x0, elr_el1
693	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
694
695	/*
696	 * Call handler
697	 */
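	/*
	 * x0 carries the abort type and x1 points at the struct
	 * thread_abort_regs just built on the stack; the C prototype is
	 * roughly:
	 *
	 *   void abort_handler(uint32_t abort_type,
	 *			struct thread_abort_regs *regs);
	 */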
698	mov	x0, #0
699	mov	x1, sp
700	bl	abort_handler
701
702	/*
703	 * Restore state from stack
704	 */
705	/* Load x30, elr_el1 */
706	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
707	msr	elr_el1, x0
708	/* Load x0 to x29 */
709	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
710	/* Switch to SP_EL1 */
711	msr	spsel, #1
712	/* Save x0 to x3 in CORE_LOCAL */
713	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
714	/* Restore spsr_el1 and sp_el0 */
715	mrs	x3, sp_el0
716	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
717	msr	spsr_el1, x0
718	msr	sp_el0, x1
719
720	/* Update core local flags */
721	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
722	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
723	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
724
725	/* Restore x0 to x3 */
726	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
727
728	/* Return from exception */
729	eret
730END_FUNC el1_sync_abort
731
732	/* sp_el0 in x3 */
733LOCAL_FUNC el0_sync_abort , :
734	/*
735	 * Update core local flags
736	 */
737	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
738	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
739	orr	w1, w1, #THREAD_CLF_ABORT
740	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
741
742	/*
743	 * Save state on stack
744	 */
745
746	/* load abt_stack_va_end */
747	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
748	/* Keep pointer to initial record in x0 */
749	mov	x0, sp
750	/* Switch to SP_EL0 */
751	msr	spsel, #0
752	mov	sp, x1
753	sub	sp, sp, #THREAD_ABT_REGS_SIZE
754	mrs	x2, spsr_el1
755	/* Store spsr, sp_el0 */
756	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
757	/* Store original x0, x1 */
758	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
759	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
760	/* Store original x2, x3 and x4 to x29 */
761	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
762	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
763	/* Store x30, elr_el1 */
764	mrs	x0, elr_el1
765	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
766
767	/*
768	 * Call handler
769	 */
770	mov	x0, #0
771	mov	x1, sp
772	bl	abort_handler
773
774	/*
775	 * Restore state from stack
776	 */
777
778	/* Load x30, elr_el1 */
779	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
780	msr	elr_el1, x0
781	/* Load x0 to x29 */
782	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
783	/* Switch to SP_EL1 */
784	msr	spsel, #1
785	/* Save x0 to x3 in CORE_LOCAL */
786	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
787	/* Restore spsr_el1 and sp_el0 */
788	mrs	x3, sp_el0
789	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
790	msr	spsr_el1, x0
791	msr	sp_el0, x1
792
793	/* Update core local flags */
794	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
795	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
796	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
797
798	/* Restore x2 to x3 */
799	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
800
801	b_if_spsr_is_el0 w0, eret_to_el0
802
803	/* Restore x0 to x1 */
804	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
805
806	/* Return from exception */
807	eret
808END_FUNC el0_sync_abort
809
810/* Handler of foreign interrupts. */
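/*
 * A foreign interrupt is handled by suspending the current thread and
 * returning to normal world with OPTEE_SMC_RETURN_RPC_FOREIGN_INTR; the
 * normal world OS services the interrupt and later resumes the thread
 * with an OPTEE_SMC_CALL_RETURN_FROM_RPC call.
 */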
811.macro foreign_intr_handler mode:req
812	/*
813	 * Update core local flags
814	 */
815	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
816	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
817	orr	w1, w1, #THREAD_CLF_TMP
818	.ifc	\mode\(),fiq
819	orr	w1, w1, #THREAD_CLF_FIQ
820	.else
821	orr	w1, w1, #THREAD_CLF_IRQ
822	.endif
823	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
824
825	/* get pointer to current thread context in x0 */
826	get_thread_ctx sp, 0, 1, 2
827	/* Keep original SP_EL0 */
828	mrs	x2, sp_el0
829
830	/* Store original sp_el0 */
831	str	x2, [x0, #THREAD_CTX_REGS_SP]
832	/* store x4..x30 */
833	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
834	/* Load original x0..x3 into x10..x13 */
835	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
836	/* Save original x0..x3 */
837	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13
838
839	/* load tmp_stack_va_end */
840	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
841	/* Switch to SP_EL0 */
842	msr	spsel, #0
843	mov	sp, x1
844
845	/*
846	 * Mark current thread as suspended
847	 */
848	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
849	mrs	x1, spsr_el1
850	mrs	x2, elr_el1
851	bl	thread_state_suspend
852	mov	w4, w0		/* Supply thread index */
853
854	/* Update core local flags */
855	/* Switch to SP_EL1 */
856	msr	spsel, #1
857	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
858	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
859	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
860	msr	spsel, #0
861
862	/*
863	 * Note that we're exiting with SP_EL0 selected since the entry
864	 * functions expect to have SP_EL0 selected with the tmp stack
865	 * set.
866	 */
867
868	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
869	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
870	mov	w2, #0
871	mov	w3, #0
872	/* w4 is already filled in above */
873	smc	#0
874	b	.	/* SMC should not return */
875.endm
876
877/*
878 * This struct is never used from C; it's only here to visualize the
879 * layout.
880 *
881 * struct elx_nintr_rec {
882 * 	uint64_t x[19 - 4]; x4..x18
883 * 	uint64_t lr;
884 * 	uint64_t sp_el0;
885 * };
886 */
887#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
888#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
889#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
890#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)
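/*
 * With the defines above x4..x18 land at offsets 0..112, lr at 128 and
 * sp_el0 at 136, giving a 144 byte record; the unused 8 bytes at offset
 * 120 keep the record size a multiple of 16 so sp stays 16 byte aligned.
 */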
891
892/* Handler of native interrupts. */
893.macro native_intr_handler mode:req
894	/*
895	 * Update core local flags
896	 */
897	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
898	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
899	.ifc	\mode\(),fiq
900	orr	w1, w1, #THREAD_CLF_FIQ
901	.else
902	orr	w1, w1, #THREAD_CLF_IRQ
903	.endif
904	orr	w1, w1, #THREAD_CLF_TMP
905	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
906
907	/* load tmp_stack_va_end */
908	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
909	/* Keep original SP_EL0 */
910	mrs	x2, sp_el0
911	/* Switch to SP_EL0 */
912	msr	spsel, #0
913	mov	sp, x1
914
915	/*
916	 * Save on the stack the registers that may be clobbered by a call
917	 * to a C function
918	 */
919	/* Make room for struct elx_nintr_rec */
920	sub	sp, sp, #ELX_NINTR_REC_SIZE
921	/* Store x4..x18 */
922	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
923	/* Store lr and original sp_el0 */
924	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]
925
926	bl	thread_check_canaries
927	adr	x16, thread_nintr_handler_ptr
928	ldr	x16, [x16]
929	blr	x16
930
931	/*
932	 * Restore registers
933	 */
934	/* Restore x4..x18 */
935	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
936	/* Load lr and original sp_el0 */
937	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
938	/* Restore SP_EL0 */
939	mov	sp, x2
940	/* Switch back to SP_EL1 */
941	msr	spsel, #1
942
943	/* Update core local flags */
944	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
945	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
946	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
947
948	mrs	x0, spsr_el1
949	/* Restore x2..x3 */
950	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
951	b_if_spsr_is_el0 w0, eret_to_el0
952
953	/* Restore x0..x1 */
954	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
955
956	/* Return from exception */
957	eret
958.endm
959
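/*
 * With CFG_ARM_GICV3 native (secure) interrupts arrive as IRQ and foreign
 * (normal world) interrupts as FIQ; without it the roles are swapped.
 * The two entry points below pick the matching handler macro accordingly.
 */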
960LOCAL_FUNC elx_irq , :
961#if defined(CFG_ARM_GICV3)
962	native_intr_handler	irq
963#else
964	foreign_intr_handler	irq
965#endif
966END_FUNC elx_irq
967
968LOCAL_FUNC elx_fiq , :
969#if defined(CFG_ARM_GICV3)
970	foreign_intr_handler	fiq
971#else
972	native_intr_handler	fiq
973#endif
974END_FUNC elx_fiq
975