xref: /optee_os/core/arch/arm/kernel/thread_a64.S (revision ab61a1dcba453d19f7183a8a31dfa227387c57f5)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2015-2017, Linaro Limited
4 */
5
6#include <arm64.h>
7#include <arm64_macros.S>
8#include <asm-defines.h>
9#include <asm.S>
10#include <keep.h>
11#include <kernel/thread_defs.h>
12#include <mm/core_mmu.h>
13#include <sm/optee_smc.h>
14#include <sm/teesmc_opteed.h>
15#include <sm/teesmc_opteed_macros.h>
16
17#include "thread_private.h"
18
19	.macro get_thread_ctx core_local, res, tmp0, tmp1
20		ldr	w\tmp0, [\core_local, \
21				#THREAD_CORE_LOCAL_CURR_THREAD]
22		adr	x\res, threads
23		mov	x\tmp1, #THREAD_CTX_SIZE
24		madd	x\res, x\tmp0, x\tmp1, x\res
25	.endm
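	/*
	 * For reference, the macro above is roughly equivalent to the
	 * following C (illustration only, assuming the threads[] array of
	 * struct thread_ctx defined in thread.c):
	 *
	 *	struct thread_ctx *ctx = &threads[core_local->curr_thread];
	 */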
26
27	.macro b_if_spsr_is_el0 reg, label
28		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
29		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
30		b.eq	\label
31	.endm
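	/*
	 * The macro above branches to \label when \reg holds an SPSR value
	 * that describes a return to user space, that is, either an AArch32
	 * mode (the RW bit is set) or AArch64 with the EL field equal to 0
	 * (EL0).
	 */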
32
33LOCAL_FUNC vector_std_smc_entry , :
34	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
35	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
36	mov	x0, sp
37	bl	thread_handle_std_smc
38	/*
39	 * Normally thread_handle_std_smc() returns via thread_exit() or
40	 * thread_rpc(), but if thread_handle_std_smc() hasn't switched
41	 * stacks (an error was detected) it will do a normal "C"
42	 * return.
43	 */
44	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
45	add	sp, sp, #THREAD_SMC_ARGS_SIZE
46	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
47	smc	#0
48	b	.	/* SMC should not return */
49END_FUNC vector_std_smc_entry
50
51LOCAL_FUNC vector_fast_smc_entry , :
52	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
53	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
54	mov	x0, sp
55	bl	thread_handle_fast_smc
56	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
57	add	sp, sp, #THREAD_SMC_ARGS_SIZE
58	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
59	smc	#0
60	b	.	/* SMC should not return */
61END_FUNC vector_fast_smc_entry
62
63LOCAL_FUNC vector_fiq_entry , :
64	/* Secure Monitor received a FIQ and passed control to us. */
65	bl	thread_check_canaries
66	adr	x16, thread_nintr_handler_ptr
67	ldr	x16, [x16]
68	blr	x16
69	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
70	smc	#0
71	b	.	/* SMC should not return */
72END_FUNC vector_fiq_entry
73
74LOCAL_FUNC vector_cpu_on_entry , :
75	adr	x16, thread_cpu_on_handler_ptr
76	ldr	x16, [x16]
77	blr	x16
78	mov	x1, x0
79	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
80	smc	#0
81	b	.	/* SMC should not return */
82END_FUNC vector_cpu_on_entry
83
84LOCAL_FUNC vector_cpu_off_entry , :
85	adr	x16, thread_cpu_off_handler_ptr
86	ldr	x16, [x16]
87	blr	x16
88	mov	x1, x0
89	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
90	smc	#0
91	b	.	/* SMC should not return */
92END_FUNC vector_cpu_off_entry
93
94LOCAL_FUNC vector_cpu_suspend_entry , :
95	adr	x16, thread_cpu_suspend_handler_ptr
96	ldr	x16, [x16]
97	blr	x16
98	mov	x1, x0
99	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
100	smc	#0
101	b	.	/* SMC should not return */
102END_FUNC vector_cpu_suspend_entry
103
104LOCAL_FUNC vector_cpu_resume_entry , :
105	adr	x16, thread_cpu_resume_handler_ptr
106	ldr	x16, [x16]
107	blr	x16
108	mov	x1, x0
109	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
110	smc	#0
111	b	.	/* SMC should not return */
112END_FUNC vector_cpu_resume_entry
113
114LOCAL_FUNC vector_system_off_entry , :
115	adr	x16, thread_system_off_handler_ptr
116	ldr	x16, [x16]
117	blr	x16
118	mov	x1, x0
119	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
120	smc	#0
121	b	.	/* SMC should not return */
122END_FUNC vector_system_off_entry
123
124LOCAL_FUNC vector_system_reset_entry , :
125	adr	x16, thread_system_reset_handler_ptr
126	ldr	x16, [x16]
127	blr	x16
128	mov	x1, x0
129	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
130	smc	#0
131	b	.	/* SMC should not return */
132END_FUNC vector_system_reset_entry
133
134/*
135 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
136 * initialization.
137 *
138 * Note that ARM-TF depends on the layout of this vector table; any
139 * change in the layout has to be kept in sync with ARM-TF.
140 */
141FUNC thread_vector_table , :
142	b	vector_std_smc_entry
143	b	vector_fast_smc_entry
144	b	vector_cpu_on_entry
145	b	vector_cpu_off_entry
146	b	vector_cpu_resume_entry
147	b	vector_cpu_suspend_entry
148	b	vector_fiq_entry
149	b	vector_system_off_entry
150	b	vector_system_reset_entry
151END_FUNC thread_vector_table
152KEEP_PAGER thread_vector_table
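
/*
 * For illustration only: ARM-TF views the table above roughly as a
 * struct of one-instruction entry points in this order (the struct and
 * field names below are made up for this sketch, they are not taken
 * from the ARM-TF sources):
 *
 * struct thread_vectors {
 * 	uint32_t std_smc_entry;
 * 	uint32_t fast_smc_entry;
 * 	uint32_t cpu_on_entry;
 * 	uint32_t cpu_off_entry;
 * 	uint32_t cpu_resume_entry;
 * 	uint32_t cpu_suspend_entry;
 * 	uint32_t fiq_entry;
 * 	uint32_t system_off_entry;
 * 	uint32_t system_reset_entry;
 * };
 */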
153
154
155/* void thread_resume(struct thread_ctx_regs *regs) */
156FUNC thread_resume , :
157	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
158	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
159	mov	sp, x1
160	msr	elr_el1, x2
161	msr	spsr_el1, x3
162
163	b_if_spsr_is_el0 w3, 1f
164
165	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
166	ldr	x0, [x0, THREAD_CTX_REGS_X0]
167	eret
168
1691:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
170	ldr	x0, [x0, THREAD_CTX_REGS_X0]
171
172	msr	spsel, #1
173	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
174	b	eret_to_el0
175END_FUNC thread_resume
176
177FUNC thread_std_smc_entry , :
178	/* pass x0-x7 in a struct thread_smc_args */
179	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
180	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
181	mov	x0, sp
182
183	/* Call the registered handler */
184	bl	__thread_std_smc_entry
185
186	/*
187	 * Load the returned x0-x3 into preserved registers and skip the
188	 * "returned" x4-x7 since they will not be returned to normal
189	 * world.
190	 */
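	/*
	 * Note: x20..x23 are callee-saved, so the values loaded here
	 * survive the calls to thread_get_tmp_sp() and thread_state_free()
	 * below.
	 */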
191	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
192	add	sp, sp, #THREAD_SMC_ARGS_SIZE
193
194	/* Mask all maskable exceptions before switching to temporary stack */
195	msr	daifset, #DAIFBIT_ALL
196	bl	thread_get_tmp_sp
197	mov	sp, x0
198
199	bl	thread_state_free
200
201	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
202	mov	x1, x20
203	mov	x2, x21
204	mov	x3, x22
205	mov	x4, x23
206	smc	#0
207	b	.	/* SMC should not return */
208END_FUNC thread_std_smc_entry
209
210/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
211FUNC thread_rpc , :
212	/* Read daif and create an SPSR */
213	mrs	x1, daif
214	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)
215
216	/* Mask all maskable exceptions before switching to temporary stack */
217	msr	daifset, #DAIFBIT_ALL
218	push	x0, xzr
219	push	x1, x30
220	bl	thread_get_ctx_regs
221	ldr	x30, [sp, #8]
222	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
223	mov	x19, x0
224
225	bl	thread_get_tmp_sp
226	pop	x1, xzr		/* Match "push x1, x30" above */
227	mov	x2, sp
228	str	x2, [x19, #THREAD_CTX_REGS_SP]
229	ldr	x20, [sp]	/* Get pointer to rv[] */
230	mov	sp, x0		/* Switch to tmp stack */
231
232	adr	x2, .thread_rpc_return
233	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
234	bl	thread_state_suspend
235	mov	x4, x0		/* Supply thread index */
236	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
237	load_wregs x20, 0, 1, 3	/* Load rv[] into w0-w2 */
238	smc	#0
239	b	.		/* SMC should not return */
240
241.thread_rpc_return:
242	/*
243	 * At this point the stack pointer has been restored to the value
244	 * stored in THREAD_CTX above.
245	 *
246	 * Execution continues here via thread_resume above when the RPC
247	 * has returned. The IRQ and FIQ bits are restored to what they
248	 * were when this function was originally entered.
249	 */
250	pop	x16, xzr	/* Get pointer to rv[] */
251	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
252	ret
253END_FUNC thread_rpc
254KEEP_PAGER thread_rpc
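
/*
 * Illustration only, a hypothetical caller (not lifted verbatim from
 * thread.c): the C side fills rv[] with an OPTEE_SMC_RETURN_RPC_* code
 * plus its arguments, calls thread_rpc() which suspends the thread, and
 * reads the normal world's answer from rv[] when the call returns:
 *
 *	uint32_t rv[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD,
 *					     arg_hi, arg_lo };
 *
 *	thread_rpc(rv);
 */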
255
256FUNC thread_init_vbar , :
257	adr	x0, thread_vect_table
258	msr	vbar_el1, x0
259	ret
260END_FUNC thread_init_vbar
261KEEP_PAGER thread_init_vbar
262
263/*
264 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
265 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
266 *               unsigned long user_func, unsigned long spsr,
267 *               uint32_t *exit_status0, uint32_t *exit_status1)
268 *
269 */
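/*
 * AAPCS64 register mapping assumed by the code below: a0..a3 arrive in
 * x0..x3, user_sp in x4, user_func in x5, spsr in x6, exit_status0 in
 * x7 and exit_status1 as the ninth argument on the stack (loaded into
 * x8 below).
 */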
270FUNC __thread_enter_user_mode , :
271	ldr	x8, [sp]
272	/*
273	 * Create and fill in the struct thread_user_mode_rec
274	 */
275	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
276	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
277	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
278
279	/*
280	 * Disable exceptions
281	 * Save kernel sp in x19
282	 * Switch to SP_EL1
283	 */
284	msr	daifset, #DAIFBIT_ALL
285	mov	x19, sp
286	msr	spsel, #1
287
288	/*
289	 * Save the kernel stack pointer in the thread context
290	 */
291	/* get pointer to current thread context */
292	get_thread_ctx sp, 21, 20, 22
293	/*
294	 * Save the kernel stack pointer to ensure that el0_svc() uses
295	 * the correct stack pointer
296	 */
297	str	x19, [x21, #THREAD_CTX_KERN_SP]
298
299	/*
300	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
301	 */
302	msr	spsr_el1, x6
303	/* Set user sp */
304	mov	x13, x4		/* Used when running TA in AArch32 */
305	msr	sp_el0, x4	/* Used when running TA in AArch64 */
306	/* Set user function */
307	msr	elr_el1, x5
308	/* Set frame pointer (user stack can't be unwound past this point) */
309	mov x29, #0
310
311	/* Jump into user mode */
312	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
313	b eret_to_el0
314END_FUNC __thread_enter_user_mode
315
316/*
317 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
318 * 		uint32_t exit_status1);
319 * See description in thread.h
320 */
321FUNC thread_unwind_user_mode , :
322	/* Store the exit status */
323	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
324	str	w1, [x3]
325	str	w2, [x4]
326	/* Restore x19..x30 */
327	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
328	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
329	/* Return from the call of thread_enter_user_mode() */
330	ret
331END_FUNC thread_unwind_user_mode
332
333	/*
334	 * This macro verifies that a given vector doesn't exceed the
335	 * architectural limit of 32 instructions. It is meant to be placed
336	 * immediately after the last instruction in the vector. It takes the
337	 * vector entry point as its parameter.
338	 */
339	.macro check_vector_size since
340	  .if (. - \since) > (32 * 4)
341	    .error "Vector exceeds 32 instructions"
342	  .endif
343	.endm
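	/*
	 * Each entry in a VBAR_EL1 vector table is 0x80 bytes, that is,
	 * room for 32 four-byte instructions, which is where the limit
	 * checked above comes from.
	 */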
344
345	.macro restore_mapping
346#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
347		/* Temporarily save x0, x1 */
348		msr	tpidr_el1, x0
349		msr	tpidrro_el0, x1
350
351		/* Update the mapping to use the full kernel mapping */
352		mrs	x0, ttbr0_el1
353		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
354		/* switch to kernel mode ASID */
355		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
356		msr	ttbr0_el1, x0
357		isb
358
359		/* Jump into the full mapping and continue execution */
360		ldr	x0, =1f
361		br	x0
362	1:
363
364		/* Make vbar_el1 point to the vector in the full mapping */
365		adr	x0, thread_user_kcode_offset
366		ldr	x0, [x0]
367		mrs	x1, vbar_el1
368		add	x1, x1, x0
369		msr	vbar_el1, x1
370		isb
371
372		/* Restore x0, x1 */
373		mrs	x0, tpidr_el1
374		mrs	x1, tpidrro_el0
375		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
376#else
377		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
378		mrs	x0, ttbr0_el1
379		/* switch to kernel mode ASID */
380		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
381		msr	ttbr0_el1, x0
382		isb
383#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
384	.endm
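	/*
	 * Summary of restore_mapping: with CFG_CORE_UNMAP_CORE_AT_EL0 it
	 * switches from the reduced mapping used while in user space back
	 * to the full kernel mapping, updating TTBR0_EL1 and VBAR_EL1;
	 * without it only the ASID in TTBR0_EL1 is switched back to the
	 * kernel ASID. In both cases x0..x3 end up saved in the core local
	 * record.
	 */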
385
386#define INV_INSN	0
387	.section .text.thread_vect_table
388	.align	11, INV_INSN
389FUNC thread_vect_table , :
390	/* -----------------------------------------------------
391	 * Current EL with SP0 : 0x0 - 0x180
392	 * -----------------------------------------------------
393	 */
394	.align	7, INV_INSN
395el1_sync_sp0:
396	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
397	b	el1_sync_abort
398	check_vector_size el1_sync_sp0
399
400	.align	7, INV_INSN
401el1_irq_sp0:
402	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
403	b	elx_irq
404	check_vector_size el1_irq_sp0
405
406	.align	7, INV_INSN
407el1_fiq_sp0:
408	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
409	b	elx_fiq
410	check_vector_size el1_fiq_sp0
411
412	.align	7, INV_INSN
413el1_serror_sp0:
414	b	el1_serror_sp0
415	check_vector_size el1_serror_sp0
416
417	/* -----------------------------------------------------
418	 * Current EL with SP1: 0x200 - 0x380
419	 * -----------------------------------------------------
420	 */
421	.align	7, INV_INSN
422el1_sync_sp1:
423	b	el1_sync_sp1
424	check_vector_size el1_sync_sp1
425
426	.align	7, INV_INSN
427el1_irq_sp1:
428	b	el1_irq_sp1
429	check_vector_size el1_irq_sp1
430
431	.align	7, INV_INSN
432el1_fiq_sp1:
433	b	el1_fiq_sp1
434	check_vector_size el1_fiq_sp1
435
436	.align	7, INV_INSN
437el1_serror_sp1:
438	b	el1_serror_sp1
439	check_vector_size el1_serror_sp1
440
441	/* -----------------------------------------------------
442	 * Lower EL using AArch64 : 0x400 - 0x580
443	 * -----------------------------------------------------
444	 */
445	.align	7, INV_INSN
446el0_sync_a64:
447	restore_mapping
448
449	mrs	x2, esr_el1
450	mrs	x3, sp_el0
451	lsr	x2, x2, #ESR_EC_SHIFT
452	cmp	x2, #ESR_EC_AARCH64_SVC
453	b.eq	el0_svc
454	b	el0_sync_abort
455	check_vector_size el0_sync_a64
456
457	.align	7, INV_INSN
458el0_irq_a64:
459	restore_mapping
460
461	b	elx_irq
462	check_vector_size el0_irq_a64
463
464	.align	7, INV_INSN
465el0_fiq_a64:
466	restore_mapping
467
468	b	elx_fiq
469	check_vector_size el0_fiq_a64
470
471	.align	7, INV_INSN
472el0_serror_a64:
473	b   	el0_serror_a64
474	check_vector_size el0_serror_a64
475
476	/* -----------------------------------------------------
477	 * Lower EL using AArch32 : 0x600 - 0x780
478	 * -----------------------------------------------------
479	 */
480	.align	7, INV_INSN
481el0_sync_a32:
482	restore_mapping
483
484	mrs	x2, esr_el1
485	mrs	x3, sp_el0
486	lsr	x2, x2, #ESR_EC_SHIFT
487	cmp	x2, #ESR_EC_AARCH32_SVC
488	b.eq	el0_svc
489	b	el0_sync_abort
490	check_vector_size el0_sync_a32
491
492	.align	7, INV_INSN
493el0_irq_a32:
494	restore_mapping
495
496	b	elx_irq
497	check_vector_size el0_irq_a32
498
499	.align	7, INV_INSN
500el0_fiq_a32:
501	restore_mapping
502
503	b	elx_fiq
504	check_vector_size el0_fiq_a32
505
506	.align	7, INV_INSN
507el0_serror_a32:
508	b	el0_serror_a32
509	check_vector_size el0_serror_a32
510
511/*
512 * We're keeping this code in the same section as the vector to make sure
513 * that it's always available.
514 */
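/*
 * eret_to_el0 expects the x0 and x1 values to be returned to user space
 * to already be saved in the core local record (THREAD_CORE_LOCAL_X0).
 * It switches TTBR0_EL1 to the user mode ASID (and, with
 * CFG_CORE_UNMAP_CORE_AT_EL0, to the reduced mapping with a matching
 * VBAR_EL1), reloads x0 and x1 and issues the final eret.
 */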
515eret_to_el0:
516
517#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
518	/* Make vbar_el1 point to the vector in the reduced mapping */
519	adr	x0, thread_user_kcode_offset
520	ldr	x0, [x0]
521	mrs	x1, vbar_el1
522	sub	x1, x1, x0
523	msr	vbar_el1, x1
524	isb
525
526	/* Jump into the reduced mapping and continue execution */
527	ldr	x1, =1f
528	sub	x1, x1, x0
529	br	x1
5301:
531
532	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
533	msr	tpidr_el1, x0
534
535	/* Update the mapping to exclude the full kernel mapping */
536	mrs	x0, ttbr0_el1
537	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
538	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
539	msr	ttbr0_el1, x0
540	isb
541
542	mrs	x0, tpidr_el1
543#else
544	mrs	x0, ttbr0_el1
545	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
546	msr	ttbr0_el1, x0
547	isb
548	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
549#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
550
551	eret
552
553END_FUNC thread_vect_table
554
555LOCAL_FUNC el0_svc , :
556	/* get pointer to current thread context in x0 */
557	get_thread_ctx sp, 0, 1, 2
558	/* load saved kernel sp */
559	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
560	/* Keep pointer to initial record in x1 */
561	mov	x1, sp
562	/* Switch to SP_EL0 and restore kernel sp */
563	msr	spsel, #0
564	mov	x2, sp	/* Save SP_EL0 */
565	mov	sp, x0
566
567	/* Make room for struct thread_svc_regs */
568	sub	sp, sp, #THREAD_SVC_REG_SIZE
569	stp	x30,x2, [sp, #THREAD_SVC_REG_X30]
570
571	/* Restore x0-x3 */
572	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
573	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]
574
575	/* Prepare the argument for the handler */
576	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
577	mrs	x0, elr_el1
578	mrs	x1, spsr_el1
579	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
580	mov	x0, sp
581
582	/*
583	 * Unmask native interrupts, Serror, and debug exceptions since we have
584	 * nothing left in sp_el1. Note that the SVC handler is expected to
585	 * re-enable foreign interrupts by itself.
586	 */
587#if defined(CFG_ARM_GICV3)
588	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
589#else
590	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
591#endif
592
593	/* Call the handler */
594	bl	tee_svc_handler
595
596	/* Mask all maskable exceptions since we're switching back to sp_el1 */
597	msr	daifset, #DAIFBIT_ALL
598
599	/*
600	 * Save the kernel sp we had at the beginning of this function.
601	 * This is needed when this TA has called another TA because
602	 * __thread_enter_user_mode() also saves the stack pointer in this
603	 * field.
604	 */
605	msr	spsel, #1
606	get_thread_ctx sp, 0, 1, 2
607	msr	spsel, #0
608	add	x1, sp, #THREAD_SVC_REG_SIZE
609	str	x1, [x0, #THREAD_CTX_KERN_SP]
610
611	/* Restore registers to the required state and return */
612	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
613	msr	elr_el1, x0
614	msr	spsr_el1, x1
615	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
616	mov	x30, sp
617	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
618	mov	sp, x0
619	b_if_spsr_is_el0 w1, 1f
620	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
621	ldr	x30, [x30, #THREAD_SVC_REG_X30]
622
623	eret
624
6251:	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
626	ldr	x30, [x30, #THREAD_SVC_REG_X30]
627
628	msr	spsel, #1
629	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
630	b	eret_to_el0
631END_FUNC el0_svc
632
633LOCAL_FUNC el1_sync_abort , :
634	mov	x0, sp
635	msr	spsel, #0
636	mov	x3, sp		/* Save original sp */
637
638	/*
639	 * Update core local flags.
640	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
641	 */
642	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
643	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
644	orr	w1, w1, #THREAD_CLF_ABORT
645	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
646			.Lsel_tmp_sp
647
648	/* Select abort stack */
649	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
650	b	.Lset_sp
651
652.Lsel_tmp_sp:
653	/* Select tmp stack */
654	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
655	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */
656
657.Lset_sp:
658	mov	sp, x2
659	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
660
661	/*
662	 * Save state on stack
663	 */
664	sub	sp, sp, #THREAD_ABT_REGS_SIZE
665	mrs	x2, spsr_el1
666	/* Store spsr, sp_el0 */
667	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
668	/* Store original x0, x1 */
669	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
670	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
671	/* Store original x2, x3 and x4 to x29 */
672	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
673	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
674	/* Store x30, elr_el1 */
675	mrs	x0, elr_el1
676	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
677
678	/*
679	 * Call handler
680	 */
681	mov	x0, #0
682	mov	x1, sp
683	bl	abort_handler
684
685	/*
686	 * Restore state from stack
687	 */
688	/* Load x30, elr_el1 */
689	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
690	msr	elr_el1, x0
691	/* Load x0 to x29 */
692	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
693	/* Switch to SP_EL1 */
694	msr	spsel, #1
695	/* Save x0 to x3 in CORE_LOCAL */
696	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
697	/* Restore spsr_el1 and sp_el0 */
698	mrs	x3, sp_el0
699	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
700	msr	spsr_el1, x0
701	msr	sp_el0, x1
702
703	/* Update core local flags */
704	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
705	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
706	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
707
708	/* Restore x0 to x3 */
709	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
710
711	/* Return from exception */
712	eret
713END_FUNC el1_sync_abort
714
715	/* sp_el0 in x3 */
716LOCAL_FUNC el0_sync_abort , :
717	/*
718	 * Update core local flags
719	 */
720	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
721	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
722	orr	w1, w1, #THREAD_CLF_ABORT
723	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
724
725	/*
726	 * Save state on stack
727	 */
728
729	/* load abt_stack_va_end */
730	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
731	/* Keep pointer to initial record in x0 */
732	mov	x0, sp
733	/* Switch to SP_EL0 */
734	msr	spsel, #0
735	mov	sp, x1
736	sub	sp, sp, #THREAD_ABT_REGS_SIZE
737	mrs	x2, spsr_el1
738	/* Store spsr, sp_el0 */
739	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
740	/* Store original x0, x1 */
741	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
742	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
743	/* Store original x2, x3 and x4 to x29 */
744	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
745	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
746	/* Store x30, elr_el1 */
747	mrs	x0, elr_el1
748	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
749
750	/*
751	 * Call handler
752	 */
753	mov	x0, #0
754	mov	x1, sp
755	bl	abort_handler
756
757	/*
758	 * Restore state from stack
759	 */
760
761	/* Load x30, elr_el1 */
762	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
763	msr	elr_el1, x0
764	/* Load x0 to x29 */
765	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
766	/* Switch to SP_EL1 */
767	msr	spsel, #1
768	/* Save x0 to x3 in CORE_LOCAL */
769	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
770	/* Restore spsr_el1 and sp_el0 */
771	mrs	x3, sp_el0
772	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
773	msr	spsr_el1, x0
774	msr	sp_el0, x1
775
776	/* Update core local flags */
777	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
778	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
779	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
780
781	/* Restore x2 to x3 */
782	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
783
784	b_if_spsr_is_el0 w0, 1f
785
786	/* Restore x0 to x1 */
787	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
788
789	/* Return from exception */
790	eret
7911:	b	eret_to_el0
792END_FUNC el0_sync_abort
793
794/* The handler of foreign interrupts. */
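/*
 * A foreign interrupt is meant to be handled by normal world: the
 * handler below saves the thread context, suspends the thread with
 * THREAD_FLAGS_EXIT_ON_FOREIGN_INTR and returns to normal world with
 * OPTEE_SMC_RETURN_RPC_FOREIGN_INTR so that normal world can service
 * the interrupt and resume the thread later.
 */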
795.macro foreign_intr_handler mode:req
796	/*
797	 * Update core local flags
798	 */
799	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
800	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
801	orr	w1, w1, #THREAD_CLF_TMP
802	.ifc	\mode\(),fiq
803	orr	w1, w1, #THREAD_CLF_FIQ
804	.else
805	orr	w1, w1, #THREAD_CLF_IRQ
806	.endif
807	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
808
809	/* get pointer to current thread context in x0 */
810	get_thread_ctx sp, 0, 1, 2
811	/* Keep original SP_EL0 */
812	mrs	x2, sp_el0
813
814	/* Store original sp_el0 */
815	str	x2, [x0, #THREAD_CTX_REGS_SP]
816	/* store x4..x30 */
817	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
818	/* Load original x0..x3 into x10..x13 */
819	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
820	/* Save original x0..x3 */
821	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13
822
823	/* load tmp_stack_va_end */
824	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
825	/* Switch to SP_EL0 */
826	msr	spsel, #0
827	mov	sp, x1
828
829	/*
830	 * Mark current thread as suspended
831	 */
832	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
833	mrs	x1, spsr_el1
834	mrs	x2, elr_el1
835	bl	thread_state_suspend
836	mov	w4, w0		/* Supply thread index */
837
838	/* Update core local flags */
839	/* Switch to SP_EL1 */
840	msr	spsel, #1
841	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
842	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
843	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
844	msr	spsel, #0
845
846	/*
847	 * functions expect to have SP_EL0 selected with the tmp stack
848	 * functions expects to have SP_EL0 selected with the tmp stack
849	 * set.
850	 */
851
852	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
853	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
854	mov	w2, #0
855	mov	w3, #0
856	/* w4 is already filled in above */
857	smc	#0
858	b	.	/* SMC should not return */
859.endm
860
861/*
862 * This struct is never used from C; it's only here to visualize the
863 * layout.
864 *
865 * struct elx_nintr_rec {
866 * 	uint64_t x[19 - 4]; x4..x18
867 * 	uint64_t lr;
868 * 	uint64_t sp_el0;
869 * };
870 */
871#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
872#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
873#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
874#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)
875
876/* The handler of native interrupts. */
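/*
 * A native interrupt is handled in secure world: the handler below
 * switches to the tmp stack, saves the registers that aren't preserved
 * across a C call in a struct elx_nintr_rec, calls the registered
 * thread_nintr_handler and then returns to the interrupted context.
 */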
877.macro native_intr_handler mode:req
878	/*
879	 * Update core local flags
880	 */
881	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
882	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
883	.ifc	\mode\(),fiq
884	orr	w1, w1, #THREAD_CLF_FIQ
885	.else
886	orr	w1, w1, #THREAD_CLF_IRQ
887	.endif
888	orr	w1, w1, #THREAD_CLF_TMP
889	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
890
891	/* load tmp_stack_va_end */
892	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
893	/* Keep original SP_EL0 */
894	mrs	x2, sp_el0
895	/* Switch to SP_EL0 */
896	msr	spsel, #0
897	mov	sp, x1
898
899	/*
900	 * Save on the stack the registers that may be clobbered by a
901	 * call to a C function
902	 */
903	/* Make room for struct elx_nintr_rec */
904	sub	sp, sp, #ELX_NINTR_REC_SIZE
905	/* Store x4..x18 */
906	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
907	/* Store lr and original sp_el0 */
908	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]
909
910	bl	thread_check_canaries
911	adr	x16, thread_nintr_handler_ptr
912	ldr	x16, [x16]
913	blr	x16
914
915	/*
916	 * Restore registers
917	 */
918	/* Restore x4..x18 */
919	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
920	/* Load  lr and original sp_el0 */
921	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
922	/* Restore SP_EL0 */
923	mov	sp, x2
924	/* Switch back to SP_EL1 */
925	msr	spsel, #1
926
927	/* Update core local flags */
928	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
929	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
930	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
931
932	mrs	x0, spsr_el1
933	/* Restore x2..x3 */
934	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
935	b_if_spsr_is_el0 w0, 1f
936
937	/* Restore x0..x1 */
938	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
939
940	/* Return from exception */
941	eret
9421:	b	eret_to_el0
943.endm
944
945LOCAL_FUNC elx_irq , :
946#if defined(CFG_ARM_GICV3)
947	native_intr_handler	irq
948#else
949	foreign_intr_handler	irq
950#endif
951END_FUNC elx_irq
952
953LOCAL_FUNC elx_fiq , :
954#if defined(CFG_ARM_GICV3)
955	foreign_intr_handler	fiq
956#else
957	native_intr_handler	fiq
958#endif
959END_FUNC elx_fiq
960