/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2017, Linaro Limited
 */

#include <arm64.h>
#include <arm64_macros.S>
#include <asm-defines.h>
#include <asm.S>
#include <keep.h>
#include <kernel/thread_defs.h>
#include <mm/core_mmu.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

#include "thread_private.h"

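	/*
	 * Computes a pointer to the struct thread_ctx of the thread
	 * currently running on this core: reads the thread index from the
	 * core local record and indexes the global threads[] array with it.
	 */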
	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldr	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		adr	x\res, threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm

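	/*
	 * Branches to \label if the SPSR value in \reg describes a return
	 * to EL0, that is, AArch32 state (the M[4] bit set) or AArch64
	 * with the EL bits cleared (EL0t).
	 */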
	.macro b_if_spsr_is_el0 reg, label
		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
		b.eq	\label
	.endm

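/*
 * The vector_*_entry functions below are reached through
 * thread_vector_table from the secure monitor (ARM-TF). Each one calls
 * the corresponding handler and then returns to the monitor with an SMC
 * carrying the matching TEESMC_OPTEED_RETURN_* code.
 */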
LOCAL_FUNC vector_std_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (an error was detected) it will do a
	 * normal "C" return.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_fast_smc
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16
	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
	adr	x16, thread_cpu_on_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
	adr	x16, thread_cpu_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
	adr	x16, thread_cpu_suspend_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
	adr	x16, thread_cpu_resume_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
	adr	x16, thread_system_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
	adr	x16, thread_system_reset_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.
 *
 * Note that ARM-TF depends on the layout of this vector table; any
 * change in the layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table
KEEP_PAGER thread_vector_table


/* void thread_resume(struct thread_ctx_regs *regs) */
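/*
 * Restores the thread state saved in *regs and resumes execution with
 * an eret. If the saved SPSR targets EL0 the return goes through
 * eret_to_el0 below so that the user space mapping is restored first.
 */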
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3

	b_if_spsr_is_el0 w3, 1f

	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]
	eret

1:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC thread_resume

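/*
 * Entry point for a thread handling a standard SMC: x0-x7 hold the SMC
 * arguments when the thread starts executing here. The final return to
 * the normal world is done on the temporary stack with an SMC to the
 * secure monitor.
 */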
FUNC thread_std_smc_entry , :
	/* pass x0-x7 in a struct thread_smc_args */
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp

	/* Call the registered handler */
	bl	__thread_std_smc_entry

	/*
	 * Load the returned x0-x3 into preserved registers and skip the
	 * "returned" x4-x7 since they will not be returned to the normal
	 * world.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
	add	sp, sp, #THREAD_SMC_ARGS_SIZE

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	bl	thread_get_tmp_sp
	mov	sp, x0

	bl	thread_state_free

	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, x23
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
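/*
 * Saves the callee-saved registers and the resume point
 * (.thread_rpc_return) in the thread context, suspends the thread and
 * issues an SMC carrying rv[] to the normal world. Once the RPC has
 * been serviced the thread is resumed at .thread_rpc_return via
 * thread_resume() above.
 */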
FUNC thread_rpc , :
	/* Read daif and create an SPSR */
	mrs	x1, daif
	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	push	x0, xzr
	push	x1, x30
	bl	thread_get_ctx_regs
	ldr	x30, [sp, #8]
	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
	mov	x19, x0

	bl	thread_get_tmp_sp
	pop	x1, xzr		/* Match "push x1, x30" above */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP]
	ldr	x20, [sp]	/* Get pointer to rv[] */
	mov	sp, x0		/* Switch to tmp stack */

	adr	x2, .thread_rpc_return
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	mov	x4, x0		/* Supply thread index */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	load_wregs x20, 0, 1, 3	/* Load rv[0..2] into w1-w3 */
	smc	#0
	b	.		/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * stored in THREAD_CTX above.
	 *
	 * Execution jumps here from thread_resume above when the RPC has
	 * returned. The IRQ and FIQ bits are restored to what they were
	 * when this function was originally entered.
	 */
	pop	x16, xzr	/* Get pointer to rv[] */
	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
	ret
END_FUNC thread_rpc
KEEP_PAGER thread_rpc

FUNC thread_init_vbar , :
	adr	x0, thread_vect_table
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
 *               unsigned long user_func, unsigned long spsr,
 *               uint32_t *exit_status0, uint32_t *exit_status1)
 */
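/*
 * Enters user mode (EL0) with the given arguments, stack pointer, entry
 * point and SPSR. The function returns only when the user mode thread
 * exits back to EL1, at which point thread_unwind_user_mode() restores
 * the registers saved below and returns to our caller.
 */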
FUNC __thread_enter_user_mode , :
	ldr	x8, [sp]
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Switch to SP_EL1
	 * Disable exceptions
	 * Save kern sp in x19
	 */
	msr	daifset, #DAIFBIT_ALL
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save kernel stack pointer to ensure that el0_svc() uses
	 * correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	msr	spsr_el1, x6
	/* Set user sp */
	mov	x13, x4		/* Used when running TA in AArch32 */
	msr	sp_el0, x4	/* Used when running TA in AArch64 */
	/* Set user function */
	msr	elr_el1, x5
	/* Set frame pointer (user stack can't be unwound past this point) */
	mov	x29, #0

	/* Jump into user mode */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 * 		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
	str	w1, [x3]
	str	w2, [x4]
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes
	 * the vector entry label as its parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

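	/*
	 * Executed at the top of each lower EL vector below: switches
	 * TTBR0_EL1 back to the full kernel mapping and the kernel ASID,
	 * and saves x0-x3 in the core local record so they can be used as
	 * scratch registers by the exception handlers.
	 */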
	.macro restore_mapping
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/* Temporarily save x0 */
		msr	tpidr_el1, x0

		/* Update the mapping to use the full kernel mapping */
		mrs	x0, ttbr0_el1
		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb

		/* Jump into the full mapping and continue execution */
		ldr	x0, =1f
		br	x0
	1:

		/* Point VBAR at the vector table in the full mapping */
		adr	x0, thread_vect_table
		msr	vbar_el1, x0
		isb

		/* Restore x0 and save x0-x3 in the core local record */
		mrs	x0, tpidr_el1
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
#else
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mrs	x0, ttbr0_el1
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	.endm

	.section .text.thread_vect_table
	.align	11
FUNC thread_vect_table , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7
sync_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size sync_el1_sp0

	.align	7
irq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size irq_el1_sp0

	.align	7
fiq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size fiq_el1_sp0

	.align	7
SErrorSP0:
	b	SErrorSP0
	check_vector_size SErrorSP0

	/* -----------------------------------------------------
	 * Current EL with SPx: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionSPx:
	b	SynchronousExceptionSPx
	check_vector_size SynchronousExceptionSPx

	.align	7
IrqSPx:
	b	IrqSPx
	check_vector_size IrqSPx

	.align	7
FiqSPx:
	b	FiqSPx
	check_vector_size FiqSPx

	.align	7
SErrorSPx:
	b	SErrorSPx
	check_vector_size SErrorSPx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a64:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a64

	.align	7
el0_irq_a64:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a64

	.align	7
el0_fiq_a64:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a64

	.align	7
SErrorA64:
	b	SErrorA64
	check_vector_size SErrorA64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a32:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7
el0_irq_a32:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7
el0_fiq_a32:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7
SErrorA32:
	b	SErrorA32
	check_vector_size SErrorA32

/*
 * We're keeping this code in the same section as the vector table to
 * make sure that it's always available.
 */
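/*
 * Common exit path back to EL0: with CFG_CORE_UNMAP_CORE_AT_EL0 it
 * switches VBAR_EL1 and TTBR0_EL1 over to the reduced mapping and the
 * user mode ASID before the eret, otherwise it only switches the ASID.
 * Expects x0 and x1 to have been saved in THREAD_CORE_LOCAL_X0/X1.
 */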
eret_to_el0:

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point VBAR at the vector table in the reduced mapping */
	adr	x0, thread_user_kcode_offset
	ldr	x0, [x0]
	adr	x1, thread_vect_table
	sub	x1, x1, x0
	msr	vbar_el1, x1
	isb

	/* Jump into the reduced mapping and continue execution */
	ldr	x1, =1f
	sub	x1, x1, x0
	br	x1
1:

	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	msr	tpidr_el1, x0

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x0, ttbr0_el1
	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb

	mrs	x0, tpidr_el1
#else
	mrs	x0, ttbr0_el1
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	eret

END_FUNC thread_vect_table

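/*
 * Synchronous exception from EL0 caused by an SVC: switches to the
 * kernel stack saved in the thread context, builds a struct
 * thread_svc_regs there and calls tee_svc_handler(). Returns with a
 * plain eret if the (possibly updated) SPSR targets EL1, otherwise
 * through eret_to_el0.
 */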
LOCAL_FUNC el0_svc , :
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	mov	x0, sp

	/*
	 * Unmask native interrupts, SError, and debug exceptions since we
	 * have nothing left in sp_el1. Note that the SVC handler is
	 * expected to re-enable foreign interrupts by itself.
	 */
#if defined(CFG_ARM_GICV3)
	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
#else
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
#endif

	/* Call the handler */
	bl	tee_svc_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This is needed when this TA has called another TA because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
	mov	sp, x0
	b_if_spsr_is_el0 w1, 1f
	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	eret

1:	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC el0_svc

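/*
 * Synchronous exception taken from EL1: saves the register state in a
 * struct thread_abt_regs on the abort stack (or on the temporary stack
 * if the abort is nested) and calls abort_handler().
 */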
LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el1_sync_abort

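	/*
	 * Synchronous exception taken from EL0 that wasn't an SVC, i.e. an
	 * abort: saves the register state in a struct thread_abt_regs on
	 * the abort stack and calls abort_handler().
	 */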
	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x2 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3

	b_if_spsr_is_el0 w0, eret_to_el0

	/* Restore x0 to x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	eret
END_FUNC el0_sync_abort

/* The handler of foreign interrupts. */
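/*
 * A foreign interrupt can't be handled while in secure world, so the
 * current thread is suspended with THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
 * and an OPTEE_SMC_RETURN_RPC_FOREIGN_INTR request is made to the
 * normal world, which is expected to handle the interrupt and later
 * resume the suspended thread.
 */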
.macro foreign_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend
	mov	w4, w0		/* Supply thread index */

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * functions expect to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	w2, #0
	mov	w3, #0
	/* w4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
.endm

/*
 * This struct is never used from C; it's only here to visualize the
 * layout.
 *
 * struct elx_nintr_rec {
 * 	uint64_t x[19 - 4]; x4..x18
 * 	uint64_t lr;
 * 	uint64_t sp_el0;
 * };
 */
#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)

/* The handler of native interrupts. */
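/*
 * Native interrupts are handled in place: the interrupted state is
 * saved on the temporary stack, the registered handler in
 * thread_nintr_handler_ptr is called, and execution then returns to
 * the interrupted context with an eret.
 */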
.macro native_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save registers on stack that can be corrupted by a call to
	 * a C function
	 */
	/* Make room for struct elx_nintr_rec */
	sub	sp, sp, #ELX_NINTR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]

	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	mrs	x0, spsr_el1
	/* Restore x2..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
	b_if_spsr_is_el0 w0, eret_to_el0

	/* Restore x0..x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	eret
.endm

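/*
 * With CFG_ARM_GICV3 native (secure) interrupts are delivered as IRQs
 * and foreign (normal world) interrupts as FIQs; with GICv2 the roles
 * are reversed. The two entries below select the matching handler.
 */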
LOCAL_FUNC elx_irq , :
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
END_FUNC elx_fiq