/*
 * Copyright (c) 2015-2017, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm64.h>
#include <arm64_macros.S>
#include <asm-defines.h>
#include <asm.S>
#include <keep.h>
#include <kernel/thread_defs.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

#include "thread_private.h"

	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldr	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		adr	x\res, threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm
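
/*
 * Rough C equivalent of the macro above, for orientation only (a sketch;
 * threads[] and curr_thread are the objects the offsets above refer to,
 * the real lookup lives in thread.c):
 *
 *	struct thread_ctx *ctx = &threads[core_local->curr_thread];
 */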

LOCAL_FUNC vector_std_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (error detected) it will do a normal "C"
	 * return.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_fast_smc
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16
	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
	adr	x16, thread_cpu_on_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
	adr	x16, thread_cpu_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
	adr	x16, thread_cpu_suspend_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
	adr	x16, thread_cpu_resume_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
	adr	x16, thread_system_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
	adr	x16, thread_system_reset_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.
 *
 * Note that ARM-TF depends on the layout of this vector table; any change
 * in layout has to be synced with ARM-TF. The layout ARM-TF relies on is
 * sketched as a C struct right after the table.
 */
FUNC thread_vector_table , :
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table
KEEP_PAGER thread_vector_table
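
/*
 * A hypothetical C view of the layout ARM-TF relies on (a sketch; the
 * field names here are illustrative and the authoritative definition
 * lives on the ARM-TF side). Each slot is a single 4-byte branch
 * instruction, so every entry point sits at a fixed 4-byte offset from
 * the table base:
 *
 *	struct thread_vector_table {
 *		uint32_t std_smc_entry;
 *		uint32_t fast_smc_entry;
 *		uint32_t cpu_on_entry;
 *		uint32_t cpu_off_entry;
 *		uint32_t cpu_resume_entry;
 *		uint32_t cpu_suspend_entry;
 *		uint32_t fiq_entry;
 *		uint32_t system_off_entry;
 *		uint32_t system_reset_entry;
 *	};
 */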


/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3
	load_xregs x0, THREAD_CTX_REGS_X1, 1, 30
	ldr	x0, [x0, THREAD_CTX_REGS_X0]
	eret
END_FUNC thread_resume
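
/*
 * Layout of struct thread_ctx_regs as assumed by thread_resume() above
 * (a sketch based on the offsets used here; thread.h has the
 * authoritative definition):
 *
 *	struct thread_ctx_regs {
 *		uint64_t sp;
 *		uint64_t pc;
 *		uint64_t cpsr;
 *		uint64_t x[31];
 *	};
 */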

FUNC thread_std_smc_entry , :
	/* pass x0-x7 in a struct thread_smc_args */
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp

	/* Call the registered handler */
	bl	__thread_std_smc_entry

	/*
	 * Load the returned x0-x3 into preserved registers and skip the
	 * "returned" x4-x7 since they will not be returned to normal
	 * world.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
	add	sp, sp, #THREAD_SMC_ARGS_SIZE

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	bl	thread_get_tmp_sp
	mov	sp, x0

	bl	thread_state_free

	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, x23
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry
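
/*
 * struct thread_smc_args as used above (a sketch; thread.h has the
 * authoritative definition): eight 64-bit fields mirroring x0-x7 in
 * order, so THREAD_SMC_ARGS_X0 is the first field and the record spans
 * 8 * 8 bytes.
 *
 *	struct thread_smc_args {
 *		uint64_t a0;
 *		uint64_t a1;
 *		uint64_t a2;
 *		uint64_t a3;
 *		uint64_t a4;
 *		uint64_t a5;
 *		uint64_t a6;
 *		uint64_t a7;
 *	};
 */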

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
	/* Read daif and create an SPSR */
	mrs	x1, daif
	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	push	x0, xzr
	push	x1, x30
	bl	thread_get_ctx_regs
	ldr	x30, [sp, #8]
	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
	mov	x19, x0

	bl	thread_get_tmp_sp
	pop	x1, xzr		/* Match "push x1, x30" above */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP]
	ldr	x20, [sp]	/* Get pointer to rv[] */
	mov	sp, x0		/* Switch to tmp stack */

	adr	x2, .thread_rpc_return
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	mov	x4, x0		/* Supply thread index */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	load_wregs x20, 0, 1, 3	/* Load rv[0..2] into w1-w3 */
	smc	#0
	b	.		/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * stored in THREAD_CTX above.
	 *
	 * Jumps here from thread_resume above when RPC has returned. The
	 * IRQ and FIQ bits are restored to what they were when this
	 * function was originally entered.
	 */
	pop	x16, xzr	/* Get pointer to rv[] */
	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
	ret
END_FUNC thread_rpc
KEEP_PAGER thread_rpc
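
/*
 * A hedged usage sketch of thread_rpc() above (illustrative only; the
 * real callers are the thread_rpc_*() helpers in thread.c and the exact
 * rv[] contents depend on the RPC being made):
 *
 *	uint32_t rv[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
 *
 *	thread_rpc(rv);
 *
 * rv[0..2] are handed to normal world in w1-w3 and, once normal world
 * resumes this thread, execution continues at .thread_rpc_return with
 * the returned w0-w5 copied back into rv[0..5].
 */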

FUNC thread_init_vbar , :
	adr	x0, thread_vect_table
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
 *               unsigned long user_func, unsigned long spsr,
 *               uint32_t *exit_status0, uint32_t *exit_status1)
 *
 */
FUNC __thread_enter_user_mode , :
	ldr	x8, [sp]
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Switch to SP_EL1
	 * Disable exceptions
	 * Save kern sp in x19
	 */
	msr	daifset, #DAIFBIT_ALL
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save kernel stack pointer to ensure that el0_svc() uses
	 * correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	msr	spsr_el1, x6
	/* Set user sp */
	mov	x13, x4		/* Used when running TA in AArch32 */
	msr	sp_el0, x4	/* Used when running TA in AArch64 */
	/* Set user function */
	msr	elr_el1, x5
	/* Set frame pointer (user stack can't be unwound past this point) */
	mov	x29, #0

	/* Jump into user mode */
	eret
END_FUNC __thread_enter_user_mode
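
/*
 * Layout of struct thread_user_mode_rec as used above and in
 * thread_unwind_user_mode() below (a sketch derived from the stores
 * above; thread_private.h has the authoritative definition): the two
 * exit status pointers (passed in x7 and x8) come first, followed by
 * the callee-saved registers.
 *
 *	struct thread_user_mode_rec {
 *		uint64_t exit_status0_ptr;
 *		uint64_t exit_status1_ptr;
 *		uint64_t x[31 - 19]; x19..x30
 *	};
 */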

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 * 		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
	str	w1, [x3]
	str	w2, [x4]
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions (32 * 4 = 128 bytes,
	 * which matches the .align 7 spacing of the entries below). It is
	 * meant to be placed immediately after the last instruction in
	 * the vector and takes the vector entry label as its parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm


	.section .text.thread_vect_table
	.align	11
LOCAL_FUNC thread_vect_table , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7
sync_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size sync_el1_sp0

	.align	7
irq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size irq_el1_sp0

	.align	7
fiq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size fiq_el1_sp0

	.align	7
SErrorSP0:
	b	SErrorSP0
	check_vector_size SErrorSP0

	/* -----------------------------------------------------
	 * Current EL with SPx: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionSPx:
	b	SynchronousExceptionSPx
	check_vector_size SynchronousExceptionSPx

	.align	7
IrqSPx:
	b	IrqSPx
	check_vector_size IrqSPx

	.align	7
FiqSPx:
	b	FiqSPx
	check_vector_size FiqSPx

	.align	7
SErrorSPx:
	b	SErrorSPx
	check_vector_size SErrorSPx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a64

	.align	7
el0_irq_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el0_irq_a64

	.align	7
el0_fiq_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el0_fiq_a64

	.align	7
SErrorA64:
	b	SErrorA64
	check_vector_size SErrorA64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7
el0_irq_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7
el0_fiq_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7
SErrorA32:
	b	SErrorA32
	check_vector_size SErrorA32

END_FUNC thread_vect_table

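/*
 * el0_svc below builds a struct thread_svc_regs on the kernel stack and
 * passes a pointer to it to tee_svc_handler(). A sketch of the record,
 * with field names matching the THREAD_SVC_REG_* offsets used below
 * (the exact ordering and any padding are defined in thread.h, which is
 * the authoritative definition):
 *
 *	struct thread_svc_regs {
 *		uint64_t elr;
 *		uint64_t spsr;
 *		uint64_t x0; x0..x14, syscall arguments and scratch
 *		...
 *		uint64_t x14;
 *		uint64_t x30;
 *		uint64_t sp_el0;
 *	};
 */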
LOCAL_FUNC el0_svc , :
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	mov	x0, sp

	/*
	 * Unmask native interrupts, SError, and debug exceptions since we
	 * have nothing left on sp_el1. Note that the SVC handler is
	 * expected to re-enable foreign interrupts by itself.
	 */
#if defined(CFG_ARM_GICV3)
	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
#else
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
#endif

	/* Call the handler */
	bl	tee_svc_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This matters when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
	mov	sp, x0
	ldr	x0, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	eret
END_FUNC el0_svc

LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el1_sync_abort

	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el0_sync_abort

/* The handler of foreign interrupts. */
.macro foreign_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend
	mov	w4, w0		/* Supply thread index */

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * functions expect to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	w2, #0
	mov	w3, #0
	/* w4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
.endm

/*
 * This struct is never used from C; it's only here to visualize the
 * layout.
 *
 * struct elx_nintr_rec {
 * 	uint64_t x[19 - 4]; x4..x18
 * 	uint64_t lr;
 * 	uint64_t sp_el0;
 * };
 */
#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)
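
/*
 * Worked out for reference, the definitions above resolve to: x4 at
 * offset 0 through x18 at offset 112, lr at 128, sp_el0 at 136 and a
 * total size of 144 bytes. The 8 unused bytes at offset 120 keep the
 * total size a multiple of 16 (so SP stays 16-byte aligned) and the
 * lr/sp_el0 pair 16-byte aligned.
 */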

/* The handler of native interrupts. */
.macro native_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save registers on stack that can be corrupted by a call to
	 * a C function
	 */
	/* Make room for struct elx_nintr_rec */
	sub	sp, sp, #ELX_NINTR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]

	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
.endm

LOCAL_FUNC elx_irq , :
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
END_FUNC elx_fiq
