xref: /optee_os/core/arch/arm/kernel/thread_a64.S (revision 31a2964210f0d04eb6e8d940c445a3d0c9c8705f)
/*
 * Copyright (c) 2015-2017, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm.S>
#include <arm64_macros.S>
#include <arm64.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed_macros.h>
#include <sm/teesmc_opteed.h>
#include <asm-defines.h>
#include <kernel/thread_defs.h>
#include "thread_private.h"

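	/*
	 * Helper to look up the struct thread_ctx of the thread currently
	 * running on this core. Roughly equivalent C, as a sketch assuming
	 * the asm-defines.h offsets match the structs in thread_private.h:
	 *
	 *   res = (vaddr_t)threads +
	 *         core_local->curr_thread * sizeof(struct thread_ctx);
	 *
	 * i.e. "res" ends up pointing at threads[curr_thread].
	 */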
	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldr	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		adr	x\res, threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm

	.section .text.thread_asm
LOCAL_FUNC vector_std_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (error detected) it will do a normal "C"
	 * return.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_fast_smc
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16
	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
	adr	x16, thread_cpu_on_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
	adr	x16, thread_cpu_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
	adr	x16, thread_cpu_suspend_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
	adr	x16, thread_cpu_resume_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
	adr	x16, thread_system_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
	adr	x16, thread_system_reset_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.
 *
 * Note that ARM-TF depends on the layout of this vector table; any change
 * in layout has to be synced with ARM-TF.
 */
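/*
 * Each entry below is a single branch instruction (4 bytes), so ARM-TF
 * enters OP-TEE at a fixed offset of 4 * n from thread_vector_table for
 * the n:th service. The order and spacing of the entries are therefore
 * part of the ABI and must not change without a matching ARM-TF update.
 */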
FUNC thread_vector_table , :
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table


/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3
	load_xregs x0, THREAD_CTX_REGS_X1, 1, 30
	ldr	x0, [x0, THREAD_CTX_REGS_X0]
	eret
END_FUNC thread_resume

FUNC thread_std_smc_entry , :
	/* pass x0-x7 in a struct thread_smc_args */
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp

	/* Call the registered handler */
	bl	__thread_std_smc_entry

	/*
	 * Load the returned x0-x3 into preserved registers and skip the
	 * "returned" x4-x7 since they will not be returned to normal
	 * world.
	 */
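	/*
	 * x20-x23 are callee-saved per AAPCS64, so they survive the calls
	 * to thread_get_tmp_sp() and thread_state_free() below.
	 */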
	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
	add	sp, sp, #THREAD_SMC_ARGS_SIZE

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	bl	thread_get_tmp_sp
	mov	sp, x0

	bl	thread_state_free

	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, x23
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
	/* Read daif and create an SPSR */
	mrs	x1, daif
	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)
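	/*
	 * The D, A, I and F bits occupy the same bit positions (9:6) in
	 * the DAIF system register as in SPSR_EL1, so OR-ing in the EL1
	 * mode bits yields an SPSR that restores the exception masks this
	 * function was entered with.
	 */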

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	push	x0, xzr
	push	x1, x30
	bl	thread_get_ctx_regs
	ldr	x30, [sp, #8]
	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
	mov	x19, x0

	bl	thread_get_tmp_sp
	pop	x1, xzr		/* Match "push x1, x30" above */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP]
	ldr	x20, [sp]	/* Get pointer to rv[] */
	mov	sp, x0		/* Switch to tmp stack */

	adr	x2, .thread_rpc_return
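	/*
	 * x2 is passed to thread_state_suspend() below as the address to
	 * resume the thread at, assuming the usual prototype
	 * int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc),
	 * so a later thread_resume() ends up at .thread_rpc_return.
	 */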
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	mov	x4, x0		/* Supply thread index */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	load_wregs x20, 0, 1, 3	/* Load rv[] into w1-w3 */
	smc	#0
	b	.		/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * stored in THREAD_CTX above.
	 *
	 * Execution jumps here from thread_resume above when the RPC has
	 * returned. The IRQ and FIQ bits are restored to what they were
	 * when this function was originally entered.
	 */
	pop	x16, xzr	/* Get pointer to rv[] */
	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
	ret
END_FUNC thread_rpc

FUNC thread_init_vbar , :
	adr	x0, thread_vect_table
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
 *               unsigned long user_func, unsigned long spsr,
 *               uint32_t *exit_status0, uint32_t *exit_status1)
 *
 */
FUNC __thread_enter_user_mode , :
	ldr	x8, [sp]
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Switch to SP_EL1
	 * Disable exceptions
	 * Save kern sp in x19
	 */
	msr	daifset, #DAIFBIT_ALL
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save kernel stack pointer to ensure that el0_svc() uses
	 * correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	msr	spsr_el1, x6
	/* Set user sp */
	mov	x13, x4		/* Used when running TA in AArch32 */
	msr	sp_el0, x4	/* Used when running TA in AArch64 */
	/* Set user function */
	msr	elr_el1, x5
	/* Set frame pointer (user stack can't be unwound past this point) */
	mov	x29, #0

	/* Jump into user mode */
	eret
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 * 		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
	str	w1, [x3]
	str	w2, [x4]
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. This is meant to be placed
	 * immediately after the last instruction in the vector. It takes the
	 * vector entry as the parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

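	/*
	 * Note on alignment: each vector entry below may hold at most
	 * 32 instructions * 4 bytes = 128 bytes, matching the .align 7
	 * (2^7 = 128) spacing, and the table itself is .align 11 since
	 * the low 11 bits of VBAR_EL1 are reserved and must be zero.
	 */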
	.align	11
LOCAL_FUNC thread_vect_table , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7
sync_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size sync_el1_sp0

	.align	7
irq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size irq_el1_sp0

	.align	7
fiq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size fiq_el1_sp0

	.align	7
SErrorSP0:
	b	SErrorSP0
	check_vector_size SErrorSP0

	/* -----------------------------------------------------
	 * Current EL with SPx: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionSPx:
	b	SynchronousExceptionSPx
	check_vector_size SynchronousExceptionSPx

	.align	7
IrqSPx:
	b	IrqSPx
	check_vector_size IrqSPx

	.align	7
FiqSPx:
	b	FiqSPx
	check_vector_size FiqSPx

	.align	7
SErrorSPx:
	b	SErrorSPx
	check_vector_size SErrorSPx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a64

	.align	7
el0_irq_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el0_irq_a64

	.align	7
el0_fiq_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el0_fiq_a64

	.align	7
SErrorA64:
	b	SErrorA64
	check_vector_size SErrorA64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7
el0_irq_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7
el0_fiq_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7
SErrorA32:
	b	SErrorA32
	check_vector_size SErrorA32

END_FUNC thread_vect_table

LOCAL_FUNC el0_svc , :
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	mov	x0, sp

	/*
	 * Unmask native interrupts, SError, and debug exceptions since we have
	 * nothing left in sp_el1. Note that the SVC handler is expected to
	 * re-enable foreign interrupts by itself.
	 */
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)

	/* Call the handler */
	bl	tee_svc_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This matters when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
	mov	sp, x0
	ldr	x0, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	eret
END_FUNC el0_svc

LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
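	/*
	 * If the ABORT bit was set in the flags before the shift above it
	 * now sits at bit THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT,
	 * so the tbnz below detects a nested abort (an abort taken while
	 * already handling one) and selects the tmp stack instead of the
	 * abort stack.
	 */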
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el1_sync_abort

	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el0_sync_abort

/* The handler of foreign interrupts. */
.macro foreign_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend
	mov	w4, w0		/* Supply thread index */

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * functions expect to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	w2, #0
	mov	w3, #0
	/* w4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
.endm

/*
 * This struct is never used from C; it's only here to visualize the
 * layout.
 *
 * struct elx_nintr_rec {
 * 	uint64_t x[19 - 4]; x4..x18
 * 	uint64_t lr;
 * 	uint64_t sp_el0;
 * };
 */
#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)
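/*
 * With the offsets above ELX_NINTR_REC_SIZE works out to 144 bytes, a
 * multiple of 16, so the stack pointer stays 16-byte aligned as AArch64
 * requires after the "sub sp, sp, #ELX_NINTR_REC_SIZE" below.
 */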

/* The handler of native interrupts. */
.macro native_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save registers on stack that can be corrupted by a call to
	 * a C function
	 */
	/* Make room for struct elx_nintr_rec */
	sub	sp, sp, #ELX_NINTR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]
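	/*
	 * Only the AAPCS64 caller-saved registers need saving here: x0..x3
	 * are already stashed in the core local area by the vector entry,
	 * x4..x18 and lr are saved above, and x19..x29 are callee-saved so
	 * the C handler preserves them.
	 */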

	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
.endm

LOCAL_FUNC elx_irq , :
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
END_FUNC elx_fiq