xref: /optee_os/core/arch/arm/kernel/thread_a64.S (revision 31908aeac446be4859fe9dc98dc4e1e0c13b528c)
/*
 * Copyright (c) 2015, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm.S>
#include <arm64_macros.S>
#include <arm64.h>
#include <sm/teesmc.h>
#include <sm/teesmc_opteed_macros.h>
#include <sm/teesmc_opteed.h>
#include <kernel/thread_defs.h>
#include <kernel/thread.h>
#include "thread_private.h"

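	/*
	 * get_thread_ctx - compute the address of the current thread context
	 *
	 * Reads the current thread index from the supplied core local record
	 * (\core_local) and returns the address of the corresponding entry in
	 * the threads[] array in \res. \tmp0 and \tmp1 are clobbered as
	 * scratch registers.
	 */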
	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldr	\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD_OFFSET]
		adr	\res, threads
		mov	\tmp1, #THREAD_CTX_SIZE
		madd	\res, \tmp0, \tmp1, \res
	.endm

	.section .text.thread_asm
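/*
 * Entered from the Secure Monitor when the normal world has issued a
 * standard SMC. x0-x7 are passed to thread_handle_std_smc() as a
 * struct thread_smc_args placed on the stack.
 */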
LOCAL_FUNC vector_std_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 0, 7
	mov	x0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (an error was detected) it will do a
	 * normal "C" return.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry

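/*
 * Entered from the Secure Monitor when the normal world has issued a
 * fast SMC. x0-x7 are passed to thread_handle_fast_smc() as a
 * struct thread_smc_args and the updated arguments are returned to the
 * monitor with TEESMC_OPTEED_RETURN_CALL_DONE.
 */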
LOCAL_FUNC vector_fast_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 0, 7
	mov	x0, sp
	bl	thread_handle_fast_smc
	load_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	adr	x16, thread_fiq_handler_ptr
	ldr	x16, [x16]
	blr	x16
	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry

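/*
 * The following entries are invoked by the Secure Monitor for power
 * management events (CPU on/off/suspend/resume, system off/reset). Each
 * one calls the registered handler and reports the handler's return value
 * back to the monitor in x1 together with the matching
 * TEESMC_OPTEED_RETURN_*_DONE code in x0.
 */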
LOCAL_FUNC vector_cpu_on_entry , :
	adr	x16, thread_cpu_on_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
	adr	x16, thread_cpu_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
	adr	x16, thread_cpu_suspend_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
	adr	x16, thread_cpu_resume_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
	adr	x16, thread_system_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
	adr	x16, thread_system_reset_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.
 *
 * Note that ARM-TF depends on the layout of this vector table; any change
 * in layout has to be synced with ARM-TF.
 */
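/*
 * As a rough sketch (the authoritative definition lives on the ARM-TF
 * side, in its OP-TEE dispatcher), the monitor is assumed to view this
 * table as a fixed array of entry points, roughly:
 *
 *	struct optee_vectors {
 *		void (*std_smc_entry)(void);
 *		void (*fast_smc_entry)(void);
 *		void (*cpu_on_entry)(void);
 *		void (*cpu_off_entry)(void);
 *		void (*cpu_resume_entry)(void);
 *		void (*cpu_suspend_entry)(void);
 *		void (*fiq_entry)(void);
 *		void (*system_off_entry)(void);
 *		void (*system_reset_entry)(void);
 *	};
 *
 * where each member corresponds to one of the 'b' instructions below,
 * which is why the order of the branches must not change.
 */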
FUNC thread_vector_table , :
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table


/* void thread_resume(struct thread_ctx_regs *regs) */
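/*
 * Restores the state saved in the supplied struct thread_ctx_regs: sp,
 * elr_el1, spsr_el1 and x0-x30 are reloaded and execution continues at
 * the restored exception return address via eret.
 */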
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP_OFFSET, 1, 3
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3
	load_xregs x0, THREAD_CTX_REGS_X_OFFSET(1), 1, 30
	ldr	x0, [x0, THREAD_CTX_REGS_X_OFFSET(0)]
	eret
END_FUNC thread_resume

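/*
 * Entry point executed in a newly scheduled thread to handle a standard
 * SMC. x0-x7 are forwarded to the registered std SMC handler; when the
 * handler returns, the thread state is freed and the result is reported
 * to the Secure Monitor from the temporary stack.
 */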
FUNC thread_std_smc_entry , :
	/* Pass x0-x7 in a struct thread_smc_args */
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 0, 7
	mov	x0, sp

	/* Call the registered handler */
	adr	x16, thread_std_smc_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/*
	 * Load the returned x0-x3 into preserved registers and skip the
	 * "returned" x4-x7 since they will not be returned to the normal
	 * world.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 20, 23
	add	sp, sp, #THREAD_SMC_ARGS_SIZE

	/* Disable interrupts before switching to the temporary stack */
	msr	daifset, #(DAIFBIT_FIQ | DAIFBIT_IRQ)
	bl	thread_get_tmp_sp
	mov	sp, x0

	bl	thread_state_free

	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, x23
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
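/*
 * Performs a Remote Procedure Call to the normal world: the current
 * thread state is saved and the thread suspended, the first three words
 * of rv[] are passed to the Secure Monitor in x1-x3 together with
 * TEESMC_OPTEED_RETURN_CALL_DONE in x0 and the thread index in x4. When
 * the thread is resumed, execution continues at .thread_rpc_return where
 * the returned x0-x2 are written back into rv[].
 */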
FUNC thread_rpc , :
	/* Read daif and create an SPSR */
	mrs	x1, daif
	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)

	msr	daifset, #DAIFBIT_ALL
	push	x0, xzr
	push	x1, x30
	bl	thread_get_ctx_regs
	ldr	x30, [sp, #8]
	store_xregs x0, THREAD_CTX_REGS_X_OFFSET(19), 19, 30
	mov	x19, x0

	bl	thread_get_tmp_sp
	pop	x1, xzr		/* Match "push x1, x30" above */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP_OFFSET]
	ldr	x20, [sp]	/* Get pointer to rv[] */
	mov	sp, x0		/* Switch to tmp stack */

	adr	x2, .thread_rpc_return
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	mov	x4, x0		/* Supply thread index */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	load_wregs x20, 0, 1, 3	/* Load rv[] into x1-x3 */
	smc	#0
	b	.		/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * stored in THREAD_CTX above.
	 *
	 * Execution jumps here from thread_resume above when the RPC has
	 * returned. The IRQ and FIQ bits are restored to what they were
	 * when this function was originally entered.
	 */
	pop	x4, xzr		/* Get pointer to rv[] */
	store_wregs x4, 0, 0, 2	/* Store x0-x2 into rv[] */
	ret
END_FUNC thread_rpc

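/*
 * Installs thread_vect_table, the exception vector table defined below,
 * in VBAR_EL1 of the calling CPU.
 */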
FUNC thread_init_vbar , :
	adr	x0, thread_vect_table
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar

264/*
265 * uint32_t thread_enter_user_mode(uint32_t a0, uint32_t a1, uint32_t a2,
266 * 		uint32_t a3, vaddr_t user_sp, vaddr_t user_func,
267 * 		uint32_t *exit_status0, uint32_t *exit_status1);
268 * See description in thread.h
269 */
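/*
 * Note that this implementation only enters AArch32 user mode: SPSR_EL1
 * is built with SPSR_MODE_RW_32 set and, when user_func is a Thumb
 * address, with the Thumb bit set, before the eret below.
 */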
FUNC thread_enter_user_mode , :
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR_OFFSET, 6, 7
	store_xregs sp, THREAD_USER_MODE_REC_X_OFFSET(19), 19, 30

	/*
	 * Switch to SP_EL1
	 * Save interrupt bits in x23
	 * Disable exceptions
	 * Save kern sp in x19
	 */
	mrs	x23, daif
	msr	daifset, #DAIFBIT_ALL
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* Get pointer to current thread context */
	get_thread_ctx sp, x21, x20, x22
	/* Save kernel stack pointer */
	str	x19, [x21, #THREAD_CTX_KERN_SP_OFFSET]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	/* Keep only the AIF bits */
	and	x23, x23, #(SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT)
	/* Set AArch32 */
	orr	x23, x23, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT)
	/* Set Thumb mode if the entry function is a Thumb function */
	and	x24, x5, #SPSR_32_T_MASK
	orr	x23, x23, x24, lsl #SPSR_32_T_SHIFT
	msr	spsr_el1, x23
	/* Set user sp (x13 maps to the AArch32 SP in user mode) */
	mov	x13, x4
	msr	sp_el0, x4 /* TODO remove, only here to invalidate sp_el0 */
	/* Set user function */
	msr	elr_el1, x5

	/* Jump into user mode */
	eret
END_FUNC thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 * 		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR_OFFSET]
	str	w1, [x3]
	str	w2, [x4]
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X_OFFSET(19), 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes
	 * the vector entry point as its parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm


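	/*
	 * Exception vector table installed in VBAR_EL1 by thread_init_vbar()
	 * above. The table must be 2 KiB aligned (.align 11) and each of the
	 * 16 vectors is limited to 32 instructions (128 bytes), which is
	 * what check_vector_size verifies.
	 */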
	.align	11
LOCAL_FUNC thread_vect_table , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7
sync_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	el1_sync_abort
	check_vector_size sync_el1_sp0

	.align	7
irq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	elx_irq
	check_vector_size irq_el1_sp0

	.align	7
fiq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	elx_fiq
	check_vector_size fiq_el1_sp0

	.align	7
SErrorSP0:
	b	SErrorSP0
	check_vector_size SErrorSP0

	/* -----------------------------------------------------
	 * Current EL with SPx: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionSPx:
	b	SynchronousExceptionSPx
	check_vector_size SynchronousExceptionSPx

	.align	7
IrqSPx:
	b	IrqSPx
	check_vector_size IrqSPx

	.align	7
FiqSPx:
	b	FiqSPx
	check_vector_size FiqSPx

	.align	7
SErrorSPx:
	b	SErrorSPx
	check_vector_size SErrorSPx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a64:
	b	el0_sync_a64
	check_vector_size el0_sync_a64

	.align	7
IrqA64:
	b	IrqA64
	check_vector_size IrqA64

	.align	7
FiqA64:
	b	FiqA64
	check_vector_size FiqA64

	.align	7
SErrorA64:
	b	SErrorA64
	check_vector_size SErrorA64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_sync_a32_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7
el0_irq_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7
el0_fiq_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7
SErrorA32:
	b	SErrorA32
	check_vector_size SErrorA32

END_FUNC thread_vect_table

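/*
 * Handles an SVC (system call) from AArch32 user mode. Entered from
 * el0_sync_a32 above with the original x0-x3 saved in the core local
 * record. The saved kernel stack pointer is restored, x0-x14 together
 * with elr_el1 and spsr_el1 are saved in an SVC register frame on that
 * stack, and the registered SVC handler is called with a pointer to the
 * frame.
 */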
LOCAL_FUNC el0_sync_a32_svc , :
	/* Get pointer to current thread context in x0 */
	get_thread_ctx sp, x0, x1, x2
	/* Load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP_OFFSET]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	sp, x0
	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X_OFFSET(2)]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X_OFFSET(0)]

	/* Prepare the argument for the handler */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	store_xregs sp, THREAD_SVC_REG_X_OFFS(0), 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR_OFFS, 0, 1
	mov	x0, sp

	/*
	 * Unmask FIQ, SError, and debug exceptions since we have nothing
	 * left in sp_el1.
	 */
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)

	/* Call the registered handler */
	adr	x16, thread_svc_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/* Save the kernel sp we'll have after the add below */
	msr	spsel, #1
	get_thread_ctx sp, x0, x1, x2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP_OFFSET]

	/* Restore registers to the required state and return */
	load_xregs sp, THREAD_SVC_REG_ELR_OFFS, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X_OFFS(0), 0, 14
	add	sp, sp, #THREAD_SVC_REG_SIZE

	eret
END_FUNC el0_sync_a32_svc

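/*
 * Handles a synchronous abort taken while running in EL1 on SP_EL0.
 * Entered with the original x0-x3 saved in the core local record. The
 * full register state is saved in an abort register frame
 * (THREAD_ABT_REG_*), thread_handle_abort() is called with a pointer to
 * it, and the interrupted context is then restored.
 */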
LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0

	/* Update core local flags */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS_OFFSET]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS_OFFSET]

	/*
	 * Check if we should initialize SP_EL0 or use it as is (recursive
	 * aborts).
	 */
	tst	w1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	mov	x3, sp		/* Save original sp unconditionally */
	beq	.keep_sp
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END_OFFSET]
	mov	sp, x2
.keep_sp:

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR_OFFS]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X_OFFSET(0)]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X_OFFS(0)]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X_OFFSET(2)]
	store_xregs sp, THREAD_ABT_REG_X_OFFS(2), 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X_OFFS(30)]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	thread_handle_abort

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X_OFFS(30)]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X_OFFS(0), 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR_OFFS]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS_OFFSET]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS_OFFSET]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3

	/* Return from exception */
	eret
END_FUNC el1_sync_abort

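/*
 * Handles a synchronous abort (other than SVC) taken from AArch32 user
 * mode. Entered from el0_sync_a32 above with the original x0-x3 saved
 * in the core local record and the aborted context's sp_el0 in x3. The
 * register state is saved on the abort stack and thread_handle_abort()
 * is called before the aborted context is restored.
 */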
	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	/* Load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END_OFFSET]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR_OFFS]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X_OFFSET(0)]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X_OFFS(0)]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X_OFFSET(2)]
	store_xregs sp, THREAD_ABT_REG_X_OFFS(2), 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X_OFFS(30)]
	/* Call handler */
	mov	x0, #0
	mov	x1, sp
	bl	thread_handle_abort
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X_OFFS(30)]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X_OFFS(0), 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR_OFFS]
	msr	spsr_el1, x0
	msr	sp_el0, x1
	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	/* Return from exception */
	eret
END_FUNC el0_sync_abort

/*
 * struct elx_itr_rec {
 * 	uint64_t x[19 - 4]; x4..x18
 * 	uint64_t init_rec;
 * 	uint64_t pad;
 * 	uint64_t lr;
 * 	uint64_t sp_el0;
 * };
 */
#define ELX_ITR_REC_X_OFFSET(x)		(8 * ((x) - 4))
#define ELX_ITR_REC_INIT_REC_OFFSET	(8 + ELX_ITR_REC_X_OFFSET(19))
#define ELX_ITR_REC_PAD_OFFSET		(8 + ELX_ITR_REC_INIT_REC_OFFSET)
#define ELX_ITR_REC_LR_OFFSET		(8 + ELX_ITR_REC_PAD_OFFSET)
#define ELX_ITR_REC_SP_EL0_OFFSET	(8 + ELX_ITR_REC_LR_OFFSET)
#define ELX_ITR_REC_SIZE		(8 + ELX_ITR_REC_SP_EL0_OFFSET)

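/*
 * Handles an IRQ taken from EL0 or EL1 (hence "elx"). The interrupted
 * context is saved in the current thread's struct thread_ctx_regs, the
 * thread is suspended with THREAD_FLAGS_EXIT_ON_IRQ and control returns
 * to the Secure Monitor with an RPC request (TEESMC_RETURN_RPC_IRQ) so
 * that the normal world can service the interrupt.
 */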
LOCAL_FUNC elx_irq , :
	/* Load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END_OFFSET]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save, on the stack, the registers that a call to
	 * thread_get_ctx_regs() may clobber.
	 */
	/* Make room for struct elx_itr_rec */
	sub	sp, sp, #ELX_ITR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_ITR_REC_X_OFFSET(4), 4, 18
	/* Store pointer to initial record */
	str	x0, [sp, #ELX_ITR_REC_INIT_REC_OFFSET]
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_ITR_REC_LR_OFFSET]

	/*
	 * Get pointer to struct thread_ctx_regs and store the context
	 */
	bl	thread_get_ctx_regs
	/* Restore lr and original sp_el0 */
	ldp	x30, x1, [sp, #ELX_ITR_REC_LR_OFFSET]
	/* Store original sp_el0 */
	str	x1, [x0, #THREAD_CTX_REGS_SP_OFFSET]
	/* Restore x4..x18 */
	load_xregs sp, ELX_ITR_REC_X_OFFSET(4), 4, 18
	/* Store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X_OFFSET(4), 4, 30
	/* Get pointer to initial record */
	ldr	x4, [sp, #ELX_ITR_REC_INIT_REC_OFFSET]
	/* Load original x0..x3 into x10..x13 */
	load_xregs x4, THREAD_CORE_LOCAL_X_OFFSET(0), 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X_OFFSET(0), 10, 13

	/* Remove struct elx_itr_rec from the stack */
	add	sp, sp, #ELX_ITR_REC_SIZE

	/*
	 * Mark the current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_IRQ
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend
	mov	w4, w0		/* Supply thread index */

	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	w1, =TEESMC_RETURN_RPC_IRQ
	mov	w2, #0
	mov	w3, #0
	/* w4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
END_FUNC elx_irq

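/*
 * Handles a FIQ taken from EL0 or EL1 (hence "elx"). The registered FIQ
 * handler is called on the temporary stack and the interrupted context
 * is then restored, so the FIQ is handled entirely in the secure world
 * without suspending the thread.
 */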
LOCAL_FUNC elx_fiq , :
	/* Load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END_OFFSET]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save, on the stack, the registers that a call to a C function
	 * may clobber.
	 */
	/* Make room for struct elx_itr_rec */
	sub	sp, sp, #ELX_ITR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_ITR_REC_X_OFFSET(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_ITR_REC_LR_OFFSET]

	bl	thread_check_canaries
	adr	x16, thread_fiq_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_ITR_REC_X_OFFSET(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_ITR_REC_LR_OFFSET]
	/* Restore sp_el0 */
	mov	sp, x2
	/* Switch back to sp_el1 */
	msr	spsel, #1
	/* Restore x0..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3

	/* Return from exception */
	eret
END_FUNC elx_fiq