/*
 * Copyright (c) 2015, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm.S>
#include <arm64_macros.S>
#include <arm64.h>
#include <sm/teesmc.h>
#include <sm/teesmc_opteed_macros.h>
#include <sm/teesmc_opteed.h>
#include <kernel/thread_defs.h>
#include <kernel/thread.h>
#include "thread_private.h"
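	/*
	 * Compute the address of the current thread's struct thread_ctx:
	 * read the current thread index from the core local record given
	 * in \core_local and return &threads[index] in x\res. x\tmp0 and
	 * x\tmp1 are clobbered.
	 */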
	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldr	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD_OFFSET]
		adr	x\res, threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm

	.section .text.thread_asm
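/*
 * The vector_*_entry functions below are entered from the secure monitor
 * through thread_vector_table. For the SMC entries the arguments from the
 * non-secure caller are found in x0-x7. Each entry hands control back to
 * the monitor with an SMC carrying one of the TEESMC_OPTEED_RETURN_*
 * function IDs in x0.
 */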
LOCAL_FUNC vector_std_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 0, 7
	mov	x0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stacks (an error was detected) it does a
	 * normal "C" return.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 0, 7
	mov	x0, sp
	bl	thread_handle_fast_smc
	load_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	adr	x16, thread_fiq_handler_ptr
	ldr	x16, [x16]
	blr	x16
	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry
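/*
 * Each of the PSCI related entries below (cpu on/off/suspend/resume and
 * system off/reset) calls its registered handler through a function
 * pointer and reports the handler's return value to the monitor in x1,
 * together with the matching TEESMC_OPTEED_RETURN_*_DONE code in x0.
 */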
LOCAL_FUNC vector_cpu_on_entry , :
	adr	x16, thread_cpu_on_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
	adr	x16, thread_cpu_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
	adr	x16, thread_cpu_suspend_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
	adr	x16, thread_cpu_resume_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
	adr	x16, thread_system_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
	adr	x16, thread_system_reset_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry
/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.
 *
 * Note that ARM-TF depends on the layout of this vector table; any change
 * in the layout has to be synchronized with ARM-TF.
 */
FUNC thread_vector_table , :
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table

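/*
 * Restores a previously saved thread context: sp, elr_el1 and spsr_el1
 * are loaded from the context, then x1-x30 and finally x0, before eret
 * returns to the saved state (kernel or user mode depending on the saved
 * spsr).
 */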
/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP_OFFSET, 1, 3
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3
	load_xregs x0, THREAD_CTX_REGS_X_OFFSET(1), 1, 30
	ldr	x0, [x0, THREAD_CTX_REGS_X_OFFSET(0)]
	eret
END_FUNC thread_resume
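/*
 * Entry for a thread handling a standard (yielding) SMC: x0-x7 are packed
 * into a struct thread_smc_args on the stack and passed to
 * __thread_std_smc_entry(). When it returns the thread state is freed and
 * the result is reported to the monitor with
 * TEESMC_OPTEED_RETURN_CALL_DONE.
 */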
FUNC thread_std_smc_entry , :
	/* pass x0-x7 in a struct thread_smc_args */
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 0, 7
	mov	x0, sp

	/* Call the registered handler */
	bl	__thread_std_smc_entry

	/*
	 * Load the returned x0-x3 into preserved registers and skip the
	 * "returned" x4-x7 since they will not be returned to normal
	 * world.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X_OFFS(0), 20, 23
	add	sp, sp, #THREAD_SMC_ARGS_SIZE

	/* Disable interrupts before switching to temporary stack */
	msr	daifset, #(DAIFBIT_FIQ | DAIFBIT_IRQ)
	bl	thread_get_tmp_sp
	mov	sp, x0

	bl	thread_state_free

	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, x23
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
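/*
 * Performs an RPC to normal world: x19-x30 are saved in the thread
 * context and .thread_rpc_return is recorded as the resume address via
 * thread_state_suspend(), then an SMC with TEESMC_OPTEED_RETURN_CALL_DONE
 * passes rv[0..2] and the thread index to the monitor. When normal world
 * resumes the thread, execution continues at .thread_rpc_return which
 * stores the returned x0-x2 into rv[] and returns to the caller.
 */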
FUNC thread_rpc , :
	/* Read daif and create an SPSR */
	mrs	x1, daif
	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)

	msr	daifset, #DAIFBIT_ALL
	push	x0, xzr
	push	x1, x30
	bl	thread_get_ctx_regs
	ldr	x30, [sp, #8]
	store_xregs x0, THREAD_CTX_REGS_X_OFFSET(19), 19, 30
	mov	x19, x0

	bl	thread_get_tmp_sp
	pop	x1, xzr		/* Match "push x1, x30" above */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP_OFFSET]
	ldr	x20, [sp]	/* Get pointer to rv[] */
	mov	sp, x0		/* Switch to tmp stack */

	adr	x2, .thread_rpc_return
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	mov	x4, x0		/* Supply thread index */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	load_wregs x20, 0, 1, 3	/* Load rv[] into x1-x3 */
	smc	#0
	b	.		/* SMC should not return */
.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * saved in the thread context above.
	 *
	 * Execution jumps here from thread_resume above when the RPC has
	 * returned. The IRQ and FIQ bits are restored to what they were
	 * when this function was originally entered.
	 */
	pop	x4, xzr		/* Get pointer to rv[] */
	store_wregs x4, 0, 0, 2	/* Store x0-x2 into rv[] */
	ret
END_FUNC thread_rpc
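/* Install thread_vect_table as the EL1 exception vector base (VBAR_EL1) */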
FUNC thread_init_vbar , :
	adr	x0, thread_vect_table
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
 *               unsigned long user_func, unsigned long spsr,
 *               uint32_t *exit_status0, uint32_t *exit_status1)
 */
FUNC __thread_enter_user_mode , :
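	/*
	 * The first eight arguments arrive in x0-x7; the ninth argument,
	 * exit_status1, is passed on the stack according to AAPCS64, so
	 * fetch it into x8.
	 */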
	ldr	x8, [sp]
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR_OFFSET, 7, 8
	store_xregs sp, THREAD_USER_MODE_REC_X_OFFSET(19), 19, 30

	/*
	 * Switch to SP_EL1
	 * Disable exceptions
	 * Save kern sp in x19
	 */
	msr	daifset, #DAIFBIT_ALL
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save the kernel stack pointer to ensure that el0_svc() uses
	 * the correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP_OFFSET]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	msr	spsr_el1, x6
	/* Set user sp */
	mov	x13, x4		/* Used when running TA in AArch32 */
	msr	sp_el0, x4	/* Used when running TA in AArch64 */
	/* Set user function */
	msr	elr_el1, x5

	/* Jump into user mode */
	eret
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 * 		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR_OFFSET]
	str	w1, [x3]
	str	w2, [x4]
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X_OFFSET(19), 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode
	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes
	 * the vector entry as its parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

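/*
 * EL1 exception vector table, installed with thread_init_vbar(). It has
 * 16 entries of at most 32 instructions (128 bytes) each, hence the
 * 2048-byte alignment of the table (.align 11) and the 128-byte alignment
 * of each entry (.align 7).
 */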
	.align	11
LOCAL_FUNC thread_vect_table , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7
sync_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	el1_sync_abort
	check_vector_size sync_el1_sp0

	.align	7
irq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	elx_irq
	check_vector_size irq_el1_sp0

	.align	7
fiq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	elx_fiq
	check_vector_size fiq_el1_sp0

	.align	7
SErrorSP0:
	b	SErrorSP0
	check_vector_size SErrorSP0
	/* -----------------------------------------------------
	 * Current EL with SPx: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionSPx:
	b	SynchronousExceptionSPx
	check_vector_size SynchronousExceptionSPx

	.align	7
IrqSPx:
	b	IrqSPx
	check_vector_size IrqSPx

	.align	7
FiqSPx:
	b	FiqSPx
	check_vector_size FiqSPx

	.align	7
SErrorSPx:
	b	SErrorSPx
	check_vector_size SErrorSPx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a64

	.align	7
el0_irq_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	elx_irq
	check_vector_size el0_irq_a64
	.align	7
el0_fiq_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	elx_fiq
	check_vector_size el0_fiq_a64

	.align	7
SErrorA64:
	b	SErrorA64
	check_vector_size SErrorA64
	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7
el0_irq_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7
el0_fiq_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7
SErrorA32:
	b	SErrorA32
	check_vector_size SErrorA32

END_FUNC thread_vect_table
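/*
 * Handles an SVC from user mode. Entered from the lower EL synchronous
 * vectors above with the original x0-x3 saved in the core local record
 * and sp_el0 still holding the user stack pointer. The kernel stack
 * pointer saved in the thread context is restored before the registered
 * SVC handler is called.
 */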
LOCAL_FUNC el0_svc , :
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP_OFFSET]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30_OFFS]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X_OFFSET(2)]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X_OFFSET(0)]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X_OFFS(0), 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR_OFFS, 0, 1
	mov	x0, sp
	/*
	 * Unmask FIQ, SError, and debug exceptions since we have nothing
	 * left in sp_el1. Note that the SVC handler is expected to
	 * re-enable IRQs by itself.
	 */
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)

	/* Call the registered handler */
	adr	x16, thread_svc_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL
	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This matters when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP_OFFSET]

	/* Restore registers to the required state and return */
	load_xregs sp, THREAD_SVC_REG_ELR_OFFS, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X_OFFS(0), 0, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0_OFFS]
	mov	sp, x0
	ldr	x0, [x30, THREAD_SVC_REG_X_OFFS(0)]
	ldr	x30, [x30, #THREAD_SVC_REG_X30_OFFS]

	eret
END_FUNC el0_svc
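/*
 * Handles a synchronous exception taken from EL1. The register state is
 * saved on the abort stack (or on the tmp stack if an abort was already
 * being handled) and thread_handle_abort() is called. On return the saved
 * state is restored and the exception returns to where it was taken.
 */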
LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS_OFFSET]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END_OFFSET]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END_OFFSET]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS_OFFSET]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR_OFFS]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X_OFFSET(0)]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X_OFFS(0)]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X_OFFSET(2)]
	store_xregs sp, THREAD_ABT_REG_X_OFFS(2), 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X_OFFS(30)]
	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	thread_handle_abort

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X_OFFS(30)]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X_OFFS(0), 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR_OFFS]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS_OFFSET]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS_OFFSET]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3

	/* Return from exception */
	eret
END_FUNC el1_sync_abort
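/*
 * Handles a synchronous exception other than SVC taken from user mode.
 * The vector has saved the original x0-x3 in the core local record. The
 * state is saved on the abort stack and thread_handle_abort() is called
 * before the state is restored and the exception returns.
 */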
	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS_OFFSET]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS_OFFSET]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END_OFFSET]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR_OFFS]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X_OFFSET(0)]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X_OFFS(0)]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X_OFFSET(2)]
	store_xregs sp, THREAD_ABT_REG_X_OFFS(2), 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X_OFFS(30)]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	thread_handle_abort

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X_OFFS(30)]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X_OFFS(0), 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR_OFFS]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS_OFFSET]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS_OFFSET]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3

	/* Return from exception */
	eret
END_FUNC el0_sync_abort
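/*
 * Handles an IRQ received while executing in secure world. The thread is
 * suspended with THREAD_FLAGS_EXIT_ON_IRQ and its register state is saved
 * in the thread context, then an RPC with TEESMC_RETURN_RPC_IRQ is issued
 * so normal world can handle the interrupt. The suspended thread can be
 * resumed once normal world returns from the RPC.
 */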
LOCAL_FUNC elx_irq , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS_OFFSET]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	orr	w1, w1, #THREAD_CLF_IRQ
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS_OFFSET]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP_OFFSET]
	/* store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X_OFFSET(4), 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X_OFFSET(0), 10, 13

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END_OFFSET]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_IRQ
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend
	mov	w4, w0		/* Supply thread index */

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS_OFFSET]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS_OFFSET]
	msr	spsel, #0
	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * functions expect to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	w1, =TEESMC_RETURN_RPC_IRQ
	mov	w2, #0
	mov	w3, #0
	/* w4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
END_FUNC elx_irq
/*
 * This struct is never used from C; it's only here to visualize the
 * layout.
 *
 * struct elx_fiq_rec {
 * 	uint64_t x[19 - 4]; x4..x18
 * 	uint64_t lr;
 * 	uint64_t sp_el0;
 * };
 */
#define ELX_FIQ_REC_X_OFFSET(x)		(8 * ((x) - 4))
#define ELX_FIQ_REC_LR_OFFSET		(8 + ELX_FIQ_REC_X_OFFSET(19))
#define ELX_FIQ_REC_SP_EL0_OFFSET	(8 + ELX_FIQ_REC_LR_OFFSET)
#define ELX_FIQ_REC_SIZE		(8 + ELX_FIQ_REC_SP_EL0_OFFSET)
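/*
 * Handles a FIQ. Unlike IRQs, FIQs are served directly in secure world:
 * the registers that a C call may clobber and that are not already saved
 * in the core local record are stored in an elx_fiq_rec on the tmp stack,
 * the registered FIQ handler is called, and the interrupted context is
 * resumed with eret without suspending the thread.
 */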
LOCAL_FUNC elx_fiq , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS_OFFSET]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_FIQ
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS_OFFSET]

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END_OFFSET]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save the registers on the stack that can be corrupted by a
	 * call to a C function
	 */
	/* Make room for struct elx_fiq_rec */
	sub	sp, sp, #ELX_FIQ_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_FIQ_REC_X_OFFSET(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_FIQ_REC_LR_OFFSET]

	bl	thread_check_canaries
	adr	x16, thread_fiq_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_FIQ_REC_X_OFFSET(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_FIQ_REC_LR_OFFSET]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS_OFFSET]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS_OFFSET]

	/* Restore x0..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X_OFFSET(0), 0, 3

	/* Return from exception */
	eret
END_FUNC elx_fiq