/*
 * Copyright (c) 2015, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm.S>
#include <arm64_macros.S>
#include <arm64.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed_macros.h>
#include <sm/teesmc_opteed.h>
#include <asm-defines.h>
#include <kernel/thread_defs.h>
#include "thread_private.h"

	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldr	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		adr	x\res, threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm
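
	/*
	 * Roughly the C equivalent of get_thread_ctx (an illustrative
	 * sketch only; "threads" is the thread context array referenced
	 * above):
	 *
	 * res = (vaddr_t)threads +
	 *	 core_local->curr_thread * sizeof(struct thread_ctx);
	 * i.e. res = &threads[core_local->curr_thread];
	 */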

	.section .text.thread_asm
LOCAL_FUNC vector_std_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (an error was detected) it will do a
	 * normal "C" return.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_fast_smc
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	adr	x16, thread_fiq_handler_ptr
	ldr	x16, [x16]
	blr	x16
	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
	adr	x16, thread_cpu_on_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
	adr	x16, thread_cpu_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
	adr	x16, thread_cpu_suspend_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
	adr	x16, thread_cpu_resume_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
	adr	x16, thread_system_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
	adr	x16, thread_system_reset_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.
 *
 * Note that ARM-TF depends on the layout of this vector table; any change
 * in layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table
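
/*
 * Each entry above exits with an SMC to the Secure Monitor: x0 holds a
 * TEESMC_OPTEED_RETURN_* code and, for the PSCI-style entries, x1 holds
 * the handler's return value. The order of the branches is the contract
 * with ARM-TF mentioned above; the entries are expected to line up with
 * the corresponding vector struct in ARM-TF's OP-TEE dispatcher.
 */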


/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3
	load_xregs x0, THREAD_CTX_REGS_X1, 1, 30
	ldr	x0, [x0, #THREAD_CTX_REGS_X0]
	eret
END_FUNC thread_resume
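
/*
 * The eret above branches to the address loaded into elr_el1 and restores
 * PSTATE from spsr_el1, i.e. the three values loaded from
 * THREAD_CTX_REGS_SP onwards are the saved stack pointer, return address
 * and spsr of the thread being resumed.
 */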

FUNC thread_std_smc_entry , :
	/* pass x0-x7 in a struct thread_smc_args */
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp

	/* Call the registered handler */
	bl	__thread_std_smc_entry

	/*
	 * Load the returned x0-x3 into preserved registers and skip the
	 * "returned" x4-x7 since they will not be returned to normal
	 * world.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
	add	sp, sp, #THREAD_SMC_ARGS_SIZE

	/* Disable interrupts before switching to temporary stack */
	msr	daifset, #(DAIFBIT_FIQ | DAIFBIT_IRQ)
	bl	thread_get_tmp_sp
	mov	sp, x0

	bl	thread_state_free

	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, x23
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry
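
/*
 * The SMC above reports the result of the standard call: x0 holds
 * TEESMC_OPTEED_RETURN_CALL_DONE and x1-x4 hold the four words returned
 * by __thread_std_smc_entry(), which the Secure Monitor is expected to
 * forward to the normal world.
 */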

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
	/* Read daif and create an SPSR */
	mrs	x1, daif
	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)

	msr	daifset, #DAIFBIT_ALL
	push	x0, xzr
	push	x1, x30
	bl	thread_get_ctx_regs
	ldr	x30, [sp, #8]
	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
	mov	x19, x0

	bl	thread_get_tmp_sp
	pop	x1, xzr		/* Match "push x1, x30" above */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP]
	ldr	x20, [sp]	/* Get pointer to rv[] */
	mov	sp, x0		/* Switch to tmp stack */

	adr	x2, .thread_rpc_return
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	mov	x4, x0		/* Supply thread index */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	load_wregs x20, 0, 1, 3	/* Load rv[] into w1-w3 */
	smc	#0
	b	.		/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * saved in THREAD_CTX above.
	 *
	 * Execution jumps here from thread_resume() above when the RPC
	 * has returned. The IRQ and FIQ bits are restored to what they
	 * were when this function was originally entered.
	 */
	pop	x16, xzr	/* Get pointer to rv[] */
	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
	ret
END_FUNC thread_rpc
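
/*
 * A rough sketch of the round trip above in pseudo-C (illustrative only,
 * using the names that appear in this file):
 *
 * void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS])
 * {
 *	save x19..x30 and sp in the thread context;
 *	thread_state_suspend(THREAD_FLAGS_COPY_ARGS_ON_RETURN, spsr,
 *			     .thread_rpc_return);
 *	smc(TEESMC_OPTEED_RETURN_CALL_DONE, rv[0], rv[1], rv[2],
 *	    thread_index);
 *	// never reached: once normal world has serviced the RPC the
 *	// thread is resumed via thread_resume() and execution continues
 *	// at .thread_rpc_return, which copies w0-w5 back into rv[].
 * }
 */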

FUNC thread_init_vbar , :
	adr	x0, thread_vect_table
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
 *               unsigned long user_func, unsigned long spsr,
 *               uint32_t *exit_status0, uint32_t *exit_status1)
 */
FUNC __thread_enter_user_mode , :
	ldr	x8, [sp]
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Switch to SP_EL1
	 * Disable exceptions
	 * Save kern sp in x19
	 */
	msr	daifset, #DAIFBIT_ALL
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save kernel stack pointer to ensure that el0_svc() uses
	 * correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	msr	spsr_el1, x6
	/* Set user sp */
	mov	x13, x4		/* Used when running TA in AArch32 */
	msr	sp_el0, x4	/* Used when running TA in AArch64 */
	/* Set user function */
	msr	elr_el1, x5

	/* Jump into user mode */
	eret
END_FUNC __thread_enter_user_mode
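
/*
 * Note that __thread_enter_user_mode() does not return directly: control
 * comes back through an exception from user mode (see el0_svc and
 * el0_sync_abort below), after which thread_unwind_user_mode() restores
 * x19..x30 and performs the actual return from the
 * thread_enter_user_mode() call.
 */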

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 * 		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
	str	w1, [x3]
	str	w2, [x4]
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes
	 * the vector entry label as its parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

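/*
 * AArch64 exception vector table geometry: the table must be 2 KiB
 * aligned (hence .align 11 below) and consists of four groups of four
 * entries: current EL with SP0, current EL with SPx, lower EL using
 * AArch64 and lower EL using AArch32. Each entry is 128 bytes (.align 7),
 * which is what limits a vector to 32 instructions.
 */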
	.align	11
LOCAL_FUNC thread_vect_table , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7
sync_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size sync_el1_sp0

	.align	7
irq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size irq_el1_sp0

	.align	7
fiq_el1_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size fiq_el1_sp0

	.align	7
SErrorSP0:
	b	SErrorSP0
	check_vector_size SErrorSP0

	/* -----------------------------------------------------
	 * Current EL with SPx: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionSPx:
	b	SynchronousExceptionSPx
	check_vector_size SynchronousExceptionSPx

	.align	7
IrqSPx:
	b	IrqSPx
	check_vector_size IrqSPx

	.align	7
FiqSPx:
	b	FiqSPx
	check_vector_size FiqSPx

	.align	7
SErrorSPx:
	b	SErrorSPx
	check_vector_size SErrorSPx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a64

	.align	7
el0_irq_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el0_irq_a64

	.align	7
el0_fiq_a64:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el0_fiq_a64

	.align	7
SErrorA64:
	b	SErrorA64
	check_vector_size SErrorA64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7
el0_sync_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7
el0_irq_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7
el0_fiq_a32:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7
SErrorA32:
	b	SErrorA32
	check_vector_size SErrorA32

END_FUNC thread_vect_table

LOCAL_FUNC el0_svc , :
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	mov	x0, sp
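
	/*
	 * x0 now points to the struct thread_svc_regs filled in above.
	 * The handler presumably has a prototype along the lines of
	 * "void tee_svc_handler(struct thread_svc_regs *regs)"; the
	 * exact declaration lives in the C headers, not in this file.
	 */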

	/*
	 * Unmask FIQ, SError, and debug exceptions since we have nothing
	 * left in sp_el1. Note that the SVC handler is expected to
	 * re-enable IRQs by itself.
	 */
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)

	/* Call the handler */
	bl	tee_svc_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This is needed when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
	mov	sp, x0
	ldr	x0, [x30, #THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	eret
END_FUNC el0_svc

LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp
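
	/*
	 * The tbnz above tests the bit that held THREAD_CLF_ABORT before
	 * the shift, i.e. whether we were already handling an abort when
	 * this one was taken. In that nested case the abort stack is
	 * already in use, so branch to .Lsel_tmp_sp and use the tmp
	 * stack instead.
	 */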

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el1_sync_abort

	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el0_sync_abort

LOCAL_FUNC elx_irq , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	orr	w1, w1, #THREAD_CLF_IRQ
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_IRQ
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend
	mov	w4, w0		/* Supply thread index */

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * function expects to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	w1, =OPTEE_SMC_RETURN_RPC_IRQ
	mov	w2, #0
	mov	w3, #0
	/* w4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
END_FUNC elx_irq
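
/*
 * The exit above is effectively an RPC to the normal world: the thread is
 * suspended with THREAD_FLAGS_EXIT_ON_IRQ and OPTEE_SMC_RETURN_RPC_IRQ in
 * w1 asks the normal world to handle the pending IRQ, while the thread
 * index in w4 allows the suspended thread to be resumed afterwards.
 */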

/*
 * This struct is never used from C; it's only here to visualize the
 * layout.
 *
 * struct elx_fiq_rec {
 * 	uint64_t x[19 - 4]; x4..x18
 * 	uint64_t lr;
 * 	uint64_t sp_el0;
 * };
 */
#define ELX_FIQ_REC_X(x)		(8 * ((x) - 4))
#define ELX_FIQ_REC_LR			(8 + ELX_FIQ_REC_X(19))
#define ELX_FIQ_REC_SP_EL0		(8 + ELX_FIQ_REC_LR)
#define ELX_FIQ_REC_SIZE		(8 + ELX_FIQ_REC_SP_EL0)
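
/*
 * With the definitions above the offsets work out to x4..x18 at 0..112,
 * lr at 128 and sp_el0 at 136, with ELX_FIQ_REC_SIZE = 144. The 8-byte
 * gap between x18 and lr presumably keeps the lr/sp_el0 pair and the
 * record size 16-byte aligned, as required for the stack pointer.
 */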

LOCAL_FUNC elx_fiq , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_FIQ
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save registers on stack that can be corrupted by a call to
	 * a C function
	 */
	/* Make room for struct elx_fiq_rec */
	sub	sp, sp, #ELX_FIQ_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_FIQ_REC_X(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_FIQ_REC_LR]

	bl	thread_check_canaries
	adr	x16, thread_fiq_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_FIQ_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_FIQ_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC elx_fiq