/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2017, Linaro Limited
 */

#include <arm64_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/thread_defs.h>
#include <mm/core_mmu.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

#include "thread_private.h"

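	/*
	 * Compute a pointer to the struct thread_ctx of the thread
	 * currently running on this core: read the running thread index
	 * from the core local record and index the threads[] array.
	 */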
	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldr	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		adr	x\res, threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm

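	/*
	 * Branch to \label if the SPSR value in \reg describes an
	 * exception taken from EL0, that is, from AArch32 or from
	 * AArch64 EL0.
	 */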
	.macro b_if_spsr_is_el0 reg, label
		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
		b.eq	\label
	.endm

LOCAL_FUNC vector_std_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stacks (an error was detected) it will do a
	 * normal "C" return.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_fast_smc
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16
	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry

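/*
 * The vector_cpu_*_entry and vector_system_*_entry functions below all
 * follow the same pattern: call the registered handler and report its
 * return value back to the secure monitor with the corresponding
 * TEESMC_OPTEED_RETURN_*_DONE SMC.
 */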
LOCAL_FUNC vector_cpu_on_entry , :
	adr	x16, thread_cpu_on_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
	adr	x16, thread_cpu_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
	adr	x16, thread_cpu_suspend_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
	adr	x16, thread_cpu_resume_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
	adr	x16, thread_system_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
	adr	x16, thread_system_reset_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.
 *
 * Note that ARM-TF depends on the layout of this vector table; any
 * change in layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table
KEEP_PAGER thread_vector_table


/* void thread_resume(struct thread_ctx_regs *regs) */
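/*
 * Restores the state saved in *regs and erets into the thread, either
 * directly at EL1 or, when SPSR says EL0, via eret_to_el0.
 */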
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3

	b_if_spsr_is_el0 w3, 1f

	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]
	eret

1:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC thread_resume

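/*
 * Entry point for a thread serving a standard SMC. It is entered with
 * the SMC arguments in x0..x7 and with the thread stack selected.
 */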
FUNC thread_std_smc_entry , :
	/* pass x0-x7 in a struct thread_smc_args */
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp

	/* Call the registered handler */
	bl	__thread_std_smc_entry

	/*
	 * Load the returned x0-x3 into preserved registers and skip the
	 * "returned" x4-x7 since they will not be returned to normal
	 * world.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
	add	sp, sp, #THREAD_SMC_ARGS_SIZE

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	bl	thread_get_tmp_sp
	mov	sp, x0

	bl	thread_state_free

	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, x23
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
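/*
 * Suspends the current thread and returns to the normal world to serve
 * the RPC request described by rv[]. Execution resumes at
 * .thread_rpc_return when the normal world calls back to resume this
 * thread.
 */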
FUNC thread_rpc , :
	/* Read daif and create an SPSR */
	mrs	x1, daif
	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	push	x0, xzr
	push	x1, x30
	bl	thread_get_ctx_regs
	ldr	x30, [sp, #8]
	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
	mov	x19, x0

	bl	thread_get_tmp_sp
	pop	x1, xzr		/* Match "push x1, x30" above */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP]
	ldr	x20, [sp]	/* Get pointer to rv[] */
	mov	sp, x0		/* Switch to tmp stack */

	adr	x2, .thread_rpc_return
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	mov	x4, x0		/* Supply thread index */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	load_wregs x20, 0, 1, 3	/* Load rv[] into w1-w3 */
	smc	#0
	b	.		/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * saved in THREAD_CTX_REGS_SP above.
	 *
	 * Execution jumps here from thread_resume above when the RPC has
	 * returned. The IRQ and FIQ bits are restored to what they were
	 * when this function was originally entered.
	 */
	pop	x16, xzr	/* Get pointer to rv[] */
	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
	ret
END_FUNC thread_rpc
KEEP_PAGER thread_rpc

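/* Install the exception vector table (VBAR_EL1) for this core */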
FUNC thread_init_vbar , :
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
 *               unsigned long user_func, unsigned long spsr,
 *               uint32_t *exit_status0, uint32_t *exit_status1)
 *
 */
FUNC __thread_enter_user_mode , :
	ldr	x8, [sp]
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Switch to SP_EL1
	 * Disable exceptions
	 * Save kern sp in x19
	 */
	msr	daifset, #DAIFBIT_ALL
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save kernel stack pointer to ensure that el0_svc() uses
	 * correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	msr	spsr_el1, x6
	/* Set user sp */
	mov	x13, x4		/* Used when running TA in AArch32 */
	msr	sp_el0, x4	/* Used when running TA in AArch64 */
	/* Set user function */
	msr	elr_el1, x5
	/* Set frame pointer (user stack can't be unwound past this point) */
	mov	x29, #0

	/* Jump into user mode */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 * 		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
	str	w1, [x3]
	str	w2, [x4]
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of __thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes
	 * the vector entry point as the parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

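	/*
	 * Used at exception entry from a lower EL: switch TTBR0_EL1 back
	 * to the kernel mode ASID (and, with CFG_CORE_UNMAP_CORE_AT_EL0,
	 * back to the full kernel mapping and its exception vector) and
	 * save x0..x3 in the core local record.
	 */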
	.macro restore_mapping
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/* Temporarily save x0, x1 */
		msr	tpidr_el1, x0
		msr	tpidrro_el0, x1

		/* Update the mapping to use the full kernel mapping */
		mrs	x0, ttbr0_el1
		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb

		/* Jump into the full mapping and continue execution */
		ldr	x0, =1f
		br	x0
	1:

		/* Point VBAR_EL1 at the vector in the full mapping */
		adr	x0, thread_user_kcode_offset
		ldr	x0, [x0]
		mrs	x1, vbar_el1
		add	x1, x1, x0
		msr	vbar_el1, x1
		isb

		/* Restore x0, x1 */
		mrs	x0, tpidr_el1
		mrs	x1, tpidrro_el0
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
#else
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mrs	x0, ttbr0_el1
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	.endm

#define INV_INSN	0
	.section .text.thread_excp_vect
	.align	11, INV_INSN
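/*
 * The exception vector table below must be 2 KiB aligned (hence the
 * .align 11 above) and each entry is limited to 32 instructions
 * (128 bytes), which check_vector_size verifies.
 */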
FUNC thread_excp_vect , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el1_sync_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size el1_sync_sp0

	.align	7, INV_INSN
el1_irq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el1_irq_sp0

	.align	7, INV_INSN
el1_fiq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el1_fiq_sp0

	.align	7, INV_INSN
el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el1_sync_sp1:
	b	el1_sync_sp1
	check_vector_size el1_sync_sp1

	.align	7, INV_INSN
el1_irq_sp1:
	b	el1_irq_sp1
	check_vector_size el1_irq_sp1

	.align	7, INV_INSN
el1_fiq_sp1:
	b	el1_fiq_sp1
	check_vector_size el1_fiq_sp1

	.align	7, INV_INSN
el1_serror_sp1:
	b	el1_serror_sp1
	check_vector_size el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el0_sync_a64:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a64

	.align	7, INV_INSN
el0_irq_a64:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a64

	.align	7, INV_INSN
el0_fiq_a64:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a64

	.align	7, INV_INSN
el0_serror_a64:
	b	el0_serror_a64
	check_vector_size el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el0_sync_a32:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7, INV_INSN
el0_irq_a32:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7, INV_INSN
el0_fiq_a32:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7, INV_INSN
el0_serror_a32:
	b	el0_serror_a32
	check_vector_size el0_serror_a32

#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
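	/*
	 * Invalidate the branch predictor (via the instruction cache
	 * maintenance below) before handling an exception from a lower
	 * EL, as a mitigation for CVE-2017-5715 (Spectre variant 2).
	 */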
	.macro invalidate_branch_predictor
	ic	iallu
	isb
	.endm

	.align	11, INV_INSN
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el1_sync_sp0:
	b	el1_sync_sp0
	check_vector_size workaround_el1_sync_sp0

	.align	7, INV_INSN
workaround_el1_irq_sp0:
	b	el1_irq_sp0
	check_vector_size workaround_el1_irq_sp0

	.align	7, INV_INSN
workaround_el1_fiq_sp0:
	b	el1_fiq_sp0
	check_vector_size workaround_el1_fiq_sp0

	.align	7, INV_INSN
workaround_el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size workaround_el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el1_sync_sp1:
	b	workaround_el1_sync_sp1
	check_vector_size workaround_el1_sync_sp1

	.align	7, INV_INSN
workaround_el1_irq_sp1:
	b	workaround_el1_irq_sp1
	check_vector_size workaround_el1_irq_sp1

	.align	7, INV_INSN
workaround_el1_fiq_sp1:
	b	workaround_el1_fiq_sp1
	check_vector_size workaround_el1_fiq_sp1

	.align	7, INV_INSN
workaround_el1_serror_sp1:
	b	workaround_el1_serror_sp1
	check_vector_size workaround_el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el0_sync_a64:
	invalidate_branch_predictor
	b	el0_sync_a64
	check_vector_size workaround_el0_sync_a64

	.align	7, INV_INSN
workaround_el0_irq_a64:
	invalidate_branch_predictor
	b	el0_irq_a64
	check_vector_size workaround_el0_irq_a64

	.align	7, INV_INSN
workaround_el0_fiq_a64:
	invalidate_branch_predictor
	b	el0_fiq_a64
	check_vector_size workaround_el0_fiq_a64

	.align	7, INV_INSN
workaround_el0_serror_a64:
	b	workaround_el0_serror_a64
	check_vector_size workaround_el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el0_sync_a32:
	invalidate_branch_predictor
	b	el0_sync_a32
	check_vector_size workaround_el0_sync_a32

	.align	7, INV_INSN
workaround_el0_irq_a32:
	invalidate_branch_predictor
	b	el0_irq_a32
	check_vector_size workaround_el0_irq_a32

	.align	7, INV_INSN
workaround_el0_fiq_a32:
	invalidate_branch_predictor
	b	el0_fiq_a32
	check_vector_size workaround_el0_fiq_a32

	.align	7, INV_INSN
workaround_el0_serror_a32:
	b	workaround_el0_serror_a32
	check_vector_size workaround_el0_serror_a32
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

/*
 * We're keeping this code in the same section as the vector to make sure
 * that it's always available.
 */
eret_to_el0:
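	/*
	 * Expects the x0 and x1 values to be returned to EL0 in the core
	 * local record (THREAD_CORE_LOCAL_X0/X1) and ELR_EL1/SPSR_EL1
	 * already set up for the return.
	 */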

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point VBAR_EL1 at the vector in the reduced mapping */
	adr	x0, thread_user_kcode_offset
	ldr	x0, [x0]
	mrs	x1, vbar_el1
	sub	x1, x1, x0
	msr	vbar_el1, x1
	isb

	/* Jump into the reduced mapping and continue execution */
	ldr	x1, =1f
	sub	x1, x1, x0
	br	x1
1:

	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	msr	tpidr_el1, x0

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x0, ttbr0_el1
	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb

	mrs	x0, tpidr_el1
#else
	mrs	x0, ttbr0_el1
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	eret

END_FUNC thread_excp_vect

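/*
 * Handles an SVC (system call) from user mode. Entered from
 * el0_sync_a64/el0_sync_a32 with the original x0..x3 saved in the core
 * local record. Builds a struct thread_svc_regs on the kernel stack and
 * calls tee_svc_handler().
 */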
LOCAL_FUNC el0_svc , :
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	mov	x0, sp

	/*
	 * Unmask native interrupts, SError, and debug exceptions since we
	 * have nothing left in sp_el1. Note that the SVC handler is
	 * expected to re-enable foreign interrupts by itself.
	 */
#if defined(CFG_ARM_GICV3)
	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
#else
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
#endif

	/* Call the handler */
	bl	tee_svc_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This matters when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
	mov	sp, x0
	b_if_spsr_is_el0 w1, 1f
	ldp	x0, x1, [x30, #THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	eret

1:	ldp	x0, x1, [x30, #THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC el0_svc

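/*
 * Synchronous exception taken from EL1. Entered with the original
 * x0..x3 saved in the core local record; saves the full register state
 * on the abort (or tmp) stack and calls abort_handler().
 */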
LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el1_sync_abort

	/*
	 * Synchronous exception from EL0 other than SVC. On entry sp_el0
	 * is in x3 and the original x0..x3 are saved in the core local
	 * record.
	 */
LOCAL_FUNC el0_sync_abort , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in the core local record */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x2 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3

	b_if_spsr_is_el0 w0, 1f

	/* Restore x0 to x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	eret
1:	b	eret_to_el0
END_FUNC el0_sync_abort

/*
 * Handler for a foreign interrupt, that is, an interrupt to be served
 * by the normal world: the current thread is suspended and we return to
 * the normal world with an OPTEE_SMC_RETURN_RPC_FOREIGN_INTR request.
 */
.macro foreign_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend
	mov	w4, w0		/* Supply thread index */

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * functions expect to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	w2, #0
	mov	w3, #0
	/* w4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
.endm

/*
 * This struct is never used from C; it's only here to visualize the
 * layout.
 *
 * struct elx_nintr_rec {
 * 	uint64_t x[19 - 4]; x4..x18
 * 	uint64_t lr;
 * 	uint64_t sp_el0;
 * };
 */
#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)

/*
 * Handler for a native interrupt, that is, an interrupt served by
 * OP-TEE itself: saves the scratch registers on the tmp stack, calls
 * the registered interrupt handler and then returns to the interrupted
 * context.
 */
.macro native_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save registers on stack that can be corrupted by a call to
	 * a C function
	 */
	/* Make room for struct elx_nintr_rec */
	sub	sp, sp, #ELX_NINTR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]

	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	mrs	x0, spsr_el1
	/* Restore x2..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
	b_if_spsr_is_el0 w0, 1f

	/* Restore x0..x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	eret
1:	b	eret_to_el0
.endm
1055
1056LOCAL_FUNC elx_irq , :
1057#if defined(CFG_ARM_GICV3)
1058	native_intr_handler	irq
1059#else
1060	foreign_intr_handler	irq
1061#endif
1062END_FUNC elx_irq
1063
1064LOCAL_FUNC elx_fiq , :
1065#if defined(CFG_ARM_GICV3)
1066	foreign_intr_handler	fiq
1067#else
1068	native_intr_handler	fiq
1069#endif
1070END_FUNC elx_fiq
1071