xref: /optee_os/core/arch/arm/kernel/thread_a64.S (revision f8031323eada2b7439020be316440f11af92ded1)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2015-2017, Linaro Limited
4 */
5
6#include <arm.h>
7#include <arm64_macros.S>
8#include <asm-defines.h>
9#include <asm.S>
10#include <keep.h>
11#include <kernel/thread_defs.h>
12#include <mm/core_mmu.h>
13#include <sm/optee_smc.h>
14#include <sm/teesmc_opteed.h>
15#include <sm/teesmc_opteed_macros.h>
16
17#include "thread_private.h"
18
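	/*
	 * Compute a pointer to the struct thread_ctx of the thread currently
	 * running on this core:
	 *   x<res> = threads + curr_thread * THREAD_CTX_SIZE
	 * where curr_thread is read from the core local storage passed in
	 * \core_local. Clobbers x<tmp0> and x<tmp1>.
	 */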
19	.macro get_thread_ctx core_local, res, tmp0, tmp1
20		ldr	w\tmp0, [\core_local, \
21				#THREAD_CORE_LOCAL_CURR_THREAD]
22		adr	x\res, threads
23		mov	x\tmp1, #THREAD_CTX_SIZE
24		madd	x\res, x\tmp0, x\tmp1, x\res
25	.endm
26
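	/*
	 * Branch to \label if the SPSR value in \reg describes an exception
	 * return to EL0, that is, if the AArch32 (RW) bit is set or if the
	 * AArch64 EL field is 0. Clobbers the condition flags.
	 */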
27	.macro b_if_spsr_is_el0 reg, label
28		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
29		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
30		b.eq	\label
31	.endm
32
33LOCAL_FUNC vector_std_smc_entry , :
34	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
35	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
36	mov	x0, sp
37	bl	thread_handle_std_smc
38	/*
39	 * Normally thread_handle_std_smc() should return via
40	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
41	 * hasn't switched stacks (error detected) it will do a normal "C"
42	 * return.
43	 */
44	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
45	add	sp, sp, #THREAD_SMC_ARGS_SIZE
46	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
47	smc	#0
48	b	.	/* SMC should not return */
49END_FUNC vector_std_smc_entry
50
51LOCAL_FUNC vector_fast_smc_entry , :
52	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
53	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
54	mov	x0, sp
55	bl	thread_handle_fast_smc
56	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
57	add	sp, sp, #THREAD_SMC_ARGS_SIZE
58	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
59	smc	#0
60	b	.	/* SMC should not return */
61END_FUNC vector_fast_smc_entry
62
63LOCAL_FUNC vector_fiq_entry , :
64	/* Secure Monitor received a FIQ and passed control to us. */
65	bl	thread_check_canaries
66	adr	x16, thread_nintr_handler_ptr
67	ldr	x16, [x16]
68	blr	x16
69	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
70	smc	#0
71	b	.	/* SMC should not return */
72END_FUNC vector_fiq_entry
73
74LOCAL_FUNC vector_cpu_on_entry , :
75	adr	x16, thread_cpu_on_handler_ptr
76	ldr	x16, [x16]
77	blr	x16
78	mov	x1, x0
79	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
80	smc	#0
81	b	.	/* SMC should not return */
82END_FUNC vector_cpu_on_entry
83
84LOCAL_FUNC vector_cpu_off_entry , :
85	adr	x16, thread_cpu_off_handler_ptr
86	ldr	x16, [x16]
87	blr	x16
88	mov	x1, x0
89	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
90	smc	#0
91	b	.	/* SMC should not return */
92END_FUNC vector_cpu_off_entry
93
94LOCAL_FUNC vector_cpu_suspend_entry , :
95	adr	x16, thread_cpu_suspend_handler_ptr
96	ldr	x16, [x16]
97	blr	x16
98	mov	x1, x0
99	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
100	smc	#0
101	b	.	/* SMC should not return */
102END_FUNC vector_cpu_suspend_entry
103
104LOCAL_FUNC vector_cpu_resume_entry , :
105	adr	x16, thread_cpu_resume_handler_ptr
106	ldr	x16, [x16]
107	blr	x16
108	mov	x1, x0
109	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
110	smc	#0
111	b	.	/* SMC should not return */
112END_FUNC vector_cpu_resume_entry
113
114LOCAL_FUNC vector_system_off_entry , :
115	adr	x16, thread_system_off_handler_ptr
116	ldr	x16, [x16]
117	blr	x16
118	mov	x1, x0
119	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
120	smc	#0
121	b	.	/* SMC should not return */
122END_FUNC vector_system_off_entry
123
124LOCAL_FUNC vector_system_reset_entry , :
125	adr	x16, thread_system_reset_handler_ptr
126	ldr	x16, [x16]
127	blr	x16
128	mov	x1, x0
129	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
130	smc	#0
131	b	.	/* SMC should not return */
132END_FUNC vector_system_reset_entry
133
134/*
135 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
136 * initialization.
137 *
138 * Note that ARM-TF depends on the layout of this vector table; any
139 * change to the layout has to be synced with ARM-TF.
140 */
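/*
 * For illustration only: the dispatcher on the secure monitor side (such
 * as ARM-TF's opteed) is expected to index this table by fixed entry
 * offsets, roughly as sketched below. The struct and field names here are
 * illustrative and may not match the dispatcher's actual definitions.
 *
 * struct optee_vector_table {
 * 	void (*std_smc_entry)(void);
 * 	void (*fast_smc_entry)(void);
 * 	void (*cpu_on_entry)(void);
 * 	void (*cpu_off_entry)(void);
 * 	void (*cpu_resume_entry)(void);
 * 	void (*cpu_suspend_entry)(void);
 * 	void (*fiq_entry)(void);
 * 	void (*system_off_entry)(void);
 * 	void (*system_reset_entry)(void);
 * };
 */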
141FUNC thread_vector_table , :
142	b	vector_std_smc_entry
143	b	vector_fast_smc_entry
144	b	vector_cpu_on_entry
145	b	vector_cpu_off_entry
146	b	vector_cpu_resume_entry
147	b	vector_cpu_suspend_entry
148	b	vector_fiq_entry
149	b	vector_system_off_entry
150	b	vector_system_reset_entry
151END_FUNC thread_vector_table
152KEEP_PAGER thread_vector_table
153
154
155/* void thread_resume(struct thread_ctx_regs *regs) */
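/*
 * Restores sp, elr_el1, spsr_el1 and the general purpose registers from
 * the supplied struct thread_ctx_regs and returns with eret, either
 * directly to EL1 or via eret_to_el0 when the saved SPSR targets EL0.
 */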
156FUNC thread_resume , :
157	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
158	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
159	mov	sp, x1
160	msr	elr_el1, x2
161	msr	spsr_el1, x3
162
163	b_if_spsr_is_el0 w3, 1f
164
165	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
166	ldr	x0, [x0, THREAD_CTX_REGS_X0]
167	eret
168
1691:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
170	ldr	x0, [x0, THREAD_CTX_REGS_X0]
171
172	msr	spsel, #1
173	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
174	b	eret_to_el0
175END_FUNC thread_resume
176
177FUNC thread_std_smc_entry , :
178	/* pass x0-x7 in a struct thread_smc_args */
179	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
180	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
181	mov	x0, sp
182
183	/* Call the registered handler */
184	bl	__thread_std_smc_entry
185
186	/*
187	 * Load the returned x0-x3 into preserved registers and skip the
188	 * "returned" x4-x7 since they will not be returned to normal
189	 * world.
190	 */
191	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
192	add	sp, sp, #THREAD_SMC_ARGS_SIZE
193
194	/* Mask all maskable exceptions before switching to temporary stack */
195	msr	daifset, #DAIFBIT_ALL
196	bl	thread_get_tmp_sp
197	mov	sp, x0
198
199	bl	thread_state_free
200
201	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
202	mov	x1, x20
203	mov	x2, x21
204	mov	x3, x22
205	mov	x4, x23
206	smc	#0
207	b	.	/* SMC should not return */
208END_FUNC thread_std_smc_entry
209
210/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
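/*
 * Saves the callee-saved registers in the thread context, suspends the
 * thread with thread_state_suspend() and returns to the normal world
 * with rv[0..2] in w1-w3 (TEESMC_OPTEED_RETURN_CALL_DONE in w0). When
 * the thread is resumed, execution continues at .thread_rpc_return below
 * where the values returned by the normal world (w0-w5) are stored back
 * into rv[] before returning to the caller.
 */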
211FUNC thread_rpc , :
212	/* Read daif and create an SPSR */
213	mrs	x1, daif
214	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)
215
216	/* Mask all maskable exceptions before switching to temporary stack */
217	msr	daifset, #DAIFBIT_ALL
218	push	x0, xzr
219	push	x1, x30
220	bl	thread_get_ctx_regs
221	ldr	x30, [sp, #8]
222	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
223	mov	x19, x0
224
225	bl	thread_get_tmp_sp
226	pop	x1, xzr		/* Match "push x1, x30" above */
227	mov	x2, sp
228	str	x2, [x19, #THREAD_CTX_REGS_SP]
229	ldr	x20, [sp]	/* Get pointer to rv[] */
230	mov	sp, x0		/* Switch to tmp stack */
231
232	adr	x2, .thread_rpc_return
233	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
234	bl	thread_state_suspend
235	mov	x4, x0		/* Supply thread index */
236	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
237	load_wregs x20, 0, 1, 3	/* Load rv[] into w1-w3 */
238	smc	#0
239	b	.		/* SMC should not return */
240
241.thread_rpc_return:
242	/*
243	 * At this point the stack pointer has been restored to the value
244	 * stored in THREAD_CTX above.
245	 *
246	 * Execution jumps here from thread_resume above when the RPC has
247	 * returned. The IRQ and FIQ bits are restored to what they were when
248	 * this function was originally entered.
249	 */
250	pop	x16, xzr	/* Get pointer to rv[] */
251	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
252	ret
253END_FUNC thread_rpc
254KEEP_PAGER thread_rpc
255
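/*
 * Selects the exception vector table for this core and writes it to
 * VBAR_EL1. With CFG_CORE_WORKAROUND_SPECTRE_BP_SEC the workaround
 * vector (thread_excp_vect_workaround) is used on the affected Arm
 * Cortex cores checked below, otherwise thread_excp_vect is used.
 */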
256FUNC thread_init_vbar , :
257#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
258	/*
259	 * For unrecognized CPUs we fall back to the vector used for
260	 * unaffected CPUs.
261	 */
262	mrs	x1, midr_el1
263	ubfx	x2, x1, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
264	cmp	x2, #MIDR_IMPLEMENTER_ARM
265	b.ne	1f
266
267	adr	x0, thread_excp_vect_workaround
268	ubfx	x2, x1, #MIDR_PRIMARY_PART_NUM_SHIFT, \
269			#MIDR_PRIMARY_PART_NUM_WIDTH
270	cmp	x2, #CORTEX_A57_PART_NUM
271	b.eq	2f
272	cmp	x2, #CORTEX_A72_PART_NUM
273	b.eq	2f
274	cmp	x2, #CORTEX_A73_PART_NUM
275	b.eq	2f
276	cmp	x2, #CORTEX_A75_PART_NUM
277	b.eq	2f
278#endif
2791:	adr	x0, thread_excp_vect
2802:	msr	vbar_el1, x0
281	ret
282END_FUNC thread_init_vbar
283KEEP_PAGER thread_init_vbar
284
285/*
286 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
287 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
288 *               unsigned long user_func, unsigned long spsr,
289 *               uint32_t *exit_status0, uint32_t *exit_status1)
290 *
291 */
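/*
 * The ninth argument (exit_status1) is passed on the stack and loaded
 * into x8 below. This function returns once user mode exits, through
 * thread_unwind_user_mode() which restores the x19-x30 saved here and
 * returns to our caller.
 */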
292FUNC __thread_enter_user_mode , :
293	ldr	x8, [sp]
294	/*
295	 * Create and fill in the struct thread_user_mode_rec
296	 */
297	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
298	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
299	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
300
301	/*
302	 * Disable exceptions
303	 * Save kernel sp in x19
304	 * Switch to SP_EL1
305	 */
306	msr	daifset, #DAIFBIT_ALL
307	mov	x19, sp
308	msr	spsel, #1
309
310	/*
311	 * Save the kernel stack pointer in the thread context
312	 */
313	/* get pointer to current thread context */
314	get_thread_ctx sp, 21, 20, 22
315	/*
316	 * Save the kernel stack pointer to ensure that el0_svc() uses the
317	 * correct stack pointer
318	 */
319	str	x19, [x21, #THREAD_CTX_KERN_SP]
320
321	/*
322	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
323	 */
324	msr	spsr_el1, x6
325	/* Set user sp */
326	mov	x13, x4		/* Used when running TA in Aarch32 */
327	msr	sp_el0, x4	/* Used when running TA in Aarch64 */
328	/* Set user function */
329	msr	elr_el1, x5
330	/* Set frame pointer (user stack can't be unwound past this point) */
331	mov	x29, #0
332
333	/* Jump into user mode */
334	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
335	b	eret_to_el0
336END_FUNC __thread_enter_user_mode
337
338/*
339 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
340 * 		uint32_t exit_status1);
341 * See description in thread.h
342 */
343FUNC thread_unwind_user_mode , :
344	/* Store the exit status */
345	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
346	str	w1, [x3]
347	str	w2, [x4]
348	/* Restore x19..x30 */
349	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
350	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
351	/* Return from the call of thread_enter_user_mode() */
352	ret
353END_FUNC thread_unwind_user_mode
354
355	/*
356	 * This macro verifies that a given vector doesn't exceed the
357	 * architectural limit of 32 instructions. It is meant to be placed
358	 * immediately after the last instruction in the vector and takes the
359	 * vector entry as its parameter.
360	 */
361	.macro check_vector_size since
362	  .if (. - \since) > (32 * 4)
363	    .error "Vector exceeds 32 instructions"
364	  .endif
365	.endm
366
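	/*
	 * Re-establish the kernel view of the translation tables on entry
	 * from EL0: switch TTBR0_EL1 back to the kernel mode ASID (and, with
	 * CFG_CORE_UNMAP_CORE_AT_EL0, back to the full kernel mapping with a
	 * matching VBAR_EL1), then save x0-x3 in the core local record on
	 * SP_EL1.
	 */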
367	.macro restore_mapping
368#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
369		/* Temporarily save x0, x1 */
370		msr	tpidr_el1, x0
371		msr	tpidrro_el0, x1
372
373		/* Update the mapping to use the full kernel mapping */
374		mrs	x0, ttbr0_el1
375		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
376		/* switch to kernel mode ASID */
377		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
378		msr	ttbr0_el1, x0
379		isb
380
381		/* Jump into the full mapping and continue execution */
382		ldr	x0, =1f
383		br	x0
384	1:
385
386		/* Point VBAR_EL1 at the vector table in the full mapping */
387		adr	x0, thread_user_kcode_offset
388		ldr	x0, [x0]
389		mrs	x1, vbar_el1
390		add	x1, x1, x0
391		msr	vbar_el1, x1
392		isb
393
394		/* Restore x0, x1 */
395		mrs	x0, tpidr_el1
396		mrs	x1, tpidrro_el0
397		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
398#else
399		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
400		mrs	x0, ttbr0_el1
401		/* switch to kernel mode ASID */
402		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
403		msr	ttbr0_el1, x0
404		isb
405#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
406	.endm
407
408#define INV_INSN	0
409	.section .text.thread_excp_vect
410	.align	11, INV_INSN
411FUNC thread_excp_vect , :
412	/* -----------------------------------------------------
413	 * EL1 with SP0 : 0x0 - 0x180
414	 * -----------------------------------------------------
415	 */
416	.align	7, INV_INSN
417el1_sync_sp0:
418	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
419	b	el1_sync_abort
420	check_vector_size el1_sync_sp0
421
422	.align	7, INV_INSN
423el1_irq_sp0:
424	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
425	b	elx_irq
426	check_vector_size el1_irq_sp0
427
428	.align	7, INV_INSN
429el1_fiq_sp0:
430	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
431	b	elx_fiq
432	check_vector_size el1_fiq_sp0
433
434	.align	7, INV_INSN
435el1_serror_sp0:
436	b	el1_serror_sp0
437	check_vector_size el1_serror_sp0
438
439	/* -----------------------------------------------------
440	 * Current EL with SP1: 0x200 - 0x380
441	 * -----------------------------------------------------
442	 */
443	.align	7, INV_INSN
444el1_sync_sp1:
445	b	el1_sync_sp1
446	check_vector_size el1_sync_sp1
447
448	.align	7, INV_INSN
449el1_irq_sp1:
450	b	el1_irq_sp1
451	check_vector_size el1_irq_sp1
452
453	.align	7, INV_INSN
454el1_fiq_sp1:
455	b	el1_fiq_sp1
456	check_vector_size el1_fiq_sp1
457
458	.align	7, INV_INSN
459el1_serror_sp1:
460	b	el1_serror_sp1
461	check_vector_size el1_serror_sp1
462
463	/* -----------------------------------------------------
464	 * Lower EL using AArch64 : 0x400 - 0x580
465	 * -----------------------------------------------------
466	 */
467	.align	7, INV_INSN
468el0_sync_a64:
469	restore_mapping
470
471	mrs	x2, esr_el1
472	mrs	x3, sp_el0
473	lsr	x2, x2, #ESR_EC_SHIFT
474	cmp	x2, #ESR_EC_AARCH64_SVC
475	b.eq	el0_svc
476	b	el0_sync_abort
477	check_vector_size el0_sync_a64
478
479	.align	7, INV_INSN
480el0_irq_a64:
481	restore_mapping
482
483	b	elx_irq
484	check_vector_size el0_irq_a64
485
486	.align	7, INV_INSN
487el0_fiq_a64:
488	restore_mapping
489
490	b	elx_fiq
491	check_vector_size el0_fiq_a64
492
493	.align	7, INV_INSN
494el0_serror_a64:
495	b	el0_serror_a64
496	check_vector_size el0_serror_a64
497
498	/* -----------------------------------------------------
499 * Lower EL using AArch32 : 0x600 - 0x780
500	 * -----------------------------------------------------
501	 */
502	.align	7, INV_INSN
503el0_sync_a32:
504	restore_mapping
505
506	mrs	x2, esr_el1
507	mrs	x3, sp_el0
508	lsr	x2, x2, #ESR_EC_SHIFT
509	cmp	x2, #ESR_EC_AARCH32_SVC
510	b.eq	el0_svc
511	b	el0_sync_abort
512	check_vector_size el0_sync_a32
513
514	.align	7, INV_INSN
515el0_irq_a32:
516	restore_mapping
517
518	b	elx_irq
519	check_vector_size el0_irq_a32
520
521	.align	7, INV_INSN
522el0_fiq_a32:
523	restore_mapping
524
525	b	elx_fiq
526	check_vector_size el0_fiq_a32
527
528	.align	7, INV_INSN
529el0_serror_a32:
530	b	el0_serror_a32
531	check_vector_size el0_serror_a32
532
533#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
534	.macro invalidate_branch_predictor
535	ic	iallu
536	isb
537	.endm
538
539	.align	11, INV_INSN
540	.global thread_excp_vect_workaround
541thread_excp_vect_workaround:
542	/* -----------------------------------------------------
543	 * EL1 with SP0 : 0x0 - 0x180
544	 * -----------------------------------------------------
545	 */
546	.align	7, INV_INSN
547workaround_el1_sync_sp0:
548	b	el1_sync_sp0
549	check_vector_size workaround_el1_sync_sp0
550
551	.align	7, INV_INSN
552workaround_el1_irq_sp0:
553	b	el1_irq_sp0
554	check_vector_size workaround_el1_irq_sp0
555
556	.align	7, INV_INSN
557workaround_el1_fiq_sp0:
558	b	el1_fiq_sp0
559	check_vector_size workaround_el1_fiq_sp0
560
561	.align	7, INV_INSN
562workaround_el1_serror_sp0:
563	b	el1_serror_sp0
564	check_vector_size workaround_el1_serror_sp0
565
566	/* -----------------------------------------------------
567	 * Current EL with SP1: 0x200 - 0x380
568	 * -----------------------------------------------------
569	 */
570	.align	7, INV_INSN
571workaround_el1_sync_sp1:
572	b	workaround_el1_sync_sp1
573	check_vector_size workaround_el1_sync_sp1
574
575	.align	7, INV_INSN
576workaround_el1_irq_sp1:
577	b	workaround_el1_irq_sp1
578	check_vector_size workaround_el1_irq_sp1
579
580	.align	7, INV_INSN
581workaround_el1_fiq_sp1:
582	b	workaround_el1_fiq_sp1
583	check_vector_size workaround_el1_fiq_sp1
584
585	.align	7, INV_INSN
586workaround_el1_serror_sp1:
587	b	workaround_el1_serror_sp1
588	check_vector_size workaround_el1_serror_sp1
589
590	/* -----------------------------------------------------
591	 * Lower EL using AArch64 : 0x400 - 0x580
592	 * -----------------------------------------------------
593	 */
594	.align	7, INV_INSN
595workaround_el0_sync_a64:
596	invalidate_branch_predictor
597	b	el0_sync_a64
598	check_vector_size workaround_el0_sync_a64
599
600	.align	7, INV_INSN
601workaround_el0_irq_a64:
602	invalidate_branch_predictor
603	b	el0_irq_a64
604	check_vector_size workaround_el0_irq_a64
605
606	.align	7, INV_INSN
607workaround_el0_fiq_a64:
608	invalidate_branch_predictor
609	b	el0_fiq_a64
610	check_vector_size workaround_el0_fiq_a64
611
612	.align	7, INV_INSN
613	b	workaround_el0_serror_a64
614	b   	workaround_el0_serror_a64
615	check_vector_size workaround_el0_serror_a64
616
617	/* -----------------------------------------------------
618 * Lower EL using AArch32 : 0x600 - 0x780
619	 * -----------------------------------------------------
620	 */
621	.align	7, INV_INSN
622workaround_el0_sync_a32:
623	invalidate_branch_predictor
624	b	el0_sync_a32
625	check_vector_size workaround_el0_sync_a32
626
627	.align	7, INV_INSN
628workaround_el0_irq_a32:
629	invalidate_branch_predictor
630	b	el0_irq_a32
631	check_vector_size workaround_el0_irq_a32
632
633	.align	7, INV_INSN
634workaround_el0_fiq_a32:
635	invalidate_branch_predictor
636	b	el0_fiq_a32
637	check_vector_size workaround_el0_fiq_a32
638
639	.align	7, INV_INSN
640workaround_el0_serror_a32:
641	b	workaround_el0_serror_a32
642	check_vector_size workaround_el0_serror_a32
643#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
644
645/*
646 * We're keeping this code in the same section as the vector to make sure
647 * that it's always available.
648 */
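/*
 * Expects x0 and x1 to be saved in THREAD_CORE_LOCAL_X0/X1 with SP_EL1
 * selected. Switches TTBR0_EL1 to the user mode ASID (and, with
 * CFG_CORE_UNMAP_CORE_AT_EL0, to the reduced mapping with VBAR_EL1
 * adjusted to match), restores x0 and x1 and returns to EL0 with eret.
 */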
649eret_to_el0:
650
651#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
652	/* Point VBAR_EL1 at the vector table in the reduced mapping */
653	adr	x0, thread_user_kcode_offset
654	ldr	x0, [x0]
655	mrs	x1, vbar_el1
656	sub	x1, x1, x0
657	msr	vbar_el1, x1
658	isb
659
660	/* Jump into the reduced mapping and continue execution */
661	ldr	x1, =1f
662	sub	x1, x1, x0
663	br	x1
6641:
665
666	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
667	msr	tpidr_el1, x0
668
669	/* Update the mapping to exclude the full kernel mapping */
670	mrs	x0, ttbr0_el1
671	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
672	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
673	msr	ttbr0_el1, x0
674	isb
675
676	mrs	x0, tpidr_el1
677#else
678	mrs	x0, ttbr0_el1
679	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
680	msr	ttbr0_el1, x0
681	isb
682	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
683#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
684
685	eret
686
687END_FUNC thread_excp_vect
688
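/*
 * Entered from el0_sync_a64/el0_sync_a32 with the original x0-x3 saved
 * in the core local record (SP_EL1). Switches to the kernel stack saved
 * in the thread context, builds a struct thread_svc_regs on it, calls
 * tee_svc_handler() and finally restores the (possibly updated)
 * registers and returns to user mode.
 */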
689LOCAL_FUNC el0_svc , :
690	/* get pointer to current thread context in x0 */
691	get_thread_ctx sp, 0, 1, 2
692	/* load saved kernel sp */
693	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
694	/* Keep pointer to initial record in x1 */
695	mov	x1, sp
696	/* Switch to SP_EL0 and restore kernel sp */
697	msr	spsel, #0
698	mov	x2, sp	/* Save SP_EL0 */
699	mov	sp, x0
700
701	/* Make room for struct thread_svc_regs */
702	sub	sp, sp, #THREAD_SVC_REG_SIZE
703	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]
704
705	/* Restore x0-x3 */
706	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
707	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]
708
709	/* Prepare the argument for the handler */
710	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
711	mrs	x0, elr_el1
712	mrs	x1, spsr_el1
713	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
714	mov	x0, sp
715
716	/*
717	 * Unmask native interrupts, Serror, and debug exceptions since we have
718	 * nothing left in sp_el1. Note that the SVC handler is expected to
719	 * re-enable foreign interrupts by itself.
720	 */
721#if defined(CFG_ARM_GICV3)
722	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
723#else
724	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
725#endif
726
727	/* Call the handler */
728	bl	tee_svc_handler
729
730	/* Mask all maskable exceptions since we're switching back to sp_el1 */
731	msr	daifset, #DAIFBIT_ALL
732
733	/*
734	 * Save the kernel sp we had at the beginning of this function.
735	 * This is needed when this TA has called another TA, because
736	 * __thread_enter_user_mode() also saves the stack pointer in this
737	 * field.
738	 */
739	msr	spsel, #1
740	get_thread_ctx sp, 0, 1, 2
741	msr	spsel, #0
742	add	x1, sp, #THREAD_SVC_REG_SIZE
743	str	x1, [x0, #THREAD_CTX_KERN_SP]
744
745	/* Restore registers to the required state and return */
746	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
747	msr	elr_el1, x0
748	msr	spsr_el1, x1
749	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
750	mov	x30, sp
751	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
752	mov	sp, x0
753	b_if_spsr_is_el0 w1, 1f
754	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
755	ldr	x30, [x30, #THREAD_SVC_REG_X30]
756
757	eret
758
7591:	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
760	ldr	x30, [x30, #THREAD_SVC_REG_X30]
761
762	msr	spsel, #1
763	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
764	b	eret_to_el0
765END_FUNC el0_svc
766
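/*
 * Synchronous exception taken from EL1. Saves the state in a struct
 * thread_abt_regs on the abort stack, or on the tmp stack if this is a
 * nested abort, and calls abort_handler().
 */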
767LOCAL_FUNC el1_sync_abort , :
768	mov	x0, sp
769	msr	spsel, #0
770	mov	x3, sp		/* Save original sp */
771
772	/*
773	 * Update core local flags.
774	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
775	 */
776	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
777	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
778	orr	w1, w1, #THREAD_CLF_ABORT
779	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
780			.Lsel_tmp_sp
781
782	/* Select abort stack */
783	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
784	b	.Lset_sp
785
786.Lsel_tmp_sp:
787	/* Select tmp stack */
788	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
789	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */
790
791.Lset_sp:
792	mov	sp, x2
793	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
794
795	/*
796	 * Save state on stack
797	 */
798	sub	sp, sp, #THREAD_ABT_REGS_SIZE
799	mrs	x2, spsr_el1
800	/* Store spsr, sp_el0 */
801	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
802	/* Store original x0, x1 */
803	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
804	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
805	/* Store original x2, x3 and x4 to x29 */
806	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
807	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
808	/* Store x30, elr_el1 */
809	mrs	x0, elr_el1
810	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
811
812	/*
813	 * Call handler
814	 */
815	mov	x0, #0
816	mov	x1, sp
817	bl	abort_handler
818
819	/*
820	 * Restore state from stack
821	 */
822	/* Load x30, elr_el1 */
823	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
824	msr	elr_el1, x0
825	/* Load x0 to x29 */
826	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
827	/* Switch to SP_EL1 */
828	msr	spsel, #1
829	/* Save x0 to x3 in CORE_LOCAL */
830	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
831	/* Restore spsr_el1 and sp_el0 */
832	mrs	x3, sp_el0
833	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
834	msr	spsr_el1, x0
835	msr	sp_el0, x1
836
837	/* Update core local flags */
838	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
839	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
840	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
841
842	/* Restore x0 to x3 */
843	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
844
845	/* Return from exception */
846	eret
847END_FUNC el1_sync_abort
848
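/*
 * Synchronous exception taken from EL0 that isn't an SVC. Entered with
 * the original x0-x3 saved in the core local record and the user sp_el0
 * in x3. Saves the state in a struct thread_abt_regs on the abort stack
 * and calls abort_handler().
 */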
849	/* sp_el0 in x3 */
850LOCAL_FUNC el0_sync_abort , :
851	/*
852	 * Update core local flags
853	 */
854	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
855	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
856	orr	w1, w1, #THREAD_CLF_ABORT
857	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
858
859	/*
860	 * Save state on stack
861	 */
862
863	/* load abt_stack_va_end */
864	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
865	/* Keep pointer to initial record in x0 */
866	mov	x0, sp
867	/* Switch to SP_EL0 */
868	msr	spsel, #0
869	mov	sp, x1
870	sub	sp, sp, #THREAD_ABT_REGS_SIZE
871	mrs	x2, spsr_el1
872	/* Store spsr, sp_el0 */
873	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
874	/* Store original x0, x1 */
875	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
876	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
877	/* Store original x2, x3 and x4 to x29 */
878	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
879	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
880	/* Store x30, elr_el1 */
881	mrs	x0, elr_el1
882	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
883
884	/*
885	 * Call handler
886	 */
887	mov	x0, #0
888	mov	x1, sp
889	bl	abort_handler
890
891	/*
892	 * Restore state from stack
893	 */
894
895	/* Load x30, elr_el1 */
896	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
897	msr	elr_el1, x0
898	/* Load x0 to x29 */
899	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
900	/* Switch to SP_EL1 */
901	msr	spsel, #1
902	/* Save x0 to x3 in EL1_REC */
903	/* Save x0 to x3 in CORE_LOCAL */
904	/* Restore spsr_el1 and sp_el0 */
905	mrs	x3, sp_el0
906	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
907	msr	spsr_el1, x0
908	msr	sp_el0, x1
909
910	/* Update core local flags */
911	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
912	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
913	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
914
915	/* Restore x2 to x3 */
916	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
917
918	b_if_spsr_is_el0 w0, 1f
919
920	/* Restore x0 to x1 */
921	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
922
923	/* Return from exception */
924	eret
9251:	b	eret_to_el0
926END_FUNC el0_sync_abort
927
928/* Handler for foreign interrupts. */
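/*
 * Saves the thread state in the current struct thread_ctx, suspends the
 * thread with thread_state_suspend() and does an SMC with
 * TEESMC_OPTEED_RETURN_CALL_DONE and OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
 * so that the interrupt can be handled by the normal world, which later
 * resumes the thread.
 */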
929.macro foreign_intr_handler mode:req
930	/*
931	 * Update core local flags
932	 */
933	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
934	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
935	orr	w1, w1, #THREAD_CLF_TMP
936	.ifc	\mode\(),fiq
937	orr	w1, w1, #THREAD_CLF_FIQ
938	.else
939	orr	w1, w1, #THREAD_CLF_IRQ
940	.endif
941	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
942
943	/* get pointer to current thread context in x0 */
944	get_thread_ctx sp, 0, 1, 2
945	/* Keep original SP_EL0 */
946	mrs	x2, sp_el0
947
948	/* Store original sp_el0 */
949	str	x2, [x0, #THREAD_CTX_REGS_SP]
950	/* store x4..x30 */
951	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
952	/* Load original x0..x3 into x10..x13 */
953	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
954	/* Save original x0..x3 */
955	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13
956
957	/* load tmp_stack_va_end */
958	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
959	/* Switch to SP_EL0 */
960	msr	spsel, #0
961	mov	sp, x1
962
963	/*
964	 * Mark current thread as suspended
965	 */
966	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
967	mrs	x1, spsr_el1
968	mrs	x2, elr_el1
969	bl	thread_state_suspend
970	mov	w4, w0		/* Supply thread index */
971
972	/* Update core local flags */
973	/* Switch to SP_EL1 */
974	msr	spsel, #1
975	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
976	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
977	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
978	msr	spsel, #0
979
980	/*
981	 * Note that we're exiting with SP_EL0 selected since the entry
982	 * functions expect SP_EL0 to be selected with the tmp stack
983	 * set.
984	 */
985
986	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
987	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
988	mov	w2, #0
989	mov	w3, #0
990	/* w4 is already filled in above */
991	smc	#0
992	b	.	/* SMC should not return */
993.endm
994
995/*
996 * This struct is never used from C; it's only here to visualize the
997 * layout.
998 *
999 * struct elx_nintr_rec {
1000 * 	uint64_t x[19 - 4]; x4..x18
1001 * 	uint64_t lr;
1002 * 	uint64_t sp_el0;
1003 * };
1004 */
1005#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
1006#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
1007#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
1008#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)
1009
1010/* Handler for native interrupts. */
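/*
 * Saves the registers that a C call may clobber (x4-x18 and lr, x0-x3
 * are already saved in the core local record) together with the original
 * sp_el0 in a struct elx_nintr_rec on the tmp stack, calls the handler
 * registered in thread_nintr_handler_ptr and returns from the exception
 * with eret (or via eret_to_el0 if the exception was taken from EL0).
 */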
1011.macro native_intr_handler mode:req
1012	/*
1013	 * Update core local flags
1014	 */
1015	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1016	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
1017	.ifc	\mode\(),fiq
1018	orr	w1, w1, #THREAD_CLF_FIQ
1019	.else
1020	orr	w1, w1, #THREAD_CLF_IRQ
1021	.endif
1022	orr	w1, w1, #THREAD_CLF_TMP
1023	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1024
1025	/* load tmp_stack_va_end */
1026	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
1027	/* Keep original SP_EL0 */
1028	mrs	x2, sp_el0
1029	/* Switch to SP_EL0 */
1030	msr	spsel, #0
1031	mov	sp, x1
1032
1033	/*
1034	 * Save on the stack the registers that may be clobbered by a call
1035	 * to a C function
1036	 */
1037	/* Make room for struct elx_nintr_rec */
1038	sub	sp, sp, #ELX_NINTR_REC_SIZE
1039	/* Store x4..x18 */
1040	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
1041	/* Store lr and original sp_el0 */
1042	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]
1043
1044	bl	thread_check_canaries
1045	adr	x16, thread_nintr_handler_ptr
1046	ldr	x16, [x16]
1047	blr	x16
1048
1049	/*
1050	 * Restore registers
1051	 */
1052	/* Restore x4..x18 */
1053	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
1054	/* Load lr and original sp_el0 */
1055	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
1056	/* Restore SP_EL0 */
1057	mov	sp, x2
1058	/* Switch back to SP_EL1 */
1059	msr	spsel, #1
1060
1061	/* Update core local flags */
1062	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
1063	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
1064	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
1065
1066	mrs	x0, spsr_el1
1067	/* Restore x2..x3 */
1068	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
1069	b_if_spsr_is_el0 w0, 1f
1070
1071	/* Restore x0..x1 */
1072	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
1073
1074	/* Return from exception */
1075	eret
10761:	b	eret_to_el0
1077.endm
1078
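/*
 * With CFG_ARM_GICV3 native interrupts are delivered as IRQ and foreign
 * interrupts as FIQ; without it the roles are reversed. elx_irq and
 * elx_fiq below select the matching handler accordingly.
 */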
1079LOCAL_FUNC elx_irq , :
1080#if defined(CFG_ARM_GICV3)
1081	native_intr_handler	irq
1082#else
1083	foreign_intr_handler	irq
1084#endif
1085END_FUNC elx_irq
1086
1087LOCAL_FUNC elx_fiq , :
1088#if defined(CFG_ARM_GICV3)
1089	foreign_intr_handler	fiq
1090#else
1091	native_intr_handler	fiq
1092#endif
1093END_FUNC elx_fiq
1094