xref: /optee_os/core/arch/arm/kernel/thread_a64.S (revision 657d02f24a61b30fb09abf9ab97fe553dd7fd418)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2015-2017, Linaro Limited
4 */
5
6#include <arm64_macros.S>
7#include <arm.h>
8#include <asm.S>
9#include <generated/asm-defines.h>
10#include <keep.h>
11#include <kernel/thread_defs.h>
12#include <mm/core_mmu.h>
13#include <sm/optee_smc.h>
14#include <sm/teesmc_opteed.h>
15#include <sm/teesmc_opteed_macros.h>
16
17#include "thread_private.h"
18
19	.macro get_thread_ctx core_local, res, tmp0, tmp1
20		ldr	w\tmp0, [\core_local, \
21				#THREAD_CORE_LOCAL_CURR_THREAD]
22		adr	x\res, threads
23		mov	x\tmp1, #THREAD_CTX_SIZE
24		madd	x\res, x\tmp0, x\tmp1, x\res
25	.endm
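	/*
	 * For reference, get_thread_ctx computes roughly the following C
	 * (a sketch, assuming "threads" is the global array of struct
	 * thread_ctx and curr_thread the index kept in the core local
	 * record):
	 *
	 *	res = (vaddr_t)threads +
	 *	      core_local->curr_thread * THREAD_CTX_SIZE;
	 *
	 * i.e. res = &threads[core_local->curr_thread], which is what the
	 * madd instruction evaluates.
	 */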
26
27	.macro b_if_spsr_is_el0 reg, label
28		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
29		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
30		b.eq	\label
31	.endm
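	/*
	 * b_if_spsr_is_el0 branches to \label when \reg holds an SPSR
	 * value describing EL0, roughly (a sketch, with a hypothetical
	 * helper standing in for the AArch32 test):
	 *
	 *	if (spsr_is_aarch32(spsr) ||
	 *	    ((spsr >> SPSR_64_MODE_EL_SHIFT) &
	 *	     SPSR_64_MODE_EL_MASK) == 0)
	 *		goto label;
	 */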
32
33LOCAL_FUNC vector_std_smc_entry , :
34	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
35	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
36	mov	x0, sp
37	bl	thread_handle_std_smc
38	/*
39	 * Normally thread_handle_std_smc() should return via
40	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
41	 * hasn't switched stacks (an error was detected) it will do a
42	 * normal "C" return.
43	 */
44	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
45	add	sp, sp, #THREAD_SMC_ARGS_SIZE
46	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
47	smc	#0
48	b	.	/* SMC should not return */
49END_FUNC vector_std_smc_entry
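/*
 * The THREAD_SMC_ARGS_* offsets used above index into struct
 * thread_smc_args (kernel/thread.h), assumed here to be eight
 * consecutive 64-bit slots, one per SMC register:
 *
 * struct thread_smc_args {
 * 	uint64_t a[8];	a0..a7
 * };
 *
 * store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7 spills x0-x7 into a0-a7 and
 * load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8 reads a0-a7 back into x1-x8
 * for the return SMC, where x0 carries TEESMC_OPTEED_RETURN_CALL_DONE.
 */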
50
51LOCAL_FUNC vector_fast_smc_entry , :
52	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
53	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
54	mov	x0, sp
55	bl	thread_handle_fast_smc
56	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
57	add	sp, sp, #THREAD_SMC_ARGS_SIZE
58	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
59	smc	#0
60	b	.	/* SMC should not return */
61END_FUNC vector_fast_smc_entry
62
63LOCAL_FUNC vector_fiq_entry , :
64	/* Secure Monitor received a FIQ and passed control to us. */
65	bl	thread_check_canaries
66	adr	x16, thread_nintr_handler_ptr
67	ldr	x16, [x16]
68	blr	x16
69	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
70	smc	#0
71	b	.	/* SMC should not return */
72END_FUNC vector_fiq_entry
73
74LOCAL_FUNC vector_cpu_on_entry , :
75	adr	x16, thread_cpu_on_handler_ptr
76	ldr	x16, [x16]
77	blr	x16
78	mov	x1, x0
79	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
80	smc	#0
81	b	.	/* SMC should not return */
82END_FUNC vector_cpu_on_entry
83
84LOCAL_FUNC vector_cpu_off_entry , :
85	adr	x16, thread_cpu_off_handler_ptr
86	ldr	x16, [x16]
87	blr	x16
88	mov	x1, x0
89	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
90	smc	#0
91	b	.	/* SMC should not return */
92END_FUNC vector_cpu_off_entry
93
94LOCAL_FUNC vector_cpu_suspend_entry , :
95	adr	x16, thread_cpu_suspend_handler_ptr
96	ldr	x16, [x16]
97	blr	x16
98	mov	x1, x0
99	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
100	smc	#0
101	b	.	/* SMC should not return */
102END_FUNC vector_cpu_suspend_entry
103
104LOCAL_FUNC vector_cpu_resume_entry , :
105	adr	x16, thread_cpu_resume_handler_ptr
106	ldr	x16, [x16]
107	blr	x16
108	mov	x1, x0
109	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
110	smc	#0
111	b	.	/* SMC should not return */
112END_FUNC vector_cpu_resume_entry
113
114LOCAL_FUNC vector_system_off_entry , :
115	adr	x16, thread_system_off_handler_ptr
116	ldr	x16, [x16]
117	blr	x16
118	mov	x1, x0
119	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
120	smc	#0
121	b	.	/* SMC should not return */
122END_FUNC vector_system_off_entry
123
124LOCAL_FUNC vector_system_reset_entry , :
125	adr	x16, thread_system_reset_handler_ptr
126	ldr	x16, [x16]
127	blr	x16
128	mov	x1, x0
129	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
130	smc	#0
131	b	.	/* SMC should not return */
132END_FUNC vector_system_reset_entry
133
134/*
135 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
136 * initialization.
137 *
138 * Note that ARM-TF depends on the layout of this vector table; any
139 * change in layout has to be synced with ARM-TF.
140 */
141FUNC thread_vector_table , :
142	b	vector_std_smc_entry
143	b	vector_fast_smc_entry
144	b	vector_cpu_on_entry
145	b	vector_cpu_off_entry
146	b	vector_cpu_resume_entry
147	b	vector_cpu_suspend_entry
148	b	vector_fiq_entry
149	b	vector_system_off_entry
150	b	vector_system_reset_entry
151END_FUNC thread_vector_table
152KEEP_PAGER thread_vector_table
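/*
 * Since each entry is a single 4-byte branch, the entries end up at
 * fixed offsets from thread_vector_table:
 *
 *	+0x00 std_smc		+0x0c cpu_off		+0x18 fiq
 *	+0x04 fast_smc		+0x10 cpu_resume	+0x1c system_off
 *	+0x08 cpu_on		+0x14 cpu_suspend	+0x20 system_reset
 *
 * which is the layout the opteed dispatcher in ARM-TF relies on, hence
 * the note above about keeping the two in sync.
 */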
153
154
155/* void thread_resume(struct thread_ctx_regs *regs) */
156FUNC thread_resume , :
157	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
158	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
159	mov	sp, x1
160	msr	elr_el1, x2
161	msr	spsr_el1, x3
162
163	b_if_spsr_is_el0 w3, 1f
164
165	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
166	ldr	x0, [x0, THREAD_CTX_REGS_X0]
167	eret
168
1691:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
170	ldr	x0, [x0, THREAD_CTX_REGS_X0]
171
172	msr	spsel, #1
173	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
174	b	eret_to_el0
175END_FUNC thread_resume
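/*
 * In C-like terms thread_resume() does roughly the following (a sketch;
 * the field names are assumed, the offsets come from the
 * THREAD_CTX_REGS_* constants used above):
 *
 *	sp = regs->sp;
 *	elr_el1 = regs->pc;
 *	spsr_el1 = regs->cpsr;
 *	x0..x30 = regs->x[0..30];
 *	eret;
 *
 * with the detour through eret_to_el0 when the saved SPSR says the
 * resumed context runs at EL0.
 */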
176
177FUNC thread_std_smc_entry , :
178	/* pass x0-x7 in a struct thread_smc_args */
179	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
180	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
181	mov	x0, sp
182
183	/* Call the registered handler */
184	bl	__thread_std_smc_entry
185
186	/*
187	 * Load the returned x0-x3 into preserved registers and skip the
188	 * "returned" x4-x7 since they will not be returned to the normal
189	 * world.
190	 */
191	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
192	add	sp, sp, #THREAD_SMC_ARGS_SIZE
193
194	/* Mask all maskable exceptions before switching to temporary stack */
195	msr	daifset, #DAIFBIT_ALL
196	bl	thread_get_tmp_sp
197	mov	sp, x0
198
199	bl	thread_state_free
200
201	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
202	mov	x1, x20
203	mov	x2, x21
204	mov	x3, x22
205	mov	x4, x23
206	smc	#0
207	b	.	/* SMC should not return */
208END_FUNC thread_std_smc_entry
209
210/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
211FUNC thread_rpc , :
212	/* Read daif and create an SPSR */
213	mrs	x1, daif
214	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)
215
216	/* Mask all maskable exceptions before switching to temporary stack */
217	msr	daifset, #DAIFBIT_ALL
218	push	x0, xzr
219	push	x1, x30
220	bl	thread_get_ctx_regs
221	ldr	x30, [sp, #8]
222	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
223	mov	x19, x0
224
225	bl	thread_get_tmp_sp
226	pop	x1, xzr		/* Match "push x1, x30" above */
227	mov	x2, sp
228	str	x2, [x19, #THREAD_CTX_REGS_SP]
229	ldr	x20, [sp]	/* Get pointer to rv[] */
230	mov	sp, x0		/* Switch to tmp stack */
231
232	adr	x2, .thread_rpc_return
233	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
234	bl	thread_state_suspend
235	mov	x4, x0		/* Supply thread index */
236	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
237	load_wregs x20, 0, 1, 3	/* Load rv[] into w0-w2 */
238	smc	#0
239	b	.		/* SMC should not return */
240
241.thread_rpc_return:
242	/*
243	 * At this point the stack pointer has been restored to the value
244	 * saved in THREAD_CTX_REGS_SP above.
245	 *
246	 * Execution jumps here from thread_resume() above when the RPC has
247	 * returned. The IRQ and FIQ bits are restored to what they were
248	 * when this function was originally entered.
249	 */
250	pop	x16, xzr	/* Get pointer to rv[] */
251	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
252	ret
253END_FUNC thread_rpc
254KEEP_PAGER thread_rpc
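/*
 * Calling convention of thread_rpc() as implemented above: on entry
 * rv[0..2] hold the values passed to normal world in w1-w3 (w0 carries
 * TEESMC_OPTEED_RETURN_CALL_DONE and w4 the thread index); when the
 * thread is resumed, w0-w5 from normal world are written back into
 * rv[0..5]. A hedged usage sketch from C:
 *
 *	uint32_t rv[THREAD_RPC_NUM_ARGS] = { 0 };
 *
 *	rv[0] = OPTEE_SMC_RETURN_RPC_CMD;	for example
 *	thread_rpc(rv);
 *	rv[] now holds what normal world handed back
 */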
255
256FUNC thread_smc , :
257	smc	#0
258	ret
259END_FUNC thread_smc
260
261FUNC thread_init_vbar , :
262	msr	vbar_el1, x0
263	ret
264END_FUNC thread_init_vbar
265KEEP_PAGER thread_init_vbar
266
267/*
268 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
269 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
270 *               unsigned long user_func, unsigned long spsr,
271 *               uint32_t *exit_status0, uint32_t *exit_status1)
272 *
273 */
274FUNC __thread_enter_user_mode , :
275	ldr	x8, [sp]
276	/*
277	 * Create and fill in the struct thread_user_mode_rec
278	 */
279	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
280	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
281	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
282
283	/*
284	 * Switch to SP_EL1
285	 * Disable exceptions
286	 * Save kern sp in x19
287	 */
288	msr	daifset, #DAIFBIT_ALL
289	mov	x19, sp
290	msr	spsel, #1
291
292	/*
293	 * Save the kernel stack pointer in the thread context
294	 */
295	/* get pointer to current thread context */
296	get_thread_ctx sp, 21, 20, 22
297	/*
298	 * Save the kernel stack pointer to ensure that el0_svc() uses the
299	 * correct stack pointer
300	 */
301	str	x19, [x21, #THREAD_CTX_KERN_SP]
302
303	/*
304	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
305	 */
306	msr	spsr_el1, x6
307	/* Set user sp */
308	mov	x13, x4		/* Used when running the TA in AArch32 */
309	msr	sp_el0, x4	/* Used when running the TA in AArch64 */
310	/* Set user function */
311	msr	elr_el1, x5
312	/* Set frame pointer (user stack can't be unwound past this point) */
313	mov x29, #0
314
315	/* Jump into user mode */
316	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
317	b eret_to_el0
318END_FUNC __thread_enter_user_mode
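/*
 * The record created above lives on the kernel stack and is assumed to
 * match struct thread_user_mode_rec (thread_private.h), roughly:
 *
 * struct thread_user_mode_rec {
 * 	uint64_t exit_status0_ptr;
 * 	uint64_t exit_status1_ptr;
 * 	uint64_t x[31 - 19];	x19..x30
 * };
 *
 * thread_unwind_user_mode() below pops it again and returns to the
 * caller of __thread_enter_user_mode().
 */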
319
320/*
321 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
322 * 		uint32_t exit_status1);
323 * See description in thread.h
324 */
325FUNC thread_unwind_user_mode , :
326	/* Store the exit status */
327	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
328	str	w1, [x3]
329	str	w2, [x4]
330	/* Restore x19..x30 */
331	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
332	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
333	/* Return from the call of thread_enter_user_mode() */
334	ret
335END_FUNC thread_unwind_user_mode
336
337	/*
338	 * This macro verifies that a given vector doesn't exceed the
339	 * architectural limit of 32 instructions. It is meant to be placed
340	 * immediately after the last instruction in the vector and takes
341	 * the vector entry as its parameter.
342	 */
343	.macro check_vector_size since
344	  .if (. - \since) > (32 * 4)
345	    .error "Vector exceeds 32 instructions"
346	  .endif
347	.endm
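	/*
	 * Worked out: each AArch64 instruction is 4 bytes and each vector
	 * slot spans 0x80 bytes, so the limit checked here is
	 * 32 * 4 = 128 = 0x80 bytes per vector.
	 */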
348
349	.macro restore_mapping
350#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
351		/* Temporarily save x0, x1 */
352		msr	tpidr_el1, x0
353		msr	tpidrro_el0, x1
354
355		/* Update the mapping to use the full kernel mapping */
356		mrs	x0, ttbr0_el1
357		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
358		/* switch to kernel mode ASID */
359		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
360		msr	ttbr0_el1, x0
361		isb
362
363		/* Jump into the full mapping and continue execution */
364		ldr	x0, =1f
365		br	x0
366	1:
367
368		/* Point vbar_el1 at the vector in the full mapping */
369		adr	x0, thread_user_kcode_offset
370		ldr	x0, [x0]
371		mrs	x1, vbar_el1
372		add	x1, x1, x0
373		msr	vbar_el1, x1
374		isb
375
376#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
377		/*
378		 * Update the SP with thread_user_kdata_sp_offset as
379		 * described in init_user_kcode().
380		 */
381		adr	x0, thread_user_kdata_sp_offset
382		ldr	x0, [x0]
383		add	sp, sp, x0
384#endif
385
386		/* Restore x0, x1 */
387		mrs	x0, tpidr_el1
388		mrs	x1, tpidrro_el0
389		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
390#else
391		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
392		mrs	x0, ttbr0_el1
393		/* switch to kernel mode ASID */
394		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
395		msr	ttbr0_el1, x0
396		isb
397#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
398	.endm
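	/*
	 * With CFG_CORE_UNMAP_CORE_AT_EL0, restore_mapping amounts to
	 * roughly the following (a sketch; tpidr_el1/tpidrro_el0 are only
	 * used as scratch space for x0/x1):
	 *
	 *	ttbr0_el1 = (ttbr0_el1 - CORE_MMU_L1_TBL_OFFSET) &
	 *		    ~BIT(TTBR_ASID_SHIFT);	kernel tables + ASID
	 *	vbar_el1 += thread_user_kcode_offset;	full-mapping vectors
	 *
	 * Without that config only the ASID bit is cleared. eret_to_el0
	 * below performs the inverse before returning to user mode.
	 */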
399
400#define INV_INSN	0
401	.section .text.thread_excp_vect
402	.align	11, INV_INSN
403FUNC thread_excp_vect , :
404	/* -----------------------------------------------------
405	 * EL1 with SP0 : 0x0 - 0x180
406	 * -----------------------------------------------------
407	 */
408	.align	7, INV_INSN
409el1_sync_sp0:
410	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
411	b	el1_sync_abort
412	check_vector_size el1_sync_sp0
413
414	.align	7, INV_INSN
415el1_irq_sp0:
416	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
417	b	elx_irq
418	check_vector_size el1_irq_sp0
419
420	.align	7, INV_INSN
421el1_fiq_sp0:
422	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
423	b	elx_fiq
424	check_vector_size el1_fiq_sp0
425
426	.align	7, INV_INSN
427el1_serror_sp0:
428	b	el1_serror_sp0
429	check_vector_size el1_serror_sp0
430
431	/* -----------------------------------------------------
432	 * Current EL with SP1: 0x200 - 0x380
433	 * -----------------------------------------------------
434	 */
435	.align	7, INV_INSN
436el1_sync_sp1:
437	b	el1_sync_sp1
438	check_vector_size el1_sync_sp1
439
440	.align	7, INV_INSN
441el1_irq_sp1:
442	b	el1_irq_sp1
443	check_vector_size el1_irq_sp1
444
445	.align	7, INV_INSN
446el1_fiq_sp1:
447	b	el1_fiq_sp1
448	check_vector_size el1_fiq_sp1
449
450	.align	7, INV_INSN
451el1_serror_sp1:
452	b	el1_serror_sp1
453	check_vector_size el1_serror_sp1
454
455	/* -----------------------------------------------------
456	 * Lower EL using AArch64 : 0x400 - 0x580
457	 * -----------------------------------------------------
458	 */
459	.align	7, INV_INSN
460el0_sync_a64:
461	restore_mapping
462
463	mrs	x2, esr_el1
464	mrs	x3, sp_el0
465	lsr	x2, x2, #ESR_EC_SHIFT
466	cmp	x2, #ESR_EC_AARCH64_SVC
467	b.eq	el0_svc
468	b	el0_sync_abort
469	check_vector_size el0_sync_a64
470
471	.align	7, INV_INSN
472el0_irq_a64:
473	restore_mapping
474
475	b	elx_irq
476	check_vector_size el0_irq_a64
477
478	.align	7, INV_INSN
479el0_fiq_a64:
480	restore_mapping
481
482	b	elx_fiq
483	check_vector_size el0_fiq_a64
484
485	.align	7, INV_INSN
486el0_serror_a64:
487	b	el0_serror_a64
488	check_vector_size el0_serror_a64
489
490	/* -----------------------------------------------------
491	 * Lower EL using AArch32 : 0x600 - 0x780
492	 * -----------------------------------------------------
493	 */
494	.align	7, INV_INSN
495el0_sync_a32:
496	restore_mapping
497
498	mrs	x2, esr_el1
499	mrs	x3, sp_el0
500	lsr	x2, x2, #ESR_EC_SHIFT
501	cmp	x2, #ESR_EC_AARCH32_SVC
502	b.eq	el0_svc
503	b	el0_sync_abort
504	check_vector_size el0_sync_a32
505
506	.align	7, INV_INSN
507el0_irq_a32:
508	restore_mapping
509
510	b	elx_irq
511	check_vector_size el0_irq_a32
512
513	.align	7, INV_INSN
514el0_fiq_a32:
515	restore_mapping
516
517	b	elx_fiq
518	check_vector_size el0_fiq_a32
519
520	.align	7, INV_INSN
521el0_serror_a32:
522	b	el0_serror_a32
523	check_vector_size el0_serror_a32
524
525#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
526	.macro invalidate_branch_predictor
527	ic	iallu
528	isb
529	.endm
530
531	.align	11, INV_INSN
532	.global thread_excp_vect_workaround
533thread_excp_vect_workaround:
534	/* -----------------------------------------------------
535	 * EL1 with SP0 : 0x0 - 0x180
536	 * -----------------------------------------------------
537	 */
538	.align	7, INV_INSN
539workaround_el1_sync_sp0:
540	b	el1_sync_sp0
541	check_vector_size workaround_el1_sync_sp0
542
543	.align	7, INV_INSN
544workaround_el1_irq_sp0:
545	b	el1_irq_sp0
546	check_vector_size workaround_el1_irq_sp0
547
548	.align	7, INV_INSN
549workaround_el1_fiq_sp0:
550	b	el1_fiq_sp0
551	check_vector_size workaround_el1_fiq_sp0
552
553	.align	7, INV_INSN
554workaround_el1_serror_sp0:
555	b	el1_serror_sp0
556	check_vector_size workaround_el1_serror_sp0
557
558	/* -----------------------------------------------------
559	 * Current EL with SP1: 0x200 - 0x380
560	 * -----------------------------------------------------
561	 */
562	.align	7, INV_INSN
563workaround_el1_sync_sp1:
564	b	workaround_el1_sync_sp1
565	check_vector_size workaround_el1_sync_sp1
566
567	.align	7, INV_INSN
568workaround_el1_irq_sp1:
569	b	workaround_el1_irq_sp1
570	check_vector_size workaround_el1_irq_sp1
571
572	.align	7, INV_INSN
573workaround_el1_fiq_sp1:
574	b	workaround_el1_fiq_sp1
575	check_vector_size workaround_el1_fiq_sp1
576
577	.align	7, INV_INSN
578workaround_el1_serror_sp1:
579	b	workaround_el1_serror_sp1
580	check_vector_size workaround_el1_serror_sp1
581
582	/* -----------------------------------------------------
583	 * Lower EL using AArch64 : 0x400 - 0x580
584	 * -----------------------------------------------------
585	 */
586	.align	7, INV_INSN
587workaround_el0_sync_a64:
588	invalidate_branch_predictor
589	b	el0_sync_a64
590	check_vector_size workaround_el0_sync_a64
591
592	.align	7, INV_INSN
593workaround_el0_irq_a64:
594	invalidate_branch_predictor
595	b	el0_irq_a64
596	check_vector_size workaround_el0_irq_a64
597
598	.align	7, INV_INSN
599workaround_el0_fiq_a64:
600	invalidate_branch_predictor
601	b	el0_fiq_a64
602	check_vector_size workaround_el0_fiq_a64
603
604	.align	7, INV_INSN
605workaround_el0_serror_a64:
606	b	workaround_el0_serror_a64
607	check_vector_size workaround_el0_serror_a64
608
609	/* -----------------------------------------------------
610	 * Lower EL using AArch32 : 0x600 - 0x780
611	 * -----------------------------------------------------
612	 */
613	.align	7, INV_INSN
614workaround_el0_sync_a32:
615	invalidate_branch_predictor
616	b	el0_sync_a32
617	check_vector_size workaround_el0_sync_a32
618
619	.align	7, INV_INSN
620workaround_el0_irq_a32:
621	invalidate_branch_predictor
622	b	el0_irq_a32
623	check_vector_size workaround_el0_irq_a32
624
625	.align	7, INV_INSN
626workaround_el0_fiq_a32:
627	invalidate_branch_predictor
628	b	el0_fiq_a32
629	check_vector_size workaround_el0_fiq_a32
630
631	.align	7, INV_INSN
632workaround_el0_serror_a32:
633	b	workaround_el0_serror_a32
634	check_vector_size workaround_el0_serror_a32
635#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
636
637/*
638 * We're keeping this code in the same section as the vector to make sure
639 * that it's always available.
640 */
641eret_to_el0:
642
643#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
644	/* Point vbar_el1 at the vector in the reduced mapping */
645	adr	x0, thread_user_kcode_offset
646	ldr	x0, [x0]
647	mrs	x1, vbar_el1
648	sub	x1, x1, x0
649	msr	vbar_el1, x1
650	isb
651
652#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
653	/* Store the SP offset in tpidr_el1 to be used below to update SP */
654	adr	x1, thread_user_kdata_sp_offset
655	ldr	x1, [x1]
656	msr	tpidr_el1, x1
657#endif
658
659	/* Jump into the reduced mapping and continue execution */
660	ldr	x1, =1f
661	sub	x1, x1, x0
662	br	x1
6631:
664
665	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
666	msr	tpidrro_el0, x0
667
668	/* Update the mapping to exclude the full kernel mapping */
669	mrs	x0, ttbr0_el1
670	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
671	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
672	msr	ttbr0_el1, x0
673	isb
674
675#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
676	/*
677	 * Update the SP with thread_user_kdata_sp_offset as described in
678	 * init_user_kcode().
679	 */
680	mrs	x0, tpidr_el1
681	sub	sp, sp, x0
682#endif
683
684	mrs	x0, tpidrro_el0
685#else
686	mrs	x0, ttbr0_el1
687	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
688	msr	ttbr0_el1, x0
689	isb
690	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
691#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
692
693	eret
694
695END_FUNC thread_excp_vect
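/*
 * eret_to_el0 above is the inverse of the restore_mapping macro: with
 * CFG_CORE_UNMAP_CORE_AT_EL0 it does roughly (sketch)
 *
 *	vbar_el1 -= thread_user_kcode_offset;	reduced-mapping vectors
 *	ttbr0_el1 = (ttbr0_el1 + CORE_MMU_L1_TBL_OFFSET) |
 *		    BIT(TTBR_ASID_SHIFT);	user tables + user ASID
 *	eret;
 *
 * and without that config it only sets the ASID bit before the eret.
 */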
696
697LOCAL_FUNC el0_svc , :
698	/* get pointer to current thread context in x0 */
699	get_thread_ctx sp, 0, 1, 2
700	/* load saved kernel sp */
701	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
702	/* Keep pointer to initial record in x1 */
703	mov	x1, sp
704	/* Switch to SP_EL0 and restore kernel sp */
705	msr	spsel, #0
706	mov	x2, sp	/* Save SP_EL0 */
707	mov	sp, x0
708
709	/* Make room for struct thread_svc_regs */
710	sub	sp, sp, #THREAD_SVC_REG_SIZE
711	stp	x30,x2, [sp, #THREAD_SVC_REG_X30]
712
713	/* Restore x0-x3 */
714	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
715	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]
716
717	/* Prepare the argument for the handler */
718	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
719	mrs	x0, elr_el1
720	mrs	x1, spsr_el1
721	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
722	mov	x0, sp
723
724	/*
725	 * Unmask native interrupts, SError, and debug exceptions since we
726	 * have nothing left in sp_el1. Note that the SVC handler is expected
727	 * to re-enable foreign interrupts by itself.
728	 */
729#if defined(CFG_ARM_GICV3)
730	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
731#else
732	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
733#endif
734
735	/* Call the handler */
736	bl	tee_svc_handler
737
738	/* Mask all maskable exceptions since we're switching back to sp_el1 */
739	msr	daifset, #DAIFBIT_ALL
740
741	/*
742	 * Save the kernel sp we had at the beginning of this function.
743	 * This matters when this TA has called another TA, because
744	 * __thread_enter_user_mode() also saves the stack pointer in this
745	 * field.
746	 */
747	msr	spsel, #1
748	get_thread_ctx sp, 0, 1, 2
749	msr	spsel, #0
750	add	x1, sp, #THREAD_SVC_REG_SIZE
751	str	x1, [x0, #THREAD_CTX_KERN_SP]
752
753	/* Restore registers to the required state and return */
754	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
755	msr	elr_el1, x0
756	msr	spsr_el1, x1
757	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
758	mov	x30, sp
759	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
760	mov	sp, x0
761	b_if_spsr_is_el0 w1, 1f
762	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
763	ldr	x30, [x30, #THREAD_SVC_REG_X30]
764
765	eret
766
7671:	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
768	ldr	x30, [x30, #THREAD_SVC_REG_X30]
769
770	msr	spsel, #1
771	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
772	b	eret_to_el0
773END_FUNC el0_svc
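/*
 * The THREAD_SVC_REG_* offsets above describe struct thread_svc_regs,
 * the argument handed to tee_svc_handler(). From the loads and stores
 * above it holds at least the following (exact field order beyond the
 * adjacent pairs is assumed):
 *
 * struct thread_svc_regs {
 * 	uint64_t elr;
 * 	uint64_t spsr;
 * 	uint64_t x[15];		x0..x14
 * 	uint64_t x30;
 * 	uint64_t sp_el0;
 * };
 */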
774
775LOCAL_FUNC el1_sync_abort , :
776	mov	x0, sp
777	msr	spsel, #0
778	mov	x3, sp		/* Save original sp */
779
780	/*
781	 * Update core local flags.
782	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
783	 */
784	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
785	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
786	orr	w1, w1, #THREAD_CLF_ABORT
787	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
788			.Lsel_tmp_sp
789
790	/* Select abort stack */
791	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
792	b	.Lset_sp
793
794.Lsel_tmp_sp:
795	/* Select tmp stack */
796	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
797	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */
798
799.Lset_sp:
800	mov	sp, x2
801	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
802
803	/*
804	 * Save state on stack
805	 */
806	sub	sp, sp, #THREAD_ABT_REGS_SIZE
807	mrs	x2, spsr_el1
808	/* Store spsr, sp_el0 */
809	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
810	/* Store original x0, x1 */
811	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
812	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
813	/* Store original x2, x3 and x4 to x29 */
814	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
815	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
816	/* Store x30, elr_el1 */
817	mrs	x0, elr_el1
818	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
819
820	/*
821	 * Call handler
822	 */
823	mov	x0, #0
824	mov	x1, sp
825	bl	abort_handler
826
827	/*
828	 * Restore state from stack
829	 */
830	/* Load x30, elr_el1 */
831	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
832	msr	elr_el1, x0
833	/* Load x0 to x29 */
834	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
835	/* Switch to SP_EL1 */
836	msr	spsel, #1
837	/* Save x0 to x3 in CORE_LOCAL */
838	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
839	/* Restore spsr_el1 and sp_el0 */
840	mrs	x3, sp_el0
841	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
842	msr	spsr_el1, x0
843	msr	sp_el0, x1
844
845	/* Update core local flags */
846	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
847	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
848	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
849
850	/* Restore x0 to x3 */
851	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
852
853	/* Return from exception */
854	eret
855END_FUNC el1_sync_abort
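/*
 * The flag juggling at the top of el1_sync_abort in C terms (sketch):
 *
 *	flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
 *	if (flags & (THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)) {
 *		flags |= THREAD_CLF_TMP;	nested abort, the abort
 *		sp = tmp_stack_va_end;		stack is already in use
 *	} else {
 *		sp = abt_stack_va_end;
 *	}
 */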
856
857	/* sp_el0 in x3 */
858LOCAL_FUNC el0_sync_abort , :
859	/*
860	 * Update core local flags
861	 */
862	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
863	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
864	orr	w1, w1, #THREAD_CLF_ABORT
865	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
866
867	/*
868	 * Save state on stack
869	 */
870
871	/* load abt_stack_va_end */
872	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
873	/* Keep pointer to initial record in x0 */
874	mov	x0, sp
875	/* Switch to SP_EL0 */
876	msr	spsel, #0
877	mov	sp, x1
878	sub	sp, sp, #THREAD_ABT_REGS_SIZE
879	mrs	x2, spsr_el1
880	/* Store spsr, sp_el0 */
881	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
882	/* Store original x0, x1 */
883	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
884	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
885	/* Store original x2, x3 and x4 to x29 */
886	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
887	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
888	/* Store x30, elr_el1 */
889	mrs	x0, elr_el1
890	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
891
892	/*
893	 * Call handler
894	 */
895	mov	x0, #0
896	mov	x1, sp
897	bl	abort_handler
898
899	/*
900	 * Restore state from stack
901	 */
902
903	/* Load x30, elr_el1 */
904	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
905	msr	elr_el1, x0
906	/* Load x0 to x29 */
907	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
908	/* Switch to SP_EL1 */
909	msr	spsel, #1
910	/* Save x0 to x3 in CORE_LOCAL */
911	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
912	/* Restore spsr_el1 and sp_el0 */
913	mrs	x3, sp_el0
914	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
915	msr	spsr_el1, x0
916	msr	sp_el0, x1
917
918	/* Update core local flags */
919	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
920	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
921	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
922
923	/* Restore x2 to x3 */
924	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
925
926	b_if_spsr_is_el0 w0, 1f
927
928	/* Restore x0 to x1 */
929	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
930
931	/* Return from exception */
932	eret
9331:	b	eret_to_el0
934END_FUNC el0_sync_abort
935
936/* The handler for foreign interrupts. */
937.macro foreign_intr_handler mode:req
938	/*
939	 * Update core local flags
940	 */
941	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
942	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
943	orr	w1, w1, #THREAD_CLF_TMP
944	.ifc	\mode\(),fiq
945	orr	w1, w1, #THREAD_CLF_FIQ
946	.else
947	orr	w1, w1, #THREAD_CLF_IRQ
948	.endif
949	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
950
951	/* get pointer to current thread context in x0 */
952	get_thread_ctx sp, 0, 1, 2
953	/* Keep original SP_EL0 */
954	mrs	x2, sp_el0
955
956	/* Store original sp_el0 */
957	str	x2, [x0, #THREAD_CTX_REGS_SP]
958	/* store x4..x30 */
959	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
960	/* Load original x0..x3 into x10..x13 */
961	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
962	/* Save original x0..x3 */
963	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13
964
965	/* load tmp_stack_va_end */
966	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
967	/* Switch to SP_EL0 */
968	msr	spsel, #0
969	mov	sp, x1
970
971	/*
972	 * Mark current thread as suspended
973	 */
974	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
975	mrs	x1, spsr_el1
976	mrs	x2, elr_el1
977	bl	thread_state_suspend
978	mov	w4, w0		/* Supply thread index */
979
980	/* Update core local flags */
981	/* Switch to SP_EL1 */
982	msr	spsel, #1
983	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
984	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
985	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
986	msr	spsel, #0
987
988	/*
989	 * Note that we're exiting with SP_EL0 selected since the entry
990	 * functions expect to have SP_EL0 selected with the tmp stack
991	 * set.
992	 */
993
994	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
995	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
996	mov	w2, #0
997	mov	w3, #0
998	/* w4 is already filled in above */
999	smc	#0
1000	b	.	/* SMC should not return */
1001.endm
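/*
 * In effect foreign_intr_handler suspends the current thread and returns
 * to normal world with
 *
 *	x0 = TEESMC_OPTEED_RETURN_CALL_DONE
 *	x1 = OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
 *	x4 = thread index
 *
 * so the foreign interrupt is handled there; normal world is then
 * expected to resume the suspended thread (presumably with an
 * OPTEE_SMC_CALL_RETURN_FROM_RPC call) once it is done.
 */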
1002
1003/*
1004 * This struct is never used from C; it's only here to visualize the
1005 * layout.
1006 *
1007 * struct elx_nintr_rec {
1008 * 	uint64_t x[19 - 4]; x4..x18
1009 * 	uint64_t lr;
1010 * 	uint64_t sp_el0;
1011 * };
1012 */
1013#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
1014#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
1015#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
1016#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)
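/*
 * Worked out, the defines above give ELX_NINTR_REC_X(4) = 0,
 * ELX_NINTR_REC_X(18) = 112, ELX_NINTR_REC_LR = 128,
 * ELX_NINTR_REC_SP_EL0 = 136 and ELX_NINTR_REC_SIZE = 144. The size is a
 * multiple of 16 so SP stays 16-byte aligned, presumably the reason for
 * the 8 bytes of padding at offset 120.
 */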
1017
1018/* The handler for native interrupts. */
1019.macro native_intr_handler mode:req
1020	/*
1021	 * Update core local flags
1022	 */
1023	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1024	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
1025	.ifc	\mode\(),fiq
1026	orr	w1, w1, #THREAD_CLF_FIQ
1027	.else
1028	orr	w1, w1, #THREAD_CLF_IRQ
1029	.endif
1030	orr	w1, w1, #THREAD_CLF_TMP
1031	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1032
1033	/* load tmp_stack_va_end */
1034	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
1035	/* Keep original SP_EL0 */
1036	mrs	x2, sp_el0
1037	/* Switch to SP_EL0 */
1038	msr	spsel, #0
1039	mov	sp, x1
1040
1041	/*
1042	 * Save, on the stack, the registers that can be clobbered by a
1043	 * call to a C function
1044	 */
1045	/* Make room for struct elx_nintr_rec */
1046	sub	sp, sp, #ELX_NINTR_REC_SIZE
1047	/* Store x4..x18 */
1048	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
1049	/* Store lr and original sp_el0 */
1050	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]
1051
1052	bl	thread_check_canaries
1053	adr	x16, thread_nintr_handler_ptr
1054	ldr	x16, [x16]
1055	blr	x16
1056
1057	/*
1058	 * Restore registers
1059	 */
1060	/* Restore x4..x18 */
1061	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
1062	/* Load lr and original sp_el0 */
1063	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
1064	/* Restore SP_EL0 */
1065	mov	sp, x2
1066	/* Switch back to SP_EL1 */
1067	msr	spsel, #1
1068
1069	/* Update core local flags */
1070	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
1071	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
1072	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
1073
1074	mrs	x0, spsr_el1
1075	/* Restore x2..x3 */
1076	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
1077	b_if_spsr_is_el0 w0, 1f
1078
1079	/* Restore x0..x1 */
1080	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
1081
1082	/* Return from exception */
1083	eret
10841:	b	eret_to_el0
1085.endm
1086
1087LOCAL_FUNC elx_irq , :
1088#if defined(CFG_ARM_GICV3)
1089	native_intr_handler	irq
1090#else
1091	foreign_intr_handler	irq
1092#endif
1093END_FUNC elx_irq
1094
1095LOCAL_FUNC elx_fiq , :
1096#if defined(CFG_ARM_GICV3)
1097	foreign_intr_handler	fiq
1098#else
1099	native_intr_handler	fiq
1100#endif
1101END_FUNC elx_fiq
1102