xref: /optee_os/core/arch/arm/kernel/thread_a64.S (revision 336e32995d9c419d9fc2a6fd5974f99761285415)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2015-2017, Linaro Limited
4 */
5
6#include <arm.h>
7#include <arm64_macros.S>
8#include <asm.S>
9#include <generated/asm-defines.h>
10#include <keep.h>
11#include <kernel/thread_defs.h>
12#include <mm/core_mmu.h>
13#include <smccc.h>
14#include <sm/optee_smc.h>
15#include <sm/teesmc_opteed.h>
16#include <sm/teesmc_opteed_macros.h>
17
18#include "thread_private.h"
19
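	/*
	 * Returns a pointer to the current thread's struct thread_ctx in
	 * x\res: the current thread index is read from the core local
	 * structure pointed to by \core_local and used to index the global
	 * threads[] array. The tmp registers are clobbered.
	 */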
20	.macro get_thread_ctx core_local, res, tmp0, tmp1
21		ldr	w\tmp0, [\core_local, \
22				#THREAD_CORE_LOCAL_CURR_THREAD]
23		adr	x\res, threads
24		mov	x\tmp1, #THREAD_CTX_SIZE
25		madd	x\res, x\tmp0, x\tmp1, x\res
26	.endm
27
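	/*
	 * Branches to \label if the SPSR value in \reg describes an
	 * exception taken from EL0, that is, from AArch32 or from AArch64
	 * with the EL field equal to 0.
	 */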
28	.macro b_if_spsr_is_el0 reg, label
29		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
30		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
31		b.eq	\label
32	.endm
33
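/*
 * Entered from the secure monitor to handle a yielding (standard) SMC:
 * x0-x7 are stored as a struct thread_smc_args on the stack and passed to
 * thread_handle_std_smc().
 */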
34LOCAL_FUNC vector_std_smc_entry , :
35	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
36	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
37	mov	x0, sp
38	bl	thread_handle_std_smc
39	/*
40	 * Normally thread_handle_std_smc() should return via
41	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
42	 * hasn't switched stack (an error was detected) it will do a normal
43	 * "C" return.
44	 */
45	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
46	add	sp, sp, #THREAD_SMC_ARGS_SIZE
47	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
48	smc	#0
49	b	.	/* SMC should not return */
50END_FUNC vector_std_smc_entry
51
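/*
 * Entered from the secure monitor to handle a fast SMC: x0-x7 are stored
 * as a struct thread_smc_args on the stack and passed to
 * thread_handle_fast_smc(). The updated arguments are loaded into x1-x8
 * and returned to the monitor with TEESMC_OPTEED_RETURN_CALL_DONE.
 */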
52LOCAL_FUNC vector_fast_smc_entry , :
53	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
54	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
55	mov	x0, sp
56	bl	thread_handle_fast_smc
57	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
58	add	sp, sp, #THREAD_SMC_ARGS_SIZE
59	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
60	smc	#0
61	b	.	/* SMC should not return */
62END_FUNC vector_fast_smc_entry
63
64LOCAL_FUNC vector_fiq_entry , :
65	/* Secure Monitor received a FIQ and passed control to us. */
66	bl	thread_check_canaries
67	adr	x16, thread_nintr_handler_ptr
68	ldr	x16, [x16]
69	blr	x16
70	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
71	smc	#0
72	b	.	/* SMC should not return */
73END_FUNC vector_fiq_entry
74
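/*
 * The vector_cpu_*_entry and vector_system_*_entry handlers below call
 * the corresponding registered handler through its function pointer and
 * report the result back to the secure monitor with a
 * TEESMC_OPTEED_RETURN_*_DONE SMC, passing the return value in x1.
 */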
75LOCAL_FUNC vector_cpu_on_entry , :
76	adr	x16, thread_cpu_on_handler_ptr
77	ldr	x16, [x16]
78	blr	x16
79	mov	x1, x0
80	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
81	smc	#0
82	b	.	/* SMC should not return */
83END_FUNC vector_cpu_on_entry
84
85LOCAL_FUNC vector_cpu_off_entry , :
86	adr	x16, thread_cpu_off_handler_ptr
87	ldr	x16, [x16]
88	blr	x16
89	mov	x1, x0
90	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
91	smc	#0
92	b	.	/* SMC should not return */
93END_FUNC vector_cpu_off_entry
94
95LOCAL_FUNC vector_cpu_suspend_entry , :
96	adr	x16, thread_cpu_suspend_handler_ptr
97	ldr	x16, [x16]
98	blr	x16
99	mov	x1, x0
100	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
101	smc	#0
102	b	.	/* SMC should not return */
103END_FUNC vector_cpu_suspend_entry
104
105LOCAL_FUNC vector_cpu_resume_entry , :
106	adr	x16, thread_cpu_resume_handler_ptr
107	ldr	x16, [x16]
108	blr	x16
109	mov	x1, x0
110	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
111	smc	#0
112	b	.	/* SMC should not return */
113END_FUNC vector_cpu_resume_entry
114
115LOCAL_FUNC vector_system_off_entry , :
116	adr	x16, thread_system_off_handler_ptr
117	ldr	x16, [x16]
118	blr	x16
119	mov	x1, x0
120	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
121	smc	#0
122	b	.	/* SMC should not return */
123END_FUNC vector_system_off_entry
124
125LOCAL_FUNC vector_system_reset_entry , :
126	adr	x16, thread_system_reset_handler_ptr
127	ldr	x16, [x16]
128	blr	x16
129	mov	x1, x0
130	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
131	smc	#0
132	b	.	/* SMC should not return */
133END_FUNC vector_system_reset_entry
134
135/*
136 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
137 * initialization.
138 *
139 * Note that ARM-TF depends on the layout of this vector table; any change
140 * in layout has to be synced with ARM-TF.
141 */
142FUNC thread_vector_table , :
143	b	vector_std_smc_entry
144	b	vector_fast_smc_entry
145	b	vector_cpu_on_entry
146	b	vector_cpu_off_entry
147	b	vector_cpu_resume_entry
148	b	vector_cpu_suspend_entry
149	b	vector_fiq_entry
150	b	vector_system_off_entry
151	b	vector_system_reset_entry
152END_FUNC thread_vector_table
153KEEP_PAGER thread_vector_table
154
155
156/* void thread_resume(struct thread_ctx_regs *regs) */
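/*
 * Restores the state saved in the supplied struct thread_ctx_regs: SP,
 * ELR_EL1, SPSR_EL1 and x0-x30, then returns with eret, either directly
 * to EL1 or via eret_to_el0 when the saved SPSR selects user mode.
 */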
157FUNC thread_resume , :
158	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
159	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
160	mov	sp, x1
161	msr	elr_el1, x2
162	msr	spsr_el1, x3
163
164	b_if_spsr_is_el0 w3, 1f
165
166	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
167	ldr	x0, [x0, THREAD_CTX_REGS_X0]
168	eret
169
1701:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
171	ldr	x0, [x0, THREAD_CTX_REGS_X0]
172
173	msr	spsel, #1
174	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
175	b	eret_to_el0
176END_FUNC thread_resume
177
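/*
 * Entry for a thread handling a yielding (standard) SMC: x0-x7 are passed
 * to __thread_std_smc_entry() as a struct thread_smc_args. On return the
 * thread state is freed on the temporary stack and the result is reported
 * to the secure monitor in x1-x4 with TEESMC_OPTEED_RETURN_CALL_DONE.
 */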
178FUNC thread_std_smc_entry , :
179	/* pass x0-x7 in a struct thread_smc_args */
180	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
181	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
182	mov	x0, sp
183
184	/* Call the registered handler */
185	bl	__thread_std_smc_entry
186
187	/*
188	 * Load the returned x0-x3 into preserved registers and skip the
189	 * "returned" x4-x7 since they will not be returned to normal
190	 * world.
191	 */
192	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
193	add	sp, sp, #THREAD_SMC_ARGS_SIZE
194
195	/* Mask all maskable exceptions before switching to temporary stack */
196	msr	daifset, #DAIFBIT_ALL
197	bl	thread_get_tmp_sp
198	mov	sp, x0
199
200	bl	thread_state_free
201
202	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
203	mov	x1, x20
204	mov	x2, x21
205	mov	x3, x22
206	mov	x4, x23
207	smc	#0
208	b	.	/* SMC should not return */
209END_FUNC thread_std_smc_entry
210
211/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
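/*
 * Saves the callee-saved registers and stack pointer of the calling
 * thread, suspends the thread with thread_state_suspend() and issues a
 * TEESMC_OPTEED_RETURN_CALL_DONE SMC with rv[0..2] in w1-w3 and the
 * thread index in w4. When the thread is resumed, execution continues at
 * .thread_rpc_return where w0-w5 are stored back into rv[].
 */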
212FUNC thread_rpc , :
213	/* Read daif and create an SPSR */
214	mrs	x1, daif
215	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)
216
217	/* Mask all maskable exceptions before switching to temporary stack */
218	msr	daifset, #DAIFBIT_ALL
219	push	x0, xzr
220	push	x1, x30
221	bl	thread_get_ctx_regs
222	ldr	x30, [sp, #8]
223	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
224	mov	x19, x0
225
226	bl	thread_get_tmp_sp
227	pop	x1, xzr		/* Match "push x1, x30" above */
228	mov	x2, sp
229	str	x2, [x19, #THREAD_CTX_REGS_SP]
230	ldr	x20, [sp]	/* Get pointer to rv[] */
231	mov	sp, x0		/* Switch to tmp stack */
232
233	adr	x2, .thread_rpc_return
234	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
235	bl	thread_state_suspend
236	mov	x4, x0		/* Supply thread index */
237	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
238	load_wregs x20, 0, 1, 3	/* Load rv[] into w0-w2 */
239	smc	#0
240	b	.		/* SMC should not return */
241
242.thread_rpc_return:
243	/*
244	 * At this point the stack pointer has been restored to the value
245	 * stored in THREAD_CTX above.
246	 *
247	 * Execution jumps here from thread_resume above when the RPC has
248	 * returned. The IRQ and FIQ bits are restored to what they were when
249	 * this function was originally entered.
250	 */
251	pop	x16, xzr	/* Get pointer to rv[] */
252	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
253	ret
254END_FUNC thread_rpc
255KEEP_PAGER thread_rpc
256
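/*
 * Issues an SMC with arguments and results passed in registers according
 * to the SMC calling convention; the registers are left untouched by this
 * function itself.
 */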
257FUNC thread_smc , :
258	smc	#0
259	ret
260END_FUNC thread_smc
261
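/* Sets VBAR_EL1 to the vector table base address passed in x0. */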
262FUNC thread_init_vbar , :
263	msr	vbar_el1, x0
264	ret
265END_FUNC thread_init_vbar
266KEEP_PAGER thread_init_vbar
267
268/*
269 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
270 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
271 *               unsigned long user_func, unsigned long spsr,
272 *               uint32_t *exit_status0, uint32_t *exit_status1)
273 *
274 */
275FUNC __thread_enter_user_mode , :
276	ldr	x8, [sp]
277	/*
278	 * Create and fill in the struct thread_user_mode_rec
279	 */
280	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
281	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
282	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
283
284	/*
285	 * Switch to SP_EL1
286	 * Disable exceptions
287	 * Save kern sp in x19
288	 */
289	msr	daifset, #DAIFBIT_ALL
290	mov	x19, sp
291	msr	spsel, #1
292
293	/*
294	 * Save the kernel stack pointer in the thread context
295	 */
296	/* get pointer to current thread context */
297	get_thread_ctx sp, 21, 20, 22
298	/*
299	 * Save the kernel stack pointer to ensure that el0_svc() uses the
300	 * correct stack pointer
301	 */
302	str	x19, [x21, #THREAD_CTX_KERN_SP]
303
304	/*
305	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
306	 */
307	msr	spsr_el1, x6
308	/* Set user sp */
309	mov	x13, x4		/* Used when running TA in AArch32 */
310	msr	sp_el0, x4	/* Used when running TA in AArch64 */
311	/* Set user function */
312	msr	elr_el1, x5
313	/* Set frame pointer (user stack can't be unwound past this point) */
314	mov x29, #0
315
316	/* Jump into user mode */
317	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
318	b eret_to_el0
319END_FUNC __thread_enter_user_mode
320KEEP_PAGER __thread_enter_user_mode
321
322/*
323 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
324 * 		uint32_t exit_status1);
325 * See description in thread.h
326 */
327FUNC thread_unwind_user_mode , :
328	/* Store the exit status */
329	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
330	str	w1, [x3]
331	str	w2, [x4]
332	/* Restore x19..x30 */
333	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
334	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
335	/* Return from the call of thread_enter_user_mode() */
336	ret
337END_FUNC thread_unwind_user_mode
338
339	/*
340	 * This macro verifies that a given vector doesn't exceed the
341	 * architectural limit of 32 instructions. It is meant to be placed
342	 * immediately after the last instruction in the vector and takes the
343	 * vector entry point as its parameter.
344	 */
345	.macro check_vector_size since
346	  .if (. - \since) > (32 * 4)
347	    .error "Vector exceeds 32 instructions"
348	  .endif
349	.endm
350
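	/*
	 * Used on exception entry from EL0: switches TTBR0_EL1 back to the
	 * kernel mode ASID and, with CFG_CORE_UNMAP_CORE_AT_EL0, jumps from
	 * the reduced EL0 mapping into the full kernel mapping and adjusts
	 * VBAR_EL1 (and SP when the Spectre workaround is enabled). In both
	 * configurations x0-x3 are saved in the core local structure.
	 */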
351	.macro restore_mapping
352#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
353		/* Temporarily save x0, x1 */
354		msr	tpidr_el1, x0
355		msr	tpidrro_el0, x1
356
357		/* Update the mapping to use the full kernel mapping */
358		mrs	x0, ttbr0_el1
359		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
360		/* switch to kernel mode ASID */
361		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
362		msr	ttbr0_el1, x0
363		isb
364
365		/* Jump into the full mapping and continue execution */
366		ldr	x0, =1f
367		br	x0
368	1:
369
370		/* Point to the vector into the full mapping */
371		adr	x0, thread_user_kcode_offset
372		ldr	x0, [x0]
373		mrs	x1, vbar_el1
374		add	x1, x1, x0
375		msr	vbar_el1, x1
376		isb
377
378#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
379		/*
380		 * Update the SP with thread_user_kdata_sp_offset as
381		 * described in init_user_kcode().
382		 */
383		adr	x0, thread_user_kdata_sp_offset
384		ldr	x0, [x0]
385		add	sp, sp, x0
386#endif
387
388		/* Restore x0, x1 */
389		mrs	x0, tpidr_el1
390		mrs	x1, tpidrro_el0
391		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
392#else
393		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
394		mrs	x0, ttbr0_el1
395		/* switch to kernel mode ASID */
396		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
397		msr	ttbr0_el1, x0
398		isb
399#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
400	.endm
401
402#define INV_INSN	0
403	.section .text.thread_excp_vect
404	.align	11, INV_INSN
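/*
 * Exception vector table for EL1. Each entry is limited to 32
 * instructions (enforced with check_vector_size) and the table is 2 KiB
 * aligned as required by VBAR_EL1.
 */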
405FUNC thread_excp_vect , :
406	/* -----------------------------------------------------
407	 * EL1 with SP0 : 0x0 - 0x180
408	 * -----------------------------------------------------
409	 */
410	.align	7, INV_INSN
411el1_sync_sp0:
412	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
413	b	el1_sync_abort
414	check_vector_size el1_sync_sp0
415
416	.align	7, INV_INSN
417el1_irq_sp0:
418	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
419	b	elx_irq
420	check_vector_size el1_irq_sp0
421
422	.align	7, INV_INSN
423el1_fiq_sp0:
424	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
425	b	elx_fiq
426	check_vector_size el1_fiq_sp0
427
428	.align	7, INV_INSN
429el1_serror_sp0:
430	b	el1_serror_sp0
431	check_vector_size el1_serror_sp0
432
433	/* -----------------------------------------------------
434	 * Current EL with SP1: 0x200 - 0x380
435	 * -----------------------------------------------------
436	 */
437	.align	7, INV_INSN
438el1_sync_sp1:
439	b	el1_sync_sp1
440	check_vector_size el1_sync_sp1
441
442	.align	7, INV_INSN
443el1_irq_sp1:
444	b	el1_irq_sp1
445	check_vector_size el1_irq_sp1
446
447	.align	7, INV_INSN
448el1_fiq_sp1:
449	b	el1_fiq_sp1
450	check_vector_size el1_fiq_sp1
451
452	.align	7, INV_INSN
453el1_serror_sp1:
454	b	el1_serror_sp1
455	check_vector_size el1_serror_sp1
456
457	/* -----------------------------------------------------
458	 * Lower EL using AArch64 : 0x400 - 0x580
459	 * -----------------------------------------------------
460	 */
461	.align	7, INV_INSN
462el0_sync_a64:
463	restore_mapping
464
465	mrs	x2, esr_el1
466	mrs	x3, sp_el0
467	lsr	x2, x2, #ESR_EC_SHIFT
468	cmp	x2, #ESR_EC_AARCH64_SVC
469	b.eq	el0_svc
470	b	el0_sync_abort
471	check_vector_size el0_sync_a64
472
473	.align	7, INV_INSN
474el0_irq_a64:
475	restore_mapping
476
477	b	elx_irq
478	check_vector_size el0_irq_a64
479
480	.align	7, INV_INSN
481el0_fiq_a64:
482	restore_mapping
483
484	b	elx_fiq
485	check_vector_size el0_fiq_a64
486
487	.align	7, INV_INSN
488el0_serror_a64:
489	b   	el0_serror_a64
490	check_vector_size el0_serror_a64
491
492	/* -----------------------------------------------------
493	 * Lower EL using AArch32 : 0x600 - 0x780
494	 * -----------------------------------------------------
495	 */
496	.align	7, INV_INSN
497el0_sync_a32:
498	restore_mapping
499
500	mrs	x2, esr_el1
501	mrs	x3, sp_el0
502	lsr	x2, x2, #ESR_EC_SHIFT
503	cmp	x2, #ESR_EC_AARCH32_SVC
504	b.eq	el0_svc
505	b	el0_sync_abort
506	check_vector_size el0_sync_a32
507
508	.align	7, INV_INSN
509el0_irq_a32:
510	restore_mapping
511
512	b	elx_irq
513	check_vector_size el0_irq_a32
514
515	.align	7, INV_INSN
516el0_fiq_a32:
517	restore_mapping
518
519	b	elx_fiq
520	check_vector_size el0_fiq_a32
521
522	.align	7, INV_INSN
523el0_serror_a32:
524	b	el0_serror_a32
525	check_vector_size el0_serror_a32
526
527#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
528	.macro invalidate_branch_predictor
529		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
530		mov_imm	x0, SMCCC_ARCH_WORKAROUND_1
531		smc	#0
532		load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
533	.endm
534
535	.align	11, INV_INSN
536	.global thread_excp_vect_workaround
537thread_excp_vect_workaround:
538	/* -----------------------------------------------------
539	 * EL1 with SP0 : 0x0 - 0x180
540	 * -----------------------------------------------------
541	 */
542	.align	7, INV_INSN
543workaround_el1_sync_sp0:
544	b	el1_sync_sp0
545	check_vector_size workaround_el1_sync_sp0
546
547	.align	7, INV_INSN
548workaround_el1_irq_sp0:
549	b	el1_irq_sp0
550	check_vector_size workaround_el1_irq_sp0
551
552	.align	7, INV_INSN
553workaround_el1_fiq_sp0:
554	b	el1_fiq_sp0
555	check_vector_size workaround_el1_fiq_sp0
556
557	.align	7, INV_INSN
558workaround_el1_serror_sp0:
559	b	el1_serror_sp0
560	check_vector_size workaround_el1_serror_sp0
561
562	/* -----------------------------------------------------
563	 * Current EL with SP1: 0x200 - 0x380
564	 * -----------------------------------------------------
565	 */
566	.align	7, INV_INSN
567workaround_el1_sync_sp1:
568	b	workaround_el1_sync_sp1
569	check_vector_size workaround_el1_sync_sp1
570
571	.align	7, INV_INSN
572workaround_el1_irq_sp1:
573	b	workaround_el1_irq_sp1
574	check_vector_size workaround_el1_irq_sp1
575
576	.align	7, INV_INSN
577workaround_el1_fiq_sp1:
578	b	workaround_el1_fiq_sp1
579	check_vector_size workaround_el1_fiq_sp1
580
581	.align	7, INV_INSN
582workaround_el1_serror_sp1:
583	b	workaround_el1_serror_sp1
584	check_vector_size workaround_el1_serror_sp1
585
586	/* -----------------------------------------------------
587	 * Lower EL using AArch64 : 0x400 - 0x580
588	 * -----------------------------------------------------
589	 */
590	.align	7, INV_INSN
591workaround_el0_sync_a64:
592	invalidate_branch_predictor
593	b	el0_sync_a64
594	check_vector_size workaround_el0_sync_a64
595
596	.align	7, INV_INSN
597workaround_el0_irq_a64:
598	invalidate_branch_predictor
599	b	el0_irq_a64
600	check_vector_size workaround_el0_irq_a64
601
602	.align	7, INV_INSN
603workaround_el0_fiq_a64:
604	invalidate_branch_predictor
605	b	el0_fiq_a64
606	check_vector_size workaround_el0_fiq_a64
607
608	.align	7, INV_INSN
609workaround_el0_serror_a64:
610	b   	workaround_el0_serror_a64
611	check_vector_size workaround_el0_serror_a64
612
613	/* -----------------------------------------------------
614	 * Lower EL using AArch32 : 0x600 - 0x780
615	 * -----------------------------------------------------
616	 */
617	.align	7, INV_INSN
618workaround_el0_sync_a32:
619	invalidate_branch_predictor
620	b	el0_sync_a32
621	check_vector_size workaround_el0_sync_a32
622
623	.align	7, INV_INSN
624workaround_el0_irq_a32:
625	invalidate_branch_predictor
626	b	el0_irq_a32
627	check_vector_size workaround_el0_irq_a32
628
629	.align	7, INV_INSN
630workaround_el0_fiq_a32:
631	invalidate_branch_predictor
632	b	el0_fiq_a32
633	check_vector_size workaround_el0_fiq_a32
634
635	.align	7, INV_INSN
636workaround_el0_serror_a32:
637	b	workaround_el0_serror_a32
638	check_vector_size workaround_el0_serror_a32
639#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
640
641/*
642 * We're keeping this code in the same section as the vector to make sure
643 * that it's always available.
644 */
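/*
 * eret_to_el0 expects x0 and x1 to be saved in the core local structure.
 * It switches TTBR0_EL1 to the user mode ASID (and, with
 * CFG_CORE_UNMAP_CORE_AT_EL0, to the reduced mapping with matching
 * VBAR_EL1 and SP) before restoring x0-x1 and returning to EL0 with eret.
 */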
645eret_to_el0:
646
647#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
648	/* Point to the vector into the reduced mapping */
649	adr	x0, thread_user_kcode_offset
650	ldr	x0, [x0]
651	mrs	x1, vbar_el1
652	sub	x1, x1, x0
653	msr	vbar_el1, x1
654	isb
655
656#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
657	/* Store the SP offset in tpidr_el1 to be used below to update SP */
658	adr	x1, thread_user_kdata_sp_offset
659	ldr	x1, [x1]
660	msr	tpidr_el1, x1
661#endif
662
663	/* Jump into the reduced mapping and continue execution */
664	ldr	x1, =1f
665	sub	x1, x1, x0
666	br	x1
6671:
668
669	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
670	msr	tpidrro_el0, x0
671
672	/* Update the mapping to exclude the full kernel mapping */
673	mrs	x0, ttbr0_el1
674	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
675	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
676	msr	ttbr0_el1, x0
677	isb
678
679#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
680	/*
681	 * Update the SP with thread_user_kdata_sp_offset as described in
682	 * init_user_kcode().
683	 */
684	mrs	x0, tpidr_el1
685	sub	sp, sp, x0
686#endif
687
688	mrs	x0, tpidrro_el0
689#else
690	mrs	x0, ttbr0_el1
691	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
692	msr	ttbr0_el1, x0
693	isb
694	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
695#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
696
697	eret
698	/*
699	 * Make sure that literals are placed before the
700	 * thread_excp_vect_end label.
701	 */
702	.pool
703	.global thread_excp_vect_end
704thread_excp_vect_end:
705END_FUNC thread_excp_vect
706
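/*
 * Handles an SVC from EL0: switches to the kernel stack saved in
 * THREAD_CTX_KERN_SP, builds a struct thread_svc_regs on it, calls
 * tee_svc_handler() and then restores the register state and returns
 * with eret, via eret_to_el0 when returning to user mode.
 */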
707LOCAL_FUNC el0_svc , :
708	/* get pointer to current thread context in x0 */
709	get_thread_ctx sp, 0, 1, 2
710	/* load saved kernel sp */
711	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
712	/* Keep pointer to initial record in x1 */
713	mov	x1, sp
714	/* Switch to SP_EL0 and restore kernel sp */
715	msr	spsel, #0
716	mov	x2, sp	/* Save SP_EL0 */
717	mov	sp, x0
718
719	/* Make room for struct thread_svc_regs */
720	sub	sp, sp, #THREAD_SVC_REG_SIZE
721	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]
722
723	/* Restore x0-x3 */
724	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
725	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]
726
727	/* Prepare the argument for the handler */
728	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
729	mrs	x0, elr_el1
730	mrs	x1, spsr_el1
731	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
732	mov	x0, sp
733
734	/*
735	 * Unmask native interrupts, SError, and debug exceptions since we have
736	 * nothing left in sp_el1. Note that the SVC handler is expected to
737	 * re-enable foreign interrupts by itself.
738	 */
739#if defined(CFG_ARM_GICV3)
740	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
741#else
742	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
743#endif
744
745	/* Call the handler */
746	bl	tee_svc_handler
747
748	/* Mask all maskable exceptions since we're switching back to sp_el1 */
749	msr	daifset, #DAIFBIT_ALL
750
751	/*
752	 * Save the kernel sp we had at the beginning of this function.
753	 * This matters when this TA has called another TA, because
754	 * __thread_enter_user_mode() also saves the stack pointer in this
755	 * field.
756	 */
757	msr	spsel, #1
758	get_thread_ctx sp, 0, 1, 2
759	msr	spsel, #0
760	add	x1, sp, #THREAD_SVC_REG_SIZE
761	str	x1, [x0, #THREAD_CTX_KERN_SP]
762
763	/* Restore registers to the required state and return */
764	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
765	msr	elr_el1, x0
766	msr	spsr_el1, x1
767	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
768	mov	x30, sp
769	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
770	mov	sp, x0
771	b_if_spsr_is_el0 w1, 1f
772	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
773	ldr	x30, [x30, #THREAD_SVC_REG_X30]
774
775	eret
776
7771:	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
778	ldr	x30, [x30, #THREAD_SVC_REG_X30]
779
780	msr	spsel, #1
781	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
782	b	eret_to_el0
783END_FUNC el0_svc
784
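/*
 * Handles a synchronous exception from EL1: updates the core local flags,
 * selects the abort stack (or the temporary stack for a nested abort),
 * saves the register state as a struct thread_abt_regs and calls
 * abort_handler() before restoring the state and returning with eret.
 */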
785LOCAL_FUNC el1_sync_abort , :
786	mov	x0, sp
787	msr	spsel, #0
788	mov	x3, sp		/* Save original sp */
789
790	/*
791	 * Update core local flags.
792	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
793	 */
794	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
795	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
796	orr	w1, w1, #THREAD_CLF_ABORT
797	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
798			.Lsel_tmp_sp
799
800	/* Select abort stack */
801	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
802	b	.Lset_sp
803
804.Lsel_tmp_sp:
805	/* Select tmp stack */
806	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
807	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */
808
809.Lset_sp:
810	mov	sp, x2
811	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
812
813	/*
814	 * Save state on stack
815	 */
816	sub	sp, sp, #THREAD_ABT_REGS_SIZE
817	mrs	x2, spsr_el1
818	/* Store spsr, sp_el0 */
819	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
820	/* Store original x0, x1 */
821	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
822	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
823	/* Store original x2, x3 and x4 to x29 */
824	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
825	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
826	/* Store x30, elr_el1 */
827	mrs	x0, elr_el1
828	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
829
830	/*
831	 * Call handler
832	 */
833	mov	x0, #0
834	mov	x1, sp
835	bl	abort_handler
836
837	/*
838	 * Restore state from stack
839	 */
840	/* Load x30, elr_el1 */
841	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
842	msr	elr_el1, x0
843	/* Load x0 to x29 */
844	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
845	/* Switch to SP_EL1 */
846	msr	spsel, #1
847	/* Save x0 to x3 in CORE_LOCAL */
848	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
849	/* Restore spsr_el1 and sp_el0 */
850	mrs	x3, sp_el0
851	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
852	msr	spsr_el1, x0
853	msr	sp_el0, x1
854
855	/* Update core local flags */
856	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
857	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
858	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
859
860	/* Restore x0 to x3 */
861	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
862
863	/* Return from exception */
864	eret
865END_FUNC el1_sync_abort
866
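/*
 * Handles a synchronous exception from EL0 that isn't an SVC: the
 * register state is saved as a struct thread_abt_regs on the abort stack
 * and abort_handler() is called, after which execution returns either to
 * EL1 or, via eret_to_el0, to user mode depending on the saved SPSR.
 */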
867	/* sp_el0 in x3 */
868LOCAL_FUNC el0_sync_abort , :
869	/*
870	 * Update core local flags
871	 */
872	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
873	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
874	orr	w1, w1, #THREAD_CLF_ABORT
875	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
876
877	/*
878	 * Save state on stack
879	 */
880
881	/* load abt_stack_va_end */
882	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
883	/* Keep pointer to initial record in x0 */
884	mov	x0, sp
885	/* Switch to SP_EL0 */
886	msr	spsel, #0
887	mov	sp, x1
888	sub	sp, sp, #THREAD_ABT_REGS_SIZE
889	mrs	x2, spsr_el1
890	/* Store spsr, sp_el0 */
891	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
892	/* Store original x0, x1 */
893	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
894	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
895	/* Store original x2, x3 and x4 to x29 */
896	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
897	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
898	/* Store x30, elr_el1 */
899	mrs	x0, elr_el1
900	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
901
902	/*
903	 * Call handler
904	 */
905	mov	x0, #0
906	mov	x1, sp
907	bl	abort_handler
908
909	/*
910	 * Restore state from stack
911	 */
912
913	/* Load x30, elr_el1 */
914	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
915	msr	elr_el1, x0
916	/* Load x0 to x29 */
917	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
918	/* Switch to SP_EL1 */
919	msr	spsel, #1
920	/* Save x0 to x3 in CORE_LOCAL */
921	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
922	/* Restore spsr_el1 and sp_el0 */
923	mrs	x3, sp_el0
924	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
925	msr	spsr_el1, x0
926	msr	sp_el0, x1
927
928	/* Update core local flags */
929	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
930	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
931	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
932
933	/* Restore x2 to x3 */
934	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
935
936	b_if_spsr_is_el0 w0, 1f
937
938	/* Restore x0 to x1 */
939	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
940
941	/* Return from exception */
942	eret
9431:	b	eret_to_el0
944END_FUNC el0_sync_abort
945
946/* The handler of foreign interrupts. */
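/*
 * Saves the interrupted context in the thread context, suspends the
 * thread with THREAD_FLAGS_EXIT_ON_FOREIGN_INTR and exits to the normal
 * world with OPTEE_SMC_RETURN_RPC_FOREIGN_INTR so that the interrupt can
 * be handled there.
 */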
947.macro foreign_intr_handler mode:req
948	/*
949	 * Update core local flags
950	 */
951	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
952	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
953	orr	w1, w1, #THREAD_CLF_TMP
954	.ifc	\mode\(),fiq
955	orr	w1, w1, #THREAD_CLF_FIQ
956	.else
957	orr	w1, w1, #THREAD_CLF_IRQ
958	.endif
959	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
960
961	/* get pointer to current thread context in x0 */
962	get_thread_ctx sp, 0, 1, 2
963	/* Keep original SP_EL0 */
964	mrs	x2, sp_el0
965
966	/* Store original sp_el0 */
967	str	x2, [x0, #THREAD_CTX_REGS_SP]
968	/* store x4..x30 */
969	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
970	/* Load original x0..x3 into x10..x13 */
971	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
972	/* Save original x0..x3 */
973	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13
974
975	/* load tmp_stack_va_end */
976	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
977	/* Switch to SP_EL0 */
978	msr	spsel, #0
979	mov	sp, x1
980
981	/*
982	 * Mark current thread as suspended
983	 */
984	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
985	mrs	x1, spsr_el1
986	mrs	x2, elr_el1
987	bl	thread_state_suspend
988	mov	w4, w0		/* Supply thread index */
989
990	/* Update core local flags */
991	/* Switch to SP_EL1 */
992	msr	spsel, #1
993	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
994	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
995	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
996	msr	spsel, #0
997
998	/*
999	 * Note that we're exiting with SP_EL0 selected since the entry
1000	 * functions expect to have SP_EL0 selected with the tmp stack
1001	 * set.
1002	 */
1003
1004	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
1005	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
1006	mov	w2, #0
1007	mov	w3, #0
1008	/* w4 is already filled in above */
1009	smc	#0
1010	b	.	/* SMC should not return */
1011.endm
1012
1013/*
1014 * This struct is never used from C; it's only here to visualize the
1015 * layout.
1016 *
1017 * struct elx_nintr_rec {
1018 * 	uint64_t x[19 - 4]; x4..x18
1019 * 	uint64_t lr;
1020 * 	uint64_t sp_el0;
1021 * };
1022 */
1023#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
1024#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
1025#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
1026#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)
1027
1028/* The handler of native interrupts. */
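/*
 * Saves a small register set (x4-x18, lr and sp_el0) on the temporary
 * stack, calls thread_check_canaries() and the handler registered in
 * thread_nintr_handler_ptr, then returns from the exception, via
 * eret_to_el0 when the interrupted context was user mode.
 */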
1029.macro native_intr_handler mode:req
1030	/*
1031	 * Update core local flags
1032	 */
1033	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1034	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
1035	.ifc	\mode\(),fiq
1036	orr	w1, w1, #THREAD_CLF_FIQ
1037	.else
1038	orr	w1, w1, #THREAD_CLF_IRQ
1039	.endif
1040	orr	w1, w1, #THREAD_CLF_TMP
1041	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1042
1043	/* load tmp_stack_va_end */
1044	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
1045	/* Keep original SP_EL0 */
1046	mrs	x2, sp_el0
1047	/* Switch to SP_EL0 */
1048	msr	spsel, #0
1049	mov	sp, x1
1050
1051	/*
1052	 * Save, on the stack, the registers that can be clobbered by a call
1053	 * to a C function
1054	 */
1055	/* Make room for struct elx_nintr_rec */
1056	sub	sp, sp, #ELX_NINTR_REC_SIZE
1057	/* Store x4..x18 */
1058	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
1059	/* Store lr and original sp_el0 */
1060	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]
1061
1062	bl	thread_check_canaries
1063	adr	x16, thread_nintr_handler_ptr
1064	ldr	x16, [x16]
1065	blr	x16
1066
1067	/*
1068	 * Restore registers
1069	 */
1070	/* Restore x4..x18 */
1071	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
1072	/* Load lr and original sp_el0 */
1073	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
1074	/* Restore SP_EL0 */
1075	mov	sp, x2
1076	/* Switch back to SP_EL1 */
1077	msr	spsel, #1
1078
1079	/* Update core local flags */
1080	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
1081	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
1082	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
1083
1084	mrs	x0, spsr_el1
1085	/* Restore x2..x3 */
1086	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
1087	b_if_spsr_is_el0 w0, 1f
1088
1089	/* Restore x0..x1 */
1090	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
1091
1092	/* Return from exception */
1093	eret
10941:	b	eret_to_el0
1095.endm
1096
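/*
 * With CFG_ARM_GICV3 native interrupts are delivered as IRQs and foreign
 * interrupts as FIQs; without it the roles are swapped. elx_irq and
 * elx_fiq below pick the matching handler macro accordingly.
 */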
1097LOCAL_FUNC elx_irq , :
1098#if defined(CFG_ARM_GICV3)
1099	native_intr_handler	irq
1100#else
1101	foreign_intr_handler	irq
1102#endif
1103END_FUNC elx_irq
1104
1105LOCAL_FUNC elx_fiq , :
1106#if defined(CFG_ARM_GICV3)
1107	foreign_intr_handler	fiq
1108#else
1109	native_intr_handler	fiq
1110#endif
1111END_FUNC elx_fiq
1112