/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2017, Linaro Limited
 */

#include <arm.h>
#include <arm64_macros.S>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/thread_defs.h>
#include <mm/core_mmu.h>
#include <smccc.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

#include "thread_private.h"

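	/*
	 * Compute the address of the current thread's struct thread_ctx:
	 * reads the current thread index from the struct thread_core_local
	 * pointed to by \core_local and indexes the global threads[] array.
	 * The result ends up in x\res; w\tmp0 and x\tmp1 are clobbered.
	 */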
	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldr	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		ldr	x\res, =threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm

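	/*
	 * Branch to \label if the supplied SPSR value indicates a return
	 * to EL0, either AArch32 (the RW bit is set) or AArch64 with the
	 * EL field equal to zero.
	 */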
	.macro b_if_spsr_is_el0 reg, label
		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
		b.eq	\label
	.endm

LOCAL_FUNC vector_std_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (error detected) it will do a normal "C"
	 * return.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp
	bl	thread_handle_fast_smc
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16
	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
	adr	x16, thread_cpu_on_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
	adr	x16, thread_cpu_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
	adr	x16, thread_cpu_suspend_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
	adr	x16, thread_cpu_resume_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
	adr	x16, thread_system_off_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
	adr	x16, thread_system_reset_handler_ptr
	ldr	x16, [x16]
	blr	x16
	mov	x1, x0
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.
 *
 * Note that ARM-TF depends on the layout of this vector table; any change
 * in layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table
KEEP_PAGER thread_vector_table


/* void thread_resume(struct thread_ctx_regs *regs) */
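/*
 * Restores the state saved in *regs: SP, ELR_EL1, SPSR_EL1 and the
 * general purpose registers. If the saved SPSR targets EL0 the return
 * goes through eret_to_el0 (with x0/x1 staged in the core local buffer)
 * so that the user mapping/ASID is restored, otherwise it erets directly.
 */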
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3

	b_if_spsr_is_el0 w3, 1f

	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]
	eret

1:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC thread_resume

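/*
 * Entry point of a thread handling a standard (yielding) SMC. The SMC
 * arguments arrive in x0-x7 and are passed to __thread_std_smc_entry()
 * as a struct thread_smc_args on the stack. On return the thread state
 * is freed and the result is handed back to the secure monitor.
 */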
FUNC thread_std_smc_entry , :
	/* pass x0-x7 in a struct thread_smc_args */
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp

	/* Call the registered handler */
	bl	__thread_std_smc_entry

	/*
	 * Load the returned x0-x3 into preserved registers and skip the
	 * "returned" x4-x7 since they will not be returned to normal
	 * world.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
	add	sp, sp, #THREAD_SMC_ARGS_SIZE

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	bl	thread_get_tmp_sp
	mov	sp, x0

	bl	thread_state_free

	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x20
	mov	x2, x21
	mov	x3, x22
	mov	x4, x23
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
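/*
 * Performs an RPC to normal world: the callee saved registers and SP are
 * saved in the thread context, the thread is suspended with
 * .thread_rpc_return as resume address and the RPC arguments in rv[] are
 * passed to the secure monitor from the temporary stack. When normal
 * world returns, thread_resume() lands at .thread_rpc_return which
 * stores the returned w0-w5 back into rv[].
 */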
FUNC thread_rpc , :
	/* Read daif and create an SPSR */
	mrs	x1, daif
	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	push	x0, xzr
	push	x1, x30
	bl	thread_get_ctx_regs
	ldr	x30, [sp, #8]
	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
	mov	x19, x0

	bl	thread_get_tmp_sp
	pop	x1, xzr		/* Match "push x1, x30" above */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP]
	ldr	x20, [sp]	/* Get pointer to rv[] */
	mov	sp, x0		/* Switch to tmp stack */
	/*
	 * We need to read rv[] early, because thread_state_suspend()
	 * can invoke virt_unset_guest() which will unmap the pages
	 * where rv[] resides.
	 */
	load_wregs x20, 0, 21, 23	/* Load rv[] into w21-w23 */

	adr	x2, .thread_rpc_return
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	mov	x4, x0		/* Supply thread index */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x21
	mov	x2, x22
	mov	x3, x23
	smc	#0
	b	.		/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * stored in THREAD_CTX above.
	 *
	 * Jumps here from thread_resume above when RPC has returned. The
	 * IRQ and FIQ bits are restored to what they were when this
	 * function was originally entered.
	 */
	pop	x16, xzr	/* Get pointer to rv[] */
	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
	ret
END_FUNC thread_rpc
KEEP_PAGER thread_rpc

FUNC thread_smc , :
	smc	#0
	ret
END_FUNC thread_smc

FUNC thread_init_vbar , :
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
 *               unsigned long user_func, unsigned long spsr,
 *               uint32_t *exit_status0, uint32_t *exit_status1)
 *
 */
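/*
 * Sets up a struct thread_user_mode_rec on the kernel stack with the
 * callee saved registers and the exit status pointers, saves the kernel
 * SP in the thread context and erets to user mode at user_func with
 * user_sp and spsr. The function returns when thread_unwind_user_mode()
 * is later called on this kernel stack.
 */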
FUNC __thread_enter_user_mode , :
	ldr	x8, [sp]
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Disable exceptions
	 * Save kern sp in x19
	 * Switch to SP_EL1
	 */
	msr	daifset, #DAIFBIT_ALL
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save kernel stack pointer to ensure that el0_svc() uses the
	 * correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	msr	spsr_el1, x6
	/* Set user sp */
	mov	x13, x4		/* Used when running TA in Aarch32 */
	msr	sp_el0, x4	/* Used when running TA in Aarch64 */
	/* Set user function */
	msr	elr_el1, x5
	/* Set frame pointer (user stack can't be unwound past this point) */
	mov x29, #0

	/* Jump into user mode */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b eret_to_el0
END_FUNC __thread_enter_user_mode
KEEP_PAGER __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 * 		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
	str	w1, [x3]
	str	w2, [x4]
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes
	 * the vector entry as the parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

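	/*
	 * Executed at the start of every exception taken from EL0. With
	 * CFG_CORE_UNMAP_CORE_AT_EL0 it stashes x0/x1, switches TTBR0 back
	 * to the full kernel mapping and kernel ASID, points VBAR to the
	 * full-mapping alias of the vector (and adjusts SP for the Spectre
	 * workaround), then saves x0-x3 in the core local buffer. Without
	 * that config it only saves x0-x3 and switches back to the kernel
	 * ASID.
	 */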
	.macro restore_mapping
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/* Temporarily save x0, x1 */
		msr	tpidr_el1, x0
		msr	tpidrro_el0, x1

		/* Update the mapping to use the full kernel mapping */
		mrs	x0, ttbr0_el1
		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb

		/* Jump into the full mapping and continue execution */
		ldr	x0, =1f
		br	x0
	1:

		/* Point VBAR to the vector in the full mapping */
		adr	x0, thread_user_kcode_offset
		ldr	x0, [x0]
		mrs	x1, vbar_el1
		add	x1, x1, x0
		msr	vbar_el1, x1
		isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
		/*
		 * Update the SP with thread_user_kdata_sp_offset as
		 * described in init_user_kcode().
		 */
		adr	x0, thread_user_kdata_sp_offset
		ldr	x0, [x0]
		add	sp, sp, x0
#endif

		/* Restore x0, x1 */
		mrs	x0, tpidr_el1
		mrs	x1, tpidrro_el0
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
#else
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mrs	x0, ttbr0_el1
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	.endm

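/*
 * EL1 exception vector, installed in VBAR_EL1 (see thread_init_vbar()).
 * Each entry is limited to 32 instructions, which is checked with
 * check_vector_size; unused entries spin on themselves.
 */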
#define INV_INSN	0
	.section .text.thread_excp_vect
	.align	11, INV_INSN
FUNC thread_excp_vect , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el1_sync_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size el1_sync_sp0

	.align	7, INV_INSN
el1_irq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el1_irq_sp0

	.align	7, INV_INSN
el1_fiq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el1_fiq_sp0

	.align	7, INV_INSN
el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el1_sync_sp1:
	b	el1_sync_sp1
	check_vector_size el1_sync_sp1

	.align	7, INV_INSN
el1_irq_sp1:
	b	el1_irq_sp1
	check_vector_size el1_irq_sp1

	.align	7, INV_INSN
el1_fiq_sp1:
	b	el1_fiq_sp1
	check_vector_size el1_fiq_sp1

	.align	7, INV_INSN
el1_serror_sp1:
	b	el1_serror_sp1
	check_vector_size el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el0_sync_a64:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a64

	.align	7, INV_INSN
el0_irq_a64:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a64

	.align	7, INV_INSN
el0_fiq_a64:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a64

	.align	7, INV_INSN
el0_serror_a64:
	b	el0_serror_a64
	check_vector_size el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el0_sync_a32:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7, INV_INSN
el0_irq_a32:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7, INV_INSN
el0_fiq_a32:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7, INV_INSN
el0_serror_a32:
	b	el0_serror_a32
	check_vector_size el0_serror_a32

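/*
 * Alternative vector used with CFG_CORE_WORKAROUND_SPECTRE_BP_SEC: every
 * exception taken from a lower EL first invalidates the branch predictor
 * by issuing SMCCC_ARCH_WORKAROUND_1 to the secure monitor, then falls
 * through to the regular handlers above.
 */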
#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
	.macro invalidate_branch_predictor
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mov_imm	x0, SMCCC_ARCH_WORKAROUND_1
		smc	#0
		load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	.endm

	.align	11, INV_INSN
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el1_sync_sp0:
	b	el1_sync_sp0
	check_vector_size workaround_el1_sync_sp0

	.align	7, INV_INSN
workaround_el1_irq_sp0:
	b	el1_irq_sp0
	check_vector_size workaround_el1_irq_sp0

	.align	7, INV_INSN
workaround_el1_fiq_sp0:
	b	el1_fiq_sp0
	check_vector_size workaround_el1_fiq_sp0

	.align	7, INV_INSN
workaround_el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size workaround_el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el1_sync_sp1:
	b	workaround_el1_sync_sp1
	check_vector_size workaround_el1_sync_sp1

	.align	7, INV_INSN
workaround_el1_irq_sp1:
	b	workaround_el1_irq_sp1
	check_vector_size workaround_el1_irq_sp1

	.align	7, INV_INSN
workaround_el1_fiq_sp1:
	b	workaround_el1_fiq_sp1
	check_vector_size workaround_el1_fiq_sp1

	.align	7, INV_INSN
workaround_el1_serror_sp1:
	b	workaround_el1_serror_sp1
	check_vector_size workaround_el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el0_sync_a64:
	invalidate_branch_predictor
	b	el0_sync_a64
	check_vector_size workaround_el0_sync_a64

	.align	7, INV_INSN
workaround_el0_irq_a64:
	invalidate_branch_predictor
	b	el0_irq_a64
	check_vector_size workaround_el0_irq_a64

	.align	7, INV_INSN
workaround_el0_fiq_a64:
	invalidate_branch_predictor
	b	el0_fiq_a64
	check_vector_size workaround_el0_fiq_a64

	.align	7, INV_INSN
workaround_el0_serror_a64:
	b	workaround_el0_serror_a64
	check_vector_size workaround_el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el0_sync_a32:
	invalidate_branch_predictor
	b	el0_sync_a32
	check_vector_size workaround_el0_sync_a32

	.align	7, INV_INSN
workaround_el0_irq_a32:
	invalidate_branch_predictor
	b	el0_irq_a32
	check_vector_size workaround_el0_irq_a32

	.align	7, INV_INSN
workaround_el0_fiq_a32:
	invalidate_branch_predictor
	b	el0_fiq_a32
	check_vector_size workaround_el0_fiq_a32

	.align	7, INV_INSN
workaround_el0_serror_a32:
	b	workaround_el0_serror_a32
	check_vector_size workaround_el0_serror_a32
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

/*
 * We're keeping this code in the same section as the vector to make sure
 * that it's always available.
 */
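/*
 * eret_to_el0 is the common exit path to user space: with
 * CFG_CORE_UNMAP_CORE_AT_EL0 it points VBAR to the reduced-mapping alias
 * of the vector, switches TTBR0 to the reduced mapping and user ASID
 * (and adjusts SP for the Spectre workaround), restores x0/x1 from the
 * core local buffer and erets.
 */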
eret_to_el0:

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point VBAR to the vector in the reduced mapping */
	adr	x0, thread_user_kcode_offset
	ldr	x0, [x0]
	mrs	x1, vbar_el1
	sub	x1, x1, x0
	msr	vbar_el1, x1
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/* Store the SP offset in tpidr_el1 to be used below to update SP */
	adr	x1, thread_user_kdata_sp_offset
	ldr	x1, [x1]
	msr	tpidr_el1, x1
#endif

	/* Jump into the reduced mapping and continue execution */
	ldr	x1, =1f
	sub	x1, x1, x0
	br	x1
1:

	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	msr	tpidrro_el0, x0

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x0, ttbr0_el1
	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/*
	 * Update the SP with thread_user_kdata_sp_offset as described in
	 * init_user_kcode().
	 */
	mrs	x0, tpidr_el1
	sub	sp, sp, x0
#endif

	mrs	x0, tpidrro_el0
#else
	mrs	x0, ttbr0_el1
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	eret

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active,
	 * which means executing with the reduced mapping, so the code
	 * needs to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	/* Mask all exceptions */
	mrs	x6, daif	/* this register must be preserved */
	msr	daifset, #DAIFBIT_ALL

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point VBAR to the vector in the reduced mapping */
	adr	x2, thread_user_kcode_offset
	ldr	x2, [x2]
	mrs	x4, vbar_el1	/* this register must be preserved */
	sub	x3, x4, x2
	msr	vbar_el1, x3
	isb

	/* Jump into the reduced mapping and continue execution */
	ldr	x3, =1f
	sub	x3, x3, x2
	br	x3
1:

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x5, ttbr0_el1	/* this register must be preserved */
	add	x2, x5, #CORE_MMU_L1_TBL_OFFSET
	orr	x2, x2, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x2
	isb

#else
	mrs	x5, ttbr0_el1	/* this register must be preserved */
	orr	x2, x5, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in x2 */
	mrs	x3, ctr_el0
	and	x3, x3, #CTR_IMINLINE_MASK
	mov	x2, #CTR_WORD_SIZE
	lsl	x2, x2, x3

	add	x1, x0, x1
	sub	x3, x2, #1
	bic	x0, x0, x3
1:
	ic	ivau, x0
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	ish

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Update the mapping to use the full kernel mapping and ASID */
	msr	ttbr0_el1, x5
	isb

	/* Jump into the full mapping and continue execution */
	ldr	x0, =1f
	br	x0
1:

	/* Point VBAR to the vector in the full mapping */
	msr	vbar_el1, x4
	isb
#else
	/* switch to kernel mode ASID */
	msr	ttbr0_el1, x5
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	msr	daif, x6	/* restore exceptions */
	ret	/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect

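/*
 * Handles an SVC from user mode: switches to the thread's kernel stack,
 * saves the user context in a struct thread_svc_regs, unmasks native
 * interrupts, SError and debug exceptions and calls tee_svc_handler().
 * On return the saved state is restored and execution goes back to where
 * the SVC was taken, through eret_to_el0 when returning to user mode.
 */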
LOCAL_FUNC el0_svc , :
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	mov	x0, sp

	/*
	 * Unmask native interrupts, SError, and debug exceptions since we
	 * have nothing left in sp_el1. Note that the SVC handler is
	 * expected to re-enable foreign interrupts by itself.
	 */
#if defined(CFG_ARM_GICV3)
	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
#else
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
#endif

	/* Call the handler */
	bl	tee_svc_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This is needed when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
	mov	sp, x0
	b_if_spsr_is_el0 w1, 1f
	ldp	x0, x1, [x30, #THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	eret

1:	ldp	x0, x1, [x30, #THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC el0_svc

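/*
 * Handles a synchronous exception taken from EL1: selects the abort
 * stack (or the tmp stack if the abort happened while already handling
 * an abort), saves the full register state in a struct thread_abort_regs
 * and calls abort_handler().
 */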
LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el1_sync_abort

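	/*
	 * Handles a synchronous exception taken from EL0 that isn't an
	 * SVC: saves the state in a struct thread_abort_regs on the abort
	 * stack and calls abort_handler(), then erets back to where the
	 * exception was taken (through eret_to_el0 when returning to user
	 * mode).
	 */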
	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in EL1_REC */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x2 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3

	b_if_spsr_is_el0 w0, 1f

	/* Restore x0 to x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	eret
1:	b	eret_to_el0
END_FUNC el0_sync_abort

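/*
 * A foreign interrupt is handed over to normal world: the full thread
 * context is saved, the thread is suspended with
 * THREAD_FLAGS_EXIT_ON_FOREIGN_INTR, execution switches to the tmp stack
 * and an SMC returns to the monitor with
 * OPTEE_SMC_RETURN_RPC_FOREIGN_INTR so normal world can service the
 * interrupt.
 */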
/* The handler of foreign interrupts. */
.macro foreign_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend
	mov	w4, w0		/* Supply thread index */

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * functions expect to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	w2, #0
	mov	w3, #0
	/* w4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
.endm

/*
 * This struct is never used from C; it's only here to visualize the
 * layout.
 *
 * struct elx_nintr_rec {
 * 	uint64_t x[19 - 4]; x4..x18
 * 	uint64_t lr;
 * 	uint64_t sp_el0;
 * };
 */
#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)

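/*
 * A native interrupt is handled inside OP-TEE: the registers that a C
 * call may clobber are saved in a struct elx_nintr_rec on the tmp stack
 * (x0-x3 are already in the core local buffer), the registered native
 * interrupt handler is called and the interrupted context is then
 * resumed, through eret_to_el0 when the interrupt was taken from user
 * mode.
 */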
/* The handler of native interrupts. */
.macro native_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save registers on stack that can be corrupted by a call to
	 * a C function
	 */
	/* Make room for struct elx_nintr_rec */
	sub	sp, sp, #ELX_NINTR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]

	bl	thread_check_canaries
	adr	x16, thread_nintr_handler_ptr
	ldr	x16, [x16]
	blr	x16

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	mrs	x0, spsr_el1
	/* Restore x2..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
	b_if_spsr_is_el0 w0, 1f

	/* Restore x0..x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	eret
1:	b	eret_to_el0
.endm

LOCAL_FUNC elx_irq , :
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
END_FUNC elx_fiq
