xref: /optee_os/core/arch/arm/kernel/thread_a64.S (revision c96bced4504bd39ff8ccc57a21d29fa97aff4506)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2015-2017, Linaro Limited
4 */
5
6#include <arm.h>
7#include <arm64_macros.S>
8#include <asm.S>
9#include <generated/asm-defines.h>
10#include <keep.h>
11#include <kernel/cache_helpers.h>
12#include <kernel/thread_defs.h>
13#include <mm/core_mmu.h>
14#include <smccc.h>
15#include <sm/optee_smc.h>
16#include <sm/teesmc_opteed.h>
17#include <sm/teesmc_opteed_macros.h>
18
19#include "thread_private.h"
20
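	/*
	 * Compute a pointer to the current thread's struct thread_ctx in
	 * x\res: the thread index is read from the struct thread_core_local
	 * pointed to by \core_local and used to index the threads[] array.
	 * Clobbers x\tmp0 and x\tmp1.
	 */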
21	.macro get_thread_ctx core_local, res, tmp0, tmp1
22		ldr	w\tmp0, [\core_local, \
23				#THREAD_CORE_LOCAL_CURR_THREAD]
24		ldr	x\res, =threads
25		mov	x\tmp1, #THREAD_CTX_SIZE
26		madd	x\res, x\tmp0, x\tmp1, x\res
27	.endm
28
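	/*
	 * Branch to \label if the SPSR value in \reg describes a return to
	 * EL0, that is, AArch32 mode or AArch64 with the EL field set to EL0.
	 */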
29	.macro b_if_spsr_is_el0 reg, label
30		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
31		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
32		b.eq	\label
33	.endm
34
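/*
 * Entered from the secure monitor to handle a standard (yielding) SMC.
 * The arguments from the normal world arrive in x0-x7 and are passed to
 * thread_handle_std_smc() as a struct thread_smc_args on the stack.
 */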
35LOCAL_FUNC vector_std_smc_entry , :
36	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
37	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
38	mov	x0, sp
39	bl	thread_handle_std_smc
40	/*
41	 * Normally thread_handle_std_smc() should return via
42	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
43	 * hasn't switched stacks (an error was detected) it will do a normal "C"
44	 * return.
45	 */
46	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
47	add	sp, sp, #THREAD_SMC_ARGS_SIZE
48	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
49	smc	#0
50	b	.	/* SMC should not return */
51END_FUNC vector_std_smc_entry
52
53LOCAL_FUNC vector_fast_smc_entry , :
54	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
55	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
56	mov	x0, sp
57	bl	thread_handle_fast_smc
58	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
59	add	sp, sp, #THREAD_SMC_ARGS_SIZE
60	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
61	smc	#0
62	b	.	/* SMC should not return */
63END_FUNC vector_fast_smc_entry
64
65LOCAL_FUNC vector_fiq_entry , :
66	/* Secure Monitor received a FIQ and passed control to us. */
67	bl	thread_check_canaries
68	adr	x16, thread_nintr_handler_ptr
69	ldr	x16, [x16]
70	blr	x16
71	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
72	smc	#0
73	b	.	/* SMC should not return */
74END_FUNC vector_fiq_entry
75
76LOCAL_FUNC vector_cpu_on_entry , :
77	adr	x16, thread_cpu_on_handler_ptr
78	ldr	x16, [x16]
79	blr	x16
80	mov	x1, x0
81	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
82	smc	#0
83	b	.	/* SMC should not return */
84END_FUNC vector_cpu_on_entry
85
86LOCAL_FUNC vector_cpu_off_entry , :
87	adr	x16, thread_cpu_off_handler_ptr
88	ldr	x16, [x16]
89	blr	x16
90	mov	x1, x0
91	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
92	smc	#0
93	b	.	/* SMC should not return */
94END_FUNC vector_cpu_off_entry
95
96LOCAL_FUNC vector_cpu_suspend_entry , :
97	adr	x16, thread_cpu_suspend_handler_ptr
98	ldr	x16, [x16]
99	blr	x16
100	mov	x1, x0
101	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
102	smc	#0
103	b	.	/* SMC should not return */
104END_FUNC vector_cpu_suspend_entry
105
106LOCAL_FUNC vector_cpu_resume_entry , :
107	adr	x16, thread_cpu_resume_handler_ptr
108	ldr	x16, [x16]
109	blr	x16
110	mov	x1, x0
111	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
112	smc	#0
113	b	.	/* SMC should not return */
114END_FUNC vector_cpu_resume_entry
115
116LOCAL_FUNC vector_system_off_entry , :
117	adr	x16, thread_system_off_handler_ptr
118	ldr	x16, [x16]
119	blr	x16
120	mov	x1, x0
121	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
122	smc	#0
123	b	.	/* SMC should not return */
124END_FUNC vector_system_off_entry
125
126LOCAL_FUNC vector_system_reset_entry , :
127	adr	x16, thread_system_reset_handler_ptr
128	ldr	x16, [x16]
129	blr	x16
130	mov	x1, x0
131	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
132	smc	#0
133	b	.	/* SMC should not return */
134END_FUNC vector_system_reset_entry
135
136/*
137 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
138 * initialization.
139 *
140 * Note that ARM-TF depends on the layout of this vector table; any change
141 * in layout has to be synced with ARM-TF.
142 */
143FUNC thread_vector_table , :
144	b	vector_std_smc_entry
145	b	vector_fast_smc_entry
146	b	vector_cpu_on_entry
147	b	vector_cpu_off_entry
148	b	vector_cpu_resume_entry
149	b	vector_cpu_suspend_entry
150	b	vector_fiq_entry
151	b	vector_system_off_entry
152	b	vector_system_reset_entry
153END_FUNC thread_vector_table
154KEEP_PAGER thread_vector_table
155
156
157/* void thread_resume(struct thread_ctx_regs *regs) */
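/*
 * Restores the thread context in *regs and resumes execution at the saved
 * ELR with the saved SPSR. If the SPSR describes a return to EL0 the
 * return goes through eret_to_el0 so that the user mapping is restored
 * first.
 */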
158FUNC thread_resume , :
159	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
160	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
161	mov	sp, x1
162	msr	elr_el1, x2
163	msr	spsr_el1, x3
164
165	b_if_spsr_is_el0 w3, 1f
166
167	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
168	ldr	x0, [x0, THREAD_CTX_REGS_X0]
169	eret
170
1711:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
172	ldr	x0, [x0, THREAD_CTX_REGS_X0]
173
174	msr	spsel, #1
175	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
176	b	eret_to_el0
177END_FUNC thread_resume
178
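/*
 * Entry point of a thread handling a standard SMC: runs on the thread
 * stack, wraps __thread_std_smc_entry() and, when that returns, frees the
 * thread state and returns to the normal world through the secure monitor.
 */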
179FUNC thread_std_smc_entry , :
180	/* pass x0-x7 in a struct thread_smc_args */
181	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
182	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
183	mov	x0, sp
184
185	/* Call the registered handler */
186	bl	__thread_std_smc_entry
187
188	/*
189	 * Load the returned x0-x3 into preserved registers and skip the
190	 * "returned" x4-x7 since they will not be returned to normal
191	 * world.
192	 */
193	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
194	add	sp, sp, #THREAD_SMC_ARGS_SIZE
195
196	/* Mask all maskable exceptions before switching to temporary stack */
197	msr	daifset, #DAIFBIT_ALL
198	bl	thread_get_tmp_sp
199	mov	sp, x0
200
201	bl	thread_state_free
202
203	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
204	mov	x1, x20
205	mov	x2, x21
206	mov	x3, x22
207	mov	x4, x23
208	smc	#0
209	b	.	/* SMC should not return */
210END_FUNC thread_std_smc_entry
211
212/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
213FUNC thread_rpc , :
214	/* Read daif and create an SPSR */
215	mrs	x1, daif
216	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)
217
218	/* Mask all maskable exceptions before switching to temporary stack */
219	msr	daifset, #DAIFBIT_ALL
220	push	x0, xzr
221	push	x1, x30
222	bl	thread_get_ctx_regs
223	ldr	x30, [sp, #8]
224	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
225	mov	x19, x0
226
227	bl	thread_get_tmp_sp
228	pop	x1, xzr		/* Match "push x1, x30" above */
229	mov	x2, sp
230	str	x2, [x19, #THREAD_CTX_REGS_SP]
231	ldr	x20, [sp]	/* Get pointer to rv[] */
232	mov	sp, x0		/* Switch to tmp stack */
233	/*
234	 * We need to read rv[] early, because thread_state_suspend
235	 * can invoke virt_unset_guest() which will unmap the pages
236	 * where rv[] resides.
237	 */
238	load_wregs x20, 0, 21, 23	/* Load rv[] into w21-w23 */
239
240	adr	x2, .thread_rpc_return
241	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
242	bl	thread_state_suspend
243	mov	x4, x0		/* Supply thread index */
244	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
245	mov	x1, x21
246	mov	x2, x22
247	mov	x3, x23
248	smc	#0
249	b	.		/* SMC should not return */
250
251.thread_rpc_return:
252	/*
253	 * At this point the stack pointer has been restored to the value
254	 * stored in THREAD_CTX above.
255	 *
256	 * Execution jumps here from thread_resume above when RPC has
257	 * returned. The IRQ and FIQ bits are restored to what they were
258	 * when this function was originally entered.
259	 */
260	pop	x16, xzr	/* Get pointer to rv[] */
261	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
262	ret
263END_FUNC thread_rpc
264KEEP_PAGER thread_rpc
265
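/*
 * Issue an SMC with the argument registers as set up by the caller and
 * return with the result registers untouched.
 */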
266FUNC thread_smc , :
267	smc	#0
268	ret
269END_FUNC thread_smc
270
271FUNC thread_init_vbar , :
272	msr	vbar_el1, x0
273	ret
274END_FUNC thread_init_vbar
275KEEP_PAGER thread_init_vbar
276
277/*
278 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
279 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
280 *               unsigned long user_func, unsigned long spsr,
281 *               uint32_t *exit_status0, uint32_t *exit_status1)
282 *
283 */
284FUNC __thread_enter_user_mode , :
285	ldr	x8, [sp]
286	/*
287	 * Create and fill in the struct thread_user_mode_rec
288	 */
289	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
290	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
291	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
292
293	/*
294	 * Switch to SP_EL1
295	 * Disable exceptions
296	 * Save kern sp in x19
297	 */
298	msr	daifset, #DAIFBIT_ALL
299	mov	x19, sp
300	msr	spsel, #1
301
302	/*
303	 * Save the kernel stack pointer in the thread context
304	 */
305	/* get pointer to current thread context */
306	get_thread_ctx sp, 21, 20, 22
307	/*
308	 * Save the kernel stack pointer to ensure that el0_svc() uses the
309	 * correct stack pointer
310	 */
311	str	x19, [x21, #THREAD_CTX_KERN_SP]
312
313	/*
314	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
315	 */
316	msr	spsr_el1, x6
317	/* Set user sp */
318	mov	x13, x4		/* Used when running TA in Aarch32 */
319	msr	sp_el0, x4	/* Used when running TA in Aarch64 */
320	/* Set user function */
321	msr	elr_el1, x5
322	/* Set frame pointer (user stack can't be unwound past this point) */
323	mov x29, #0
324
325	/* Jump into user mode */
326	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
327	b eret_to_el0
328END_FUNC __thread_enter_user_mode
329KEEP_PAGER __thread_enter_user_mode
330
331/*
332 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
333 * 		uint32_t exit_status1);
334 * See description in thread.h
335 */
336FUNC thread_unwind_user_mode , :
337	/* Store the exit status */
338	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
339	str	w1, [x3]
340	str	w2, [x4]
341	/* Restore x19..x30 */
342	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
343	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
344	/* Return from the call of thread_enter_user_mode() */
345	ret
346END_FUNC thread_unwind_user_mode
347
348	/*
349	 * This macro verifies that a given vector doesn't exceed the
350	 * architectural limit of 32 instructions. It is meant to be placed
351	 * immediately after the last instruction in the vector and takes the
352	 * vector entry as its parameter.
353	 */
354	.macro check_vector_size since
355	  .if (. - \since) > (32 * 4)
356	    .error "Vector exceeds 32 instructions"
357	  .endif
358	.endm
359
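	/*
	 * Used on exception entry from EL0: switches TTBR0 back to the
	 * kernel ASID (and, with CFG_CORE_UNMAP_CORE_AT_EL0, back to the
	 * full kernel mapping and vector) and saves x0-x3 in the core local
	 * record on SP_EL1.
	 */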
360	.macro restore_mapping
361#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
362		/* Temporarily save x0, x1 */
363		msr	tpidr_el1, x0
364		msr	tpidrro_el0, x1
365
366		/* Update the mapping to use the full kernel mapping */
367		mrs	x0, ttbr0_el1
368		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
369		/* switch to kernel mode ASID */
370		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
371		msr	ttbr0_el1, x0
372		isb
373
374		/* Jump into the full mapping and continue execution */
375		ldr	x0, =1f
376		br	x0
377	1:
378
379		/* Point to the vector into the full mapping */
380		adr	x0, thread_user_kcode_offset
381		ldr	x0, [x0]
382		mrs	x1, vbar_el1
383		add	x1, x1, x0
384		msr	vbar_el1, x1
385		isb
386
387#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
388		/*
389		 * Update the SP with thread_user_kdata_sp_offset as
390		 * described in init_user_kcode().
391		 */
392		adr	x0, thread_user_kdata_sp_offset
393		ldr	x0, [x0]
394		add	sp, sp, x0
395#endif
396
397		/* Restore x0, x1 */
398		mrs	x0, tpidr_el1
399		mrs	x1, tpidrro_el0
400		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
401#else
402		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
403		mrs	x0, ttbr0_el1
404		/* switch to kernel mode ASID */
405		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
406		msr	ttbr0_el1, x0
407		isb
408#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
409	.endm
410
411#define INV_INSN	0
412	.section .text.thread_excp_vect
413	.align	11, INV_INSN
414FUNC thread_excp_vect , :
415	/* -----------------------------------------------------
416	 * EL1 with SP0 : 0x0 - 0x180
417	 * -----------------------------------------------------
418	 */
419	.align	7, INV_INSN
420el1_sync_sp0:
421	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
422	b	el1_sync_abort
423	check_vector_size el1_sync_sp0
424
425	.align	7, INV_INSN
426el1_irq_sp0:
427	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
428	b	elx_irq
429	check_vector_size el1_irq_sp0
430
431	.align	7, INV_INSN
432el1_fiq_sp0:
433	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
434	b	elx_fiq
435	check_vector_size el1_fiq_sp0
436
437	.align	7, INV_INSN
438el1_serror_sp0:
439	b	el1_serror_sp0
440	check_vector_size el1_serror_sp0
441
442	/* -----------------------------------------------------
443	 * Current EL with SP1: 0x200 - 0x380
444	 * -----------------------------------------------------
445	 */
446	.align	7, INV_INSN
447el1_sync_sp1:
448	b	el1_sync_sp1
449	check_vector_size el1_sync_sp1
450
451	.align	7, INV_INSN
452el1_irq_sp1:
453	b	el1_irq_sp1
454	check_vector_size el1_irq_sp1
455
456	.align	7, INV_INSN
457el1_fiq_sp1:
458	b	el1_fiq_sp1
459	check_vector_size el1_fiq_sp1
460
461	.align	7, INV_INSN
462el1_serror_sp1:
463	b	el1_serror_sp1
464	check_vector_size el1_serror_sp1
465
466	/* -----------------------------------------------------
467	 * Lower EL using AArch64 : 0x400 - 0x580
468	 * -----------------------------------------------------
469	 */
470	.align	7, INV_INSN
471el0_sync_a64:
472	restore_mapping
473
474	mrs	x2, esr_el1
475	mrs	x3, sp_el0
476	lsr	x2, x2, #ESR_EC_SHIFT
477	cmp	x2, #ESR_EC_AARCH64_SVC
478	b.eq	el0_svc
479	b	el0_sync_abort
480	check_vector_size el0_sync_a64
481
482	.align	7, INV_INSN
483el0_irq_a64:
484	restore_mapping
485
486	b	elx_irq
487	check_vector_size el0_irq_a64
488
489	.align	7, INV_INSN
490el0_fiq_a64:
491	restore_mapping
492
493	b	elx_fiq
494	check_vector_size el0_fiq_a64
495
496	.align	7, INV_INSN
497el0_serror_a64:
498	b   	el0_serror_a64
499	check_vector_size el0_serror_a64
500
501	/* -----------------------------------------------------
502	 * Lower EL using AArch32 : 0x600 - 0x780
503	 * -----------------------------------------------------
504	 */
505	.align	7, INV_INSN
506el0_sync_a32:
507	restore_mapping
508
509	mrs	x2, esr_el1
510	mrs	x3, sp_el0
511	lsr	x2, x2, #ESR_EC_SHIFT
512	cmp	x2, #ESR_EC_AARCH32_SVC
513	b.eq	el0_svc
514	b	el0_sync_abort
515	check_vector_size el0_sync_a32
516
517	.align	7, INV_INSN
518el0_irq_a32:
519	restore_mapping
520
521	b	elx_irq
522	check_vector_size el0_irq_a32
523
524	.align	7, INV_INSN
525el0_fiq_a32:
526	restore_mapping
527
528	b	elx_fiq
529	check_vector_size el0_fiq_a32
530
531	.align	7, INV_INSN
532el0_serror_a32:
533	b	el0_serror_a32
534	check_vector_size el0_serror_a32
535
536#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
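	/*
	 * Ask the secure monitor to invalidate the branch predictor
	 * (SMCCC_ARCH_WORKAROUND_1) before handling an exception coming
	 * from EL0.
	 */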
537	.macro invalidate_branch_predictor
538		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
539		mov_imm	x0, SMCCC_ARCH_WORKAROUND_1
540		smc	#0
541		load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
542	.endm
543
544	.align	11, INV_INSN
545	.global thread_excp_vect_workaround
546thread_excp_vect_workaround:
547	/* -----------------------------------------------------
548	 * EL1 with SP0 : 0x0 - 0x180
549	 * -----------------------------------------------------
550	 */
551	.align	7, INV_INSN
552workaround_el1_sync_sp0:
553	b	el1_sync_sp0
554	check_vector_size workaround_el1_sync_sp0
555
556	.align	7, INV_INSN
557workaround_el1_irq_sp0:
558	b	el1_irq_sp0
559	check_vector_size workaround_el1_irq_sp0
560
561	.align	7, INV_INSN
562workaround_el1_fiq_sp0:
563	b	el1_fiq_sp0
564	check_vector_size workaround_el1_fiq_sp0
565
566	.align	7, INV_INSN
567workaround_el1_serror_sp0:
568	b	el1_serror_sp0
569	check_vector_size workaround_el1_serror_sp0
570
571	/* -----------------------------------------------------
572	 * Current EL with SP1: 0x200 - 0x380
573	 * -----------------------------------------------------
574	 */
575	.align	7, INV_INSN
576workaround_el1_sync_sp1:
577	b	workaround_el1_sync_sp1
578	check_vector_size workaround_el1_sync_sp1
579
580	.align	7, INV_INSN
581workaround_el1_irq_sp1:
582	b	workaround_el1_irq_sp1
583	check_vector_size workaround_el1_irq_sp1
584
585	.align	7, INV_INSN
586workaround_el1_fiq_sp1:
587	b	workaround_el1_fiq_sp1
588	check_vector_size workaround_el1_fiq_sp1
589
590	.align	7, INV_INSN
591workaround_el1_serror_sp1:
592	b	workaround_el1_serror_sp1
593	check_vector_size workaround_el1_serror_sp1
594
595	/* -----------------------------------------------------
596	 * Lower EL using AArch64 : 0x400 - 0x580
597	 * -----------------------------------------------------
598	 */
599	.align	7, INV_INSN
600workaround_el0_sync_a64:
601	invalidate_branch_predictor
602	b	el0_sync_a64
603	check_vector_size workaround_el0_sync_a64
604
605	.align	7, INV_INSN
606workaround_el0_irq_a64:
607	invalidate_branch_predictor
608	b	el0_irq_a64
609	check_vector_size workaround_el0_irq_a64
610
611	.align	7, INV_INSN
612workaround_el0_fiq_a64:
613	invalidate_branch_predictor
614	b	el0_fiq_a64
615	check_vector_size workaround_el0_fiq_a64
616
617	.align	7, INV_INSN
618workaround_el0_serror_a64:
619	b   	workaround_el0_serror_a64
620	check_vector_size workaround_el0_serror_a64
621
622	/* -----------------------------------------------------
623	 * Lower EL using AArch32 : 0x600 - 0x780
624	 * -----------------------------------------------------
625	 */
626	.align	7, INV_INSN
627workaround_el0_sync_a32:
628	invalidate_branch_predictor
629	b	el0_sync_a32
630	check_vector_size workaround_el0_sync_a32
631
632	.align	7, INV_INSN
633workaround_el0_irq_a32:
634	invalidate_branch_predictor
635	b	el0_irq_a32
636	check_vector_size workaround_el0_irq_a32
637
638	.align	7, INV_INSN
639workaround_el0_fiq_a32:
640	invalidate_branch_predictor
641	b	el0_fiq_a32
642	check_vector_size workaround_el0_fiq_a32
643
644	.align	7, INV_INSN
645workaround_el0_serror_a32:
646	b	workaround_el0_serror_a32
647	check_vector_size workaround_el0_serror_a32
648#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
649
650/*
651 * We're keeping this code in the same section as the vector to make sure
652 * that it's always available.
653 */
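/*
 * Return to EL0. Callers have x0 and x1 saved in THREAD_CORE_LOCAL_X0/X1
 * with SP_EL1 selected and all other state for the return already set up.
 * TTBR0 is switched to the user ASID (and, with CFG_CORE_UNMAP_CORE_AT_EL0,
 * to the reduced mapping and vector) before the final eret.
 */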
654eret_to_el0:
655
656#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
657	/* Point to the vector into the reduced mapping */
658	adr	x0, thread_user_kcode_offset
659	ldr	x0, [x0]
660	mrs	x1, vbar_el1
661	sub	x1, x1, x0
662	msr	vbar_el1, x1
663	isb
664
665#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
666	/* Store the SP offset in tpidr_el1 to be used below to update SP */
667	adr	x1, thread_user_kdata_sp_offset
668	ldr	x1, [x1]
669	msr	tpidr_el1, x1
670#endif
671
672	/* Jump into the reduced mapping and continue execution */
673	ldr	x1, =1f
674	sub	x1, x1, x0
675	br	x1
6761:
677
678	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
679	msr	tpidrro_el0, x0
680
681	/* Update the mapping to exclude the full kernel mapping */
682	mrs	x0, ttbr0_el1
683	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
684	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
685	msr	ttbr0_el1, x0
686	isb
687
688#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
689	/*
690	 * Update the SP with thread_user_kdata_sp_offset as described in
691	 * init_user_kcode().
692	 */
693	mrs	x0, tpidr_el1
694	sub	sp, sp, x0
695#endif
696
697	mrs	x0, tpidrro_el0
698#else
699	mrs	x0, ttbr0_el1
700	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
701	msr	ttbr0_el1, x0
702	isb
703	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
704#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
705
706	eret
707
708	/*
709	 * void icache_inv_user_range(void *addr, size_t size);
710	 *
711	 * This function has to execute with the user space ASID active,
712	 * which means executing with the reduced mapping, so the code needs
713	 * to be located here together with the vector.
714	 */
715	.global icache_inv_user_range
716	.type icache_inv_user_range , %function
717icache_inv_user_range:
718	/* Mask all exceptions */
719	mrs	x6, daif	/* this register must be preserved */
720	msr	daifset, #DAIFBIT_ALL
721
722#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
723	/* Point to the vector into the reduced mapping */
724	adr	x2, thread_user_kcode_offset
725	ldr	x2, [x2]
726	mrs	x4, vbar_el1	/* this register must be preserved */
727	sub	x3, x4, x2
728	msr	vbar_el1, x3
729	isb
730
731	/* Jump into the reduced mapping and continue execution */
732	ldr	x3, =1f
733	sub	x3, x3, x2
734	br	x3
7351:
736
737	/* Update the mapping to exclude the full kernel mapping */
738	mrs	x5, ttbr0_el1	/* this register must be preserved */
739	add	x2, x5, #CORE_MMU_L1_TBL_OFFSET
740	orr	x2, x2, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
741	msr	ttbr0_el1, x2
742	isb
743
744#else
745	mrs	x5, ttbr0_el1	/* this register must be preserved */
746	orr	x2, x5, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
747	msr	ttbr0_el1, x2
748	isb
749#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
750
751	/*
752	 * Do the actual icache invalidation
753	 */
754
755	/* Calculate minimum icache line size, result in x2 */
756	mrs	x3, ctr_el0
757	and	x3, x3, #CTR_IMINLINE_MASK
758	mov	x2, #CTR_WORD_SIZE
759	lsl	x2, x2, x3
760
761	add	x1, x0, x1
762	sub	x3, x2, #1
763	bic	x0, x0, x3
7641:
765	ic	ivau, x0
766	add	x0, x0, x2
767	cmp	x0, x1
768	b.lo    1b
769	dsb	ish
770
771#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
772	/* Update the mapping to use the full kernel mapping and ASID */
773	msr	ttbr0_el1, x5
774	isb
775
776	/* Jump into the full mapping and continue execution */
777	ldr	x0, =1f
778	br	x0
7791:
780
781	/* Point to the vector into the full mapping */
782	msr	vbar_el1, x4
783	isb
784#else
785	/* switch to kernel mode ASID */
786	msr	ttbr0_el1, x5
787	isb
788#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
789
790	msr	daif, x6	/* restore exceptions */
791	ret	/* End of icache_inv_user_range() */
792
793	/*
794	 * Make sure that literals are placed before the
795	 * thread_excp_vect_end label.
796	 */
797	.pool
798	.global thread_excp_vect_end
799thread_excp_vect_end:
800END_FUNC thread_excp_vect
801
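/*
 * Handle an SVC from EL0. Entered from el0_sync_a64/el0_sync_a32 with the
 * original x0-x3 saved in the core local record on SP_EL1. The registers
 * are collected in a struct thread_svc_regs on the kernel stack and passed
 * to tee_svc_handler().
 */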
802LOCAL_FUNC el0_svc , :
803	/* get pointer to current thread context in x0 */
804	get_thread_ctx sp, 0, 1, 2
805	/* load saved kernel sp */
806	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
807	/* Keep pointer to initial record in x1 */
808	mov	x1, sp
809	/* Switch to SP_EL0 and restore kernel sp */
810	msr	spsel, #0
811	mov	x2, sp	/* Save SP_EL0 */
812	mov	sp, x0
813
814	/* Make room for struct thread_svc_regs */
815	sub	sp, sp, #THREAD_SVC_REG_SIZE
816	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]
817
818	/* Restore x0-x3 */
819	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
820	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]
821
822	/* Prepare the argument for the handler */
823	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
824	mrs	x0, elr_el1
825	mrs	x1, spsr_el1
826	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
827	mov	x0, sp
828
829	/*
830	 * Unmask native interrupts, SError, and debug exceptions since we have
831	 * nothing left in sp_el1. Note that the SVC handler is expected to
832	 * re-enable foreign interrupts by itself.
833	 */
834#if defined(CFG_ARM_GICV3)
835	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
836#else
837	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
838#endif
839
840	/* Call the handler */
841	bl	tee_svc_handler
842
843	/* Mask all maskable exceptions since we're switching back to sp_el1 */
844	msr	daifset, #DAIFBIT_ALL
845
846	/*
847	 * Save the kernel sp we had at the beginning of this function.
848	 * This matters when this TA has called another TA, because
849	 * __thread_enter_user_mode() also saves the stack pointer in this
850	 * field.
851	 */
852	msr	spsel, #1
853	get_thread_ctx sp, 0, 1, 2
854	msr	spsel, #0
855	add	x1, sp, #THREAD_SVC_REG_SIZE
856	str	x1, [x0, #THREAD_CTX_KERN_SP]
857	/* Restore registers to the required state and return */
858	/* Restore registers to the required state and return*/
859	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
860	msr	elr_el1, x0
861	msr	spsr_el1, x1
862	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
863	mov	x30, sp
864	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
865	mov	sp, x0
866	b_if_spsr_is_el0 w1, 1f
867	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
868	ldr	x30, [x30, #THREAD_SVC_REG_X30]
869
870	eret
871
8721:	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
873	ldr	x30, [x30, #THREAD_SVC_REG_X30]
874
875	msr	spsel, #1
876	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
877	b	eret_to_el0
878END_FUNC el0_svc
879
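/*
 * Handle a synchronous exception taken from EL1. State is saved in a
 * struct thread_abort_regs on the abort stack (or on the tmp stack if an
 * abort was already being handled) and abort_handler() is called.
 */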
880LOCAL_FUNC el1_sync_abort , :
881	mov	x0, sp
882	msr	spsel, #0
883	mov	x3, sp		/* Save original sp */
884
885	/*
886	 * Update core local flags.
887	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
888	 */
889	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
890	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
891	orr	w1, w1, #THREAD_CLF_ABORT
892	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
893			.Lsel_tmp_sp
894
895	/* Select abort stack */
896	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
897	b	.Lset_sp
898
899.Lsel_tmp_sp:
900	/* Select tmp stack */
901	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
902	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */
903
904.Lset_sp:
905	mov	sp, x2
906	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
907
908	/*
909	 * Save state on stack
910	 */
911	sub	sp, sp, #THREAD_ABT_REGS_SIZE
912	mrs	x2, spsr_el1
913	/* Store spsr, sp_el0 */
914	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
915	/* Store original x0, x1 */
916	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
917	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
918	/* Store original x2, x3 and x4 to x29 */
919	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
920	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
921	/* Store x30, elr_el1 */
922	mrs	x0, elr_el1
923	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
924
925	/*
926	 * Call handler
927	 */
928	mov	x0, #0
929	mov	x1, sp
930	bl	abort_handler
931
932	/*
933	 * Restore state from stack
934	 */
935	/* Load x30, elr_el1 */
936	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
937	msr	elr_el1, x0
938	/* Load x0 to x29 */
939	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
940	/* Switch to SP_EL1 */
941	msr	spsel, #1
942	/* Save x0 to x3 in CORE_LOCAL */
943	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
944	/* Restore spsr_el1 and sp_el0 */
945	mrs	x3, sp_el0
946	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
947	msr	spsr_el1, x0
948	msr	sp_el0, x1
949
950	/* Update core local flags */
951	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
952	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
953	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
954
955	/* Restore x0 to x3 */
956	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
957
958	/* Return from exception */
959	eret
960END_FUNC el1_sync_abort
961
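/*
 * Handle a synchronous exception from EL0 that isn't an SVC. The original
 * x0-x3 are in the core local record; state is saved in a struct
 * thread_abort_regs on the abort stack and abort_handler() is called.
 */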
962	/* sp_el0 in x3 */
963LOCAL_FUNC el0_sync_abort , :
964	/*
965	 * Update core local flags
966	 */
967	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
968	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
969	orr	w1, w1, #THREAD_CLF_ABORT
970	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
971
972	/*
973	 * Save state on stack
974	 */
975
976	/* load abt_stack_va_end */
977	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
978	/* Keep pointer to initial record in x0 */
979	mov	x0, sp
980	/* Switch to SP_EL0 */
981	msr	spsel, #0
982	mov	sp, x1
983	sub	sp, sp, #THREAD_ABT_REGS_SIZE
984	mrs	x2, spsr_el1
985	/* Store spsr, sp_el0 */
986	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
987	/* Store original x0, x1 */
988	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
989	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
990	/* Store original x2, x3 and x4 to x29 */
991	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
992	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
993	/* Store x30, elr_el1 */
994	mrs	x0, elr_el1
995	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
996
997	/*
998	 * Call handler
999	 */
1000	mov	x0, #0
1001	mov	x1, sp
1002	bl	abort_handler
1003
1004	/*
1005	 * Restore state from stack
1006	 */
1007
1008	/* Load x30, elr_el1 */
1009	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
1010	msr	elr_el1, x0
1011	/* Load x0 to x29 */
1012	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
1013	/* Switch to SP_EL1 */
1014	msr	spsel, #1
1015	/* Save x0 to x3 in CORE_LOCAL */
1016	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
1017	/* Restore spsr_el1 and sp_el0 */
1018	mrs	x3, sp_el0
1019	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
1020	msr	spsr_el1, x0
1021	msr	sp_el0, x1
1022
1023	/* Update core local flags */
1024	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1025	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
1026	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1027
1028	/* Restore x2 to x3 */
1029	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
1030
1031	b_if_spsr_is_el0 w0, 1f
1032
1033	/* Restore x0 to x1 */
1034	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
1035
1036	/* Return from exception */
1037	eret
10381:	b	eret_to_el0
1039END_FUNC el0_sync_abort
1040
1041/* The handler of foreign interrupts. */
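/*
 * A foreign interrupt isn't handled by OP-TEE: the current thread is
 * suspended and control is returned to the normal world with
 * OPTEE_SMC_RETURN_RPC_FOREIGN_INTR so that the interrupt can be handled
 * there.
 */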
1042.macro foreign_intr_handler mode:req
1043	/*
1044	 * Update core local flags
1045	 */
1046	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1047	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
1048	orr	w1, w1, #THREAD_CLF_TMP
1049	.ifc	\mode\(),fiq
1050	orr	w1, w1, #THREAD_CLF_FIQ
1051	.else
1052	orr	w1, w1, #THREAD_CLF_IRQ
1053	.endif
1054	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1055
1056	/* get pointer to current thread context in x0 */
1057	get_thread_ctx sp, 0, 1, 2
1058	/* Keep original SP_EL0 */
1059	mrs	x2, sp_el0
1060
1061	/* Store original sp_el0 */
1062	str	x2, [x0, #THREAD_CTX_REGS_SP]
1063	/* store x4..x30 */
1064	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
1065	/* Load original x0..x3 into x10..x13 */
1066	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
1067	/* Save original x0..x3 */
1068	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13
1069
1070	/* load tmp_stack_va_end */
1071	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
1072	/* Switch to SP_EL0 */
1073	msr	spsel, #0
1074	mov	sp, x1
1075
1076#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
1077	/*
1078	 * Prevent leaking information about which cache entries have been
1079	 * used. We're relying on the dispatcher in TF-A to take care of
1080	 * the BTB.
1081	 */
1082	mov	x0, #DCACHE_OP_CLEAN_INV
1083	bl	dcache_op_louis
1084	ic	iallu
1085#endif
1086	/*
1087	 * Mark current thread as suspended
1088	 */
1089	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
1090	mrs	x1, spsr_el1
1091	mrs	x2, elr_el1
1092	bl	thread_state_suspend
1093	mov	w4, w0		/* Supply thread index */
1094
1095	/* Update core local flags */
1096	/* Switch to SP_EL1 */
1097	msr	spsel, #1
1098	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
1099	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
1100	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
1101	msr	spsel, #0
1102
1103	/*
1104	 * Note that we're exiting with SP_EL0 selected since the entry
1105	 * functions expect to have SP_EL0 selected with the tmp stack
1106	 * set.
1107	 */
1108
1109	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
1110	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
1111	mov	w2, #0
1112	mov	w3, #0
1113	/* w4 is already filled in above */
1114	smc	#0
1115	b	.	/* SMC should not return */
1116.endm
1117
1118/*
1119 * This struct is never used from C; it's only here to visualize the
1120 * layout.
1121 *
1122 * struct elx_nintr_rec {
1123 * 	uint64_t x[19 - 4]; x4..x18
1124 * 	uint64_t lr;
1125 * 	uint64_t sp_el0;
1126 * };
1127 */
1128#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
1129#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
1130#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
1131#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)
1132
1133/* The handler of native interrupts. */
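/*
 * A native interrupt is handled inside OP-TEE: the interrupted state is
 * saved in a struct elx_nintr_rec on the tmp stack, the registered handler
 * is called and execution then resumes where the exception was taken.
 */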
1134.macro native_intr_handler mode:req
1135	/*
1136	 * Update core local flags
1137	 */
1138	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1139	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
1140	.ifc	\mode\(),fiq
1141	orr	w1, w1, #THREAD_CLF_FIQ
1142	.else
1143	orr	w1, w1, #THREAD_CLF_IRQ
1144	.endif
1145	orr	w1, w1, #THREAD_CLF_TMP
1146	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1147
1148	/* load tmp_stack_va_end */
1149	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
1150	/* Keep original SP_EL0 */
1151	mrs	x2, sp_el0
1152	/* Switch to SP_EL0 */
1153	msr	spsel, #0
1154	mov	sp, x1
1155
1156	/*
1157	 * Save registers on stack that can be corrupted by a call to
1158	 * a C function
1159	 */
1160	/* Make room for struct elx_nintr_rec */
1161	sub	sp, sp, #ELX_NINTR_REC_SIZE
1162	/* Store x4..x18 */
1163	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
1164	/* Store lr and original sp_el0 */
1165	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]
1166
1167	bl	thread_check_canaries
1168	adr	x16, thread_nintr_handler_ptr
1169	ldr	x16, [x16]
1170	blr	x16
1171
1172	/*
1173	 * Restore registers
1174	 */
1175	/* Restore x4..x18 */
1176	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
1177	/* Load lr and original sp_el0 */
1178	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
1179	/* Restore SP_El0 */
1180	mov	sp, x2
1181	/* Switch back to SP_EL1 */
1182	msr	spsel, #1
1183
1184	/* Update core local flags */
1185	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
1186	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
1187	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
1188
1189	mrs	x0, spsr_el1
1190	/* Restore x2..x3 */
1191	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
1192	b_if_spsr_is_el0 w0, 1f
1193
1194	/* Restore x0..x1 */
1195	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
1196
1197	/* Return from exception */
1198	eret
11991:	b	eret_to_el0
1200.endm
1201
1202LOCAL_FUNC elx_irq , :
1203#if defined(CFG_ARM_GICV3)
1204	native_intr_handler	irq
1205#else
1206	foreign_intr_handler	irq
1207#endif
1208END_FUNC elx_irq
1209
1210LOCAL_FUNC elx_fiq , :
1211#if defined(CFG_ARM_GICV3)
1212	foreign_intr_handler	fiq
1213#else
1214	native_intr_handler	fiq
1215#endif
1216END_FUNC elx_fiq
1217