xref: /optee_os/core/arch/arm/kernel/thread_a64.S (revision e13d1040f35ce8388807c9381fe04584d131eb3d)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2015-2017, Linaro Limited
4 */
5
6#include <arm.h>
7#include <arm64_macros.S>
8#include <asm.S>
9#include <generated/asm-defines.h>
10#include <keep.h>
11#include <kernel/thread_defs.h>
12#include <mm/core_mmu.h>
13#include <smccc.h>
14#include <sm/optee_smc.h>
15#include <sm/teesmc_opteed.h>
16#include <sm/teesmc_opteed_macros.h>
17
18#include "thread_private.h"
19
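	/*
	 * Descriptive note (not in the original): this macro computes the
	 * address of the current thread's struct thread_ctx, roughly the C
	 * expression &threads[core_local->curr_thread], using the
	 * THREAD_CTX_SIZE constant from the generated asm-defines. The
	 * result is left in x\res.
	 */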
20	.macro get_thread_ctx core_local, res, tmp0, tmp1
21		ldr	w\tmp0, [\core_local, \
22				#THREAD_CORE_LOCAL_CURR_THREAD]
23		adr	x\res, threads
24		mov	x\tmp1, #THREAD_CTX_SIZE
25		madd	x\res, x\tmp0, x\tmp1, x\res
26	.endm
27
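	/*
	 * Descriptive note (not in the original): branches to \label if the
	 * SPSR value in \reg describes a return to user space, i.e. either
	 * AArch32 (the M[4]/RW bit is set) or AArch64 EL0 (the EL field is
	 * zero).
	 */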
28	.macro b_if_spsr_is_el0 reg, label
29		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
30		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
31		b.eq	\label
32	.endm
33
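/*
 * Descriptive note (not in the original): the vector_*_entry functions
 * below are entered from the secure monitor via thread_vector_table. The
 * SMC entries collect x0-x7 into a struct thread_smc_args on the stack,
 * call the C handler and then return to the monitor with an SMC where x0
 * holds a TEESMC_OPTEED_RETURN_* code and x1-x8 hold the returned
 * arguments.
 */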
34LOCAL_FUNC vector_std_smc_entry , :
35	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
36	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
37	mov	x0, sp
38	bl	thread_handle_std_smc
39	/*
40	 * Normally thread_handle_std_smc() should return via
41	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
42	 * hasn't switched stack (an error was detected) it will do a
43	 * normal "C" return.
44	 */
45	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
46	add	sp, sp, #THREAD_SMC_ARGS_SIZE
47	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
48	smc	#0
49	b	.	/* SMC should not return */
50END_FUNC vector_std_smc_entry
51
52LOCAL_FUNC vector_fast_smc_entry , :
53	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
54	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
55	mov	x0, sp
56	bl	thread_handle_fast_smc
57	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
58	add	sp, sp, #THREAD_SMC_ARGS_SIZE
59	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
60	smc	#0
61	b	.	/* SMC should not return */
62END_FUNC vector_fast_smc_entry
63
64LOCAL_FUNC vector_fiq_entry , :
65	/* Secure Monitor received a FIQ and passed control to us. */
66	bl	thread_check_canaries
67	adr	x16, thread_nintr_handler_ptr
68	ldr	x16, [x16]
69	blr	x16
70	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
71	smc	#0
72	b	.	/* SMC should not return */
73END_FUNC vector_fiq_entry
74
75LOCAL_FUNC vector_cpu_on_entry , :
76	adr	x16, thread_cpu_on_handler_ptr
77	ldr	x16, [x16]
78	blr	x16
79	mov	x1, x0
80	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
81	smc	#0
82	b	.	/* SMC should not return */
83END_FUNC vector_cpu_on_entry
84
85LOCAL_FUNC vector_cpu_off_entry , :
86	adr	x16, thread_cpu_off_handler_ptr
87	ldr	x16, [x16]
88	blr	x16
89	mov	x1, x0
90	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
91	smc	#0
92	b	.	/* SMC should not return */
93END_FUNC vector_cpu_off_entry
94
95LOCAL_FUNC vector_cpu_suspend_entry , :
96	adr	x16, thread_cpu_suspend_handler_ptr
97	ldr	x16, [x16]
98	blr	x16
99	mov	x1, x0
100	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
101	smc	#0
102	b	.	/* SMC should not return */
103END_FUNC vector_cpu_suspend_entry
104
105LOCAL_FUNC vector_cpu_resume_entry , :
106	adr	x16, thread_cpu_resume_handler_ptr
107	ldr	x16, [x16]
108	blr	x16
109	mov	x1, x0
110	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
111	smc	#0
112	b	.	/* SMC should not return */
113END_FUNC vector_cpu_resume_entry
114
115LOCAL_FUNC vector_system_off_entry , :
116	adr	x16, thread_system_off_handler_ptr
117	ldr	x16, [x16]
118	blr	x16
119	mov	x1, x0
120	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
121	smc	#0
122	b	.	/* SMC should not return */
123END_FUNC vector_system_off_entry
124
125LOCAL_FUNC vector_system_reset_entry , :
126	adr	x16, thread_system_reset_handler_ptr
127	ldr	x16, [x16]
128	blr	x16
129	mov	x1, x0
130	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
131	smc	#0
132	b	.	/* SMC should not return */
133END_FUNC vector_system_reset_entry
134
135/*
136 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
137 * initialization.
138 *
139 * Note that ARM-TF depends on the layout of this vector table; any
140 * change in the layout has to be synced with ARM-TF.
141 */
142FUNC thread_vector_table , :
143	b	vector_std_smc_entry
144	b	vector_fast_smc_entry
145	b	vector_cpu_on_entry
146	b	vector_cpu_off_entry
147	b	vector_cpu_resume_entry
148	b	vector_cpu_suspend_entry
149	b	vector_fiq_entry
150	b	vector_system_off_entry
151	b	vector_system_reset_entry
152END_FUNC thread_vector_table
153KEEP_PAGER thread_vector_table
154
155
156/* void thread_resume(struct thread_ctx_regs *regs) */
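/*
 * Descriptive note (not in the original): restores the thread context
 * saved in *regs (sp, elr_el1, spsr_el1 and x0-x30) and erets into the
 * thread. If the saved SPSR targets EL0 the return instead goes through
 * eret_to_el0 so that the user-mode mapping is restored first (see
 * CFG_CORE_UNMAP_CORE_AT_EL0 below).
 */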
157FUNC thread_resume , :
158	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
159	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
160	mov	sp, x1
161	msr	elr_el1, x2
162	msr	spsr_el1, x3
163
164	b_if_spsr_is_el0 w3, 1f
165
166	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
167	ldr	x0, [x0, THREAD_CTX_REGS_X0]
168	eret
169
1701:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
171	ldr	x0, [x0, THREAD_CTX_REGS_X0]
172
173	msr	spsel, #1
174	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
175	b	eret_to_el0
176END_FUNC thread_resume
177
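/*
 * Descriptive note (not in the original): first function executed on the
 * thread stack when a std SMC thread is started. __thread_std_smc_entry()
 * does the actual work; its x0-x3 results are kept in callee-saved
 * registers across the switch back to the temporary stack and handed to
 * the monitor once thread_state_free() has released the thread.
 */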
178FUNC thread_std_smc_entry , :
179	/* pass x0-x7 in a struct thread_smc_args */
180	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
181	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
182	mov	x0, sp
183
184	/* Call the registered handler */
185	bl	__thread_std_smc_entry
186
187	/*
188	 * Load the returned x0-x3 into preserved registers and skip the
189	 * "returned" x4-x7 since they will not be returned to normal
190	 * world.
191	 */
192	load_xregs sp, THREAD_SMC_ARGS_X0, 20, 23
193	add	sp, sp, #THREAD_SMC_ARGS_SIZE
194
195	/* Mask all maskable exceptions before switching to temporary stack */
196	msr	daifset, #DAIFBIT_ALL
197	bl	thread_get_tmp_sp
198	mov	sp, x0
199
200	bl	thread_state_free
201
202	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
203	mov	x1, x20
204	mov	x2, x21
205	mov	x3, x22
206	mov	x4, x23
207	smc	#0
208	b	.	/* SMC should not return */
209END_FUNC thread_std_smc_entry
210
211/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
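/*
 * Descriptive note (not in the original): suspends the current thread and
 * returns to the normal world with rv[0..2] in w1-w3, w0 set to
 * TEESMC_OPTEED_RETURN_CALL_DONE and the thread index in x4. When the
 * normal world resumes the thread, thread_resume() lands at
 * .thread_rpc_return below and w0-w5 are copied back into rv[].
 */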
212FUNC thread_rpc , :
213	/* Read daif and create an SPSR */
214	mrs	x1, daif
215	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)
216
217	/* Mask all maskable exceptions before switching to temporary stack */
218	msr	daifset, #DAIFBIT_ALL
219	push	x0, xzr
220	push	x1, x30
221	bl	thread_get_ctx_regs
222	ldr	x30, [sp, #8]
223	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
224	mov	x19, x0
225
226	bl	thread_get_tmp_sp
227	pop	x1, xzr		/* Match "push x1, x30" above */
228	mov	x2, sp
229	str	x2, [x19, #THREAD_CTX_REGS_SP]
230	ldr	x20, [sp]	/* Get pointer to rv[] */
231	mov	sp, x0		/* Switch to tmp stack */
232
233	adr	x2, .thread_rpc_return
234	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
235	bl	thread_state_suspend
236	mov	x4, x0		/* Supply thread index */
237	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
238	load_wregs x20, 0, 1, 3	/* Load rv[] into w0-w2 */
239	smc	#0
240	b	.		/* SMC should not return */
241
242.thread_rpc_return:
243	/*
244	 * At this point the stack pointer has been restored to the value
245	 * stored in THREAD_CTX above.
246	 *
247	 * Execution jumps here from thread_resume above when the RPC has
248	 * returned. The IRQ and FIQ bits are restored to what they were
249	 * when this function was originally entered.
250	 */
251	pop	x16, xzr	/* Get pointer to rv[] */
252	store_wregs x16, 0, 0, 5	/* Store w0-w5 into rv[] */
253	ret
254END_FUNC thread_rpc
255KEEP_PAGER thread_rpc
256
257FUNC thread_smc , :
258	smc	#0
259	ret
260END_FUNC thread_smc
261
262FUNC thread_init_vbar , :
263	msr	vbar_el1, x0
264	ret
265END_FUNC thread_init_vbar
266KEEP_PAGER thread_init_vbar
267
268/*
269 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
270 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
271 *               unsigned long user_func, unsigned long spsr,
272 *               uint32_t *exit_status0, uint32_t *exit_status1)
273 *
274 */
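/*
 * Descriptive note (not in the original): sets up SPSR_EL1, ELR_EL1 and
 * SP_EL0 from the arguments and erets into user mode with a0-a3 still in
 * x0-x3. The callee-saved registers and the exit status pointers are kept
 * in a struct thread_user_mode_rec on the kernel stack so that
 * thread_unwind_user_mode() can later return to our caller.
 */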
275FUNC __thread_enter_user_mode , :
276	ldr	x8, [sp]
277	/*
278	 * Create and fill in the struct thread_user_mode_rec
279	 */
280	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
281	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
282	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
283
284	/*
285	 * Switch to SP_EL1
286	 * Disable exceptions
287	 * Save kern sp in x19
288	 */
289	msr	daifset, #DAIFBIT_ALL
290	mov	x19, sp
291	msr	spsel, #1
292
293	/*
294	 * Save the kernel stack pointer in the thread context
295	 */
296	/* get pointer to current thread context */
297	get_thread_ctx sp, 21, 20, 22
298	/*
299	 * Save the kernel stack pointer to ensure that el0_svc() uses
300	 * the correct stack pointer
301	 */
302	str	x19, [x21, #THREAD_CTX_KERN_SP]
303
304	/*
305	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
306	 */
307	msr	spsr_el1, x6
308	/* Set user sp */
309	mov	x13, x4		/* Used when running TA in Aarch32 */
310	msr	sp_el0, x4	/* Used when running TA in Aarch64 */
311	/* Set user function */
312	msr	elr_el1, x5
313	/* Set frame pointer (user stack can't be unwound past this point) */
314	mov x29, #0
315
316	/* Jump into user mode */
317	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
318	b eret_to_el0
319END_FUNC __thread_enter_user_mode
320
321/*
322 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
323 * 		uint32_t exit_status1);
324 * See description in thread.h
325 */
326FUNC thread_unwind_user_mode , :
327	/* Store the exit status */
328	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
329	str	w1, [x3]
330	str	w2, [x4]
331	/* Restore x19..x30 */
332	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
333	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
334	/* Return from the call of thread_enter_user_mode() */
335	ret
336END_FUNC thread_unwind_user_mode
337
338	/*
339	 * This macro verifies that a given vector doesn't exceed the
340	 * architectural limit of 32 instructions. It is meant to be placed
341	 * immediately after the last instruction in the vector and takes
342	 * the vector entry as its parameter.
343	 */
344	.macro check_vector_size since
345	  .if (. - \since) > (32 * 4)
346	    .error "Vector exceeds 32 instructions"
347	  .endif
348	.endm
349
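	/*
	 * Descriptive note (not in the original): used at every exception
	 * entry from a lower EL. With CFG_CORE_UNMAP_CORE_AT_EL0 only a
	 * small trampoline of the core is mapped while in user mode, so
	 * this macro switches TTBR0 back to the full kernel mapping and
	 * kernel ASID, moves VBAR_EL1 to the vector copy inside the full
	 * mapping and saves x0-x3 in the core local record. Without that
	 * config it only switches ASID and saves x0-x3.
	 */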
350	.macro restore_mapping
351#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
352		/* Temporarily save x0, x1 */
353		msr	tpidr_el1, x0
354		msr	tpidrro_el0, x1
355
356		/* Update the mapping to use the full kernel mapping */
357		mrs	x0, ttbr0_el1
358		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
359		/* switch to kernel mode ASID */
360		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
361		msr	ttbr0_el1, x0
362		isb
363
364		/* Jump into the full mapping and continue execution */
365		ldr	x0, =1f
366		br	x0
367	1:
368
369		/* Point vbar_el1 at the vector table in the full mapping */
370		adr	x0, thread_user_kcode_offset
371		ldr	x0, [x0]
372		mrs	x1, vbar_el1
373		add	x1, x1, x0
374		msr	vbar_el1, x1
375		isb
376
377#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
378		/*
379		 * Update the SP with thread_user_kdata_sp_offset as
380		 * described in init_user_kcode().
381		 */
382		adr	x0, thread_user_kdata_sp_offset
383		ldr	x0, [x0]
384		add	sp, sp, x0
385#endif
386
387		/* Restore x0, x1 */
388		mrs	x0, tpidr_el1
389		mrs	x1, tpidrro_el0
390		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
391#else
392		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
393		mrs	x0, ttbr0_el1
394		/* switch to kernel mode ASID */
395		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
396		msr	ttbr0_el1, x0
397		isb
398#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
399	.endm
400
401#define INV_INSN	0
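/*
 * Descriptive note (not in the original): the vector table below must be
 * 2 KiB aligned (VBAR_EL1 requirement) and each entry is 128 bytes, i.e.
 * at most 32 instructions, which check_vector_size enforces. Padding uses
 * INV_INSN (0), an invalid instruction encoding in AArch64, so falling
 * into the padding traps.
 */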
402	.section .text.thread_excp_vect
403	.align	11, INV_INSN
404FUNC thread_excp_vect , :
405	/* -----------------------------------------------------
406	 * EL1 with SP0 : 0x0 - 0x180
407	 * -----------------------------------------------------
408	 */
409	.align	7, INV_INSN
410el1_sync_sp0:
411	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
412	b	el1_sync_abort
413	check_vector_size el1_sync_sp0
414
415	.align	7, INV_INSN
416el1_irq_sp0:
417	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
418	b	elx_irq
419	check_vector_size el1_irq_sp0
420
421	.align	7, INV_INSN
422el1_fiq_sp0:
423	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
424	b	elx_fiq
425	check_vector_size el1_fiq_sp0
426
427	.align	7, INV_INSN
428el1_serror_sp0:
429	b	el1_serror_sp0
430	check_vector_size el1_serror_sp0
431
432	/* -----------------------------------------------------
433	 * Current EL with SP1: 0x200 - 0x380
434	 * -----------------------------------------------------
435	 */
436	.align	7, INV_INSN
437el1_sync_sp1:
438	b	el1_sync_sp1
439	check_vector_size el1_sync_sp1
440
441	.align	7, INV_INSN
442el1_irq_sp1:
443	b	el1_irq_sp1
444	check_vector_size el1_irq_sp1
445
446	.align	7, INV_INSN
447el1_fiq_sp1:
448	b	el1_fiq_sp1
449	check_vector_size el1_fiq_sp1
450
451	.align	7, INV_INSN
452el1_serror_sp1:
453	b	el1_serror_sp1
454	check_vector_size el1_serror_sp1
455
456	/* -----------------------------------------------------
457	 * Lower EL using AArch64 : 0x400 - 0x580
458	 * -----------------------------------------------------
459	 */
460	.align	7, INV_INSN
461el0_sync_a64:
462	restore_mapping
463
464	mrs	x2, esr_el1
465	mrs	x3, sp_el0
466	lsr	x2, x2, #ESR_EC_SHIFT
467	cmp	x2, #ESR_EC_AARCH64_SVC
468	b.eq	el0_svc
469	b	el0_sync_abort
470	check_vector_size el0_sync_a64
471
472	.align	7, INV_INSN
473el0_irq_a64:
474	restore_mapping
475
476	b	elx_irq
477	check_vector_size el0_irq_a64
478
479	.align	7, INV_INSN
480el0_fiq_a64:
481	restore_mapping
482
483	b	elx_fiq
484	check_vector_size el0_fiq_a64
485
486	.align	7, INV_INSN
487el0_serror_a64:
488	b	el0_serror_a64
489	check_vector_size el0_serror_a64
490
491	/* -----------------------------------------------------
492	 * Lower EL using AArch32 : 0x600 - 0x780
493	 * -----------------------------------------------------
494	 */
495	.align	7, INV_INSN
496el0_sync_a32:
497	restore_mapping
498
499	mrs	x2, esr_el1
500	mrs	x3, sp_el0
501	lsr	x2, x2, #ESR_EC_SHIFT
502	cmp	x2, #ESR_EC_AARCH32_SVC
503	b.eq	el0_svc
504	b	el0_sync_abort
505	check_vector_size el0_sync_a32
506
507	.align	7, INV_INSN
508el0_irq_a32:
509	restore_mapping
510
511	b	elx_irq
512	check_vector_size el0_irq_a32
513
514	.align	7, INV_INSN
515el0_fiq_a32:
516	restore_mapping
517
518	b	elx_fiq
519	check_vector_size el0_fiq_a32
520
521	.align	7, INV_INSN
522el0_serror_a32:
523	b	el0_serror_a32
524	check_vector_size el0_serror_a32
525
526#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
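/*
 * Descriptive note (not in the original): alternative vector table used
 * when CFG_CORE_WORKAROUND_SPECTRE_BP_SEC is enabled. For exceptions
 * coming from a lower EL the branch predictor is invalidated with an
 * SMCCC_ARCH_WORKAROUND_1 call to the secure monitor before branching to
 * the normal handler above.
 */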
527	.macro invalidate_branch_predictor
528		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
529		mov_imm	x0, SMCCC_ARCH_WORKAROUND_1
530		smc	#0
531		load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
532	.endm
533
534	.align	11, INV_INSN
535	.global thread_excp_vect_workaround
536thread_excp_vect_workaround:
537	/* -----------------------------------------------------
538	 * EL1 with SP0 : 0x0 - 0x180
539	 * -----------------------------------------------------
540	 */
541	.align	7, INV_INSN
542workaround_el1_sync_sp0:
543	b	el1_sync_sp0
544	check_vector_size workaround_el1_sync_sp0
545
546	.align	7, INV_INSN
547workaround_el1_irq_sp0:
548	b	el1_irq_sp0
549	check_vector_size workaround_el1_irq_sp0
550
551	.align	7, INV_INSN
552workaround_el1_fiq_sp0:
553	b	el1_fiq_sp0
554	check_vector_size workaround_el1_fiq_sp0
555
556	.align	7, INV_INSN
557workaround_el1_serror_sp0:
558	b	el1_serror_sp0
559	check_vector_size workaround_el1_serror_sp0
560
561	/* -----------------------------------------------------
562	 * Current EL with SP1: 0x200 - 0x380
563	 * -----------------------------------------------------
564	 */
565	.align	7, INV_INSN
566workaround_el1_sync_sp1:
567	b	workaround_el1_sync_sp1
568	check_vector_size workaround_el1_sync_sp1
569
570	.align	7, INV_INSN
571workaround_el1_irq_sp1:
572	b	workaround_el1_irq_sp1
573	check_vector_size workaround_el1_irq_sp1
574
575	.align	7, INV_INSN
576workaround_el1_fiq_sp1:
577	b	workaround_el1_fiq_sp1
578	check_vector_size workaround_el1_fiq_sp1
579
580	.align	7, INV_INSN
581workaround_el1_serror_sp1:
582	b	workaround_el1_serror_sp1
583	check_vector_size workaround_el1_serror_sp1
584
585	/* -----------------------------------------------------
586	 * Lower EL using AArch64 : 0x400 - 0x580
587	 * -----------------------------------------------------
588	 */
589	.align	7, INV_INSN
590workaround_el0_sync_a64:
591	invalidate_branch_predictor
592	b	el0_sync_a64
593	check_vector_size workaround_el0_sync_a64
594
595	.align	7, INV_INSN
596workaround_el0_irq_a64:
597	invalidate_branch_predictor
598	b	el0_irq_a64
599	check_vector_size workaround_el0_irq_a64
600
601	.align	7, INV_INSN
602workaround_el0_fiq_a64:
603	invalidate_branch_predictor
604	b	el0_fiq_a64
605	check_vector_size workaround_el0_fiq_a64
606
607	.align	7, INV_INSN
608workaround_el0_serror_a64:
609	b	workaround_el0_serror_a64
610	check_vector_size workaround_el0_serror_a64
611
612	/* -----------------------------------------------------
613	 * Lower EL using AArch32 : 0x600 - 0x780
614	 * -----------------------------------------------------
615	 */
616	.align	7, INV_INSN
617workaround_el0_sync_a32:
618	invalidate_branch_predictor
619	b	el0_sync_a32
620	check_vector_size workaround_el0_sync_a32
621
622	.align	7, INV_INSN
623workaround_el0_irq_a32:
624	invalidate_branch_predictor
625	b	el0_irq_a32
626	check_vector_size workaround_el0_irq_a32
627
628	.align	7, INV_INSN
629workaround_el0_fiq_a32:
630	invalidate_branch_predictor
631	b	el0_fiq_a32
632	check_vector_size workaround_el0_fiq_a32
633
634	.align	7, INV_INSN
635workaround_el0_serror_a32:
636	b	workaround_el0_serror_a32
637	check_vector_size workaround_el0_serror_a32
638#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
639
640/*
641 * We're keeping this code in the same section as the vector to make sure
642 * that it's always available.
643 */
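/*
 * Descriptive note (not in the original): eret_to_el0 expects SP_EL1 to
 * be selected with x0 and x1 saved in THREAD_CORE_LOCAL_X0/X1 (see the
 * callers above). With CFG_CORE_UNMAP_CORE_AT_EL0 it switches to the
 * reduced user-mode mapping and user ASID before the final eret;
 * otherwise it only switches ASID.
 */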
644eret_to_el0:
645
646#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
647	/* Point vbar_el1 at the vector table in the reduced mapping */
648	adr	x0, thread_user_kcode_offset
649	ldr	x0, [x0]
650	mrs	x1, vbar_el1
651	sub	x1, x1, x0
652	msr	vbar_el1, x1
653	isb
654
655#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
656	/* Store the SP offset in tpidr_el1 to be used below to update SP */
657	adr	x1, thread_user_kdata_sp_offset
658	ldr	x1, [x1]
659	msr	tpidr_el1, x1
660#endif
661
662	/* Jump into the reduced mapping and continue execution */
663	ldr	x1, =1f
664	sub	x1, x1, x0
665	br	x1
6661:
667
668	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
669	msr	tpidrro_el0, x0
670
671	/* Update the mapping to exclude the full kernel mapping */
672	mrs	x0, ttbr0_el1
673	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
674	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
675	msr	ttbr0_el1, x0
676	isb
677
678#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
679	/*
680	 * Update the SP with thread_user_kdata_sp_offset as described in
681	 * init_user_kcode().
682	 */
683	mrs	x0, tpidr_el1
684	sub	sp, sp, x0
685#endif
686
687	mrs	x0, tpidrro_el0
688#else
689	mrs	x0, ttbr0_el1
690	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
691	msr	ttbr0_el1, x0
692	isb
693	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
694#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
695
696	eret
697
698END_FUNC thread_excp_vect
699
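/*
 * Descriptive note (not in the original): SVC from user mode. Switches
 * from the core local record (SP_EL1) to the kernel stack saved in
 * THREAD_CTX_KERN_SP, builds a struct thread_svc_regs with the user
 * x0-x14, x30, sp_el0, elr_el1 and spsr_el1, and calls tee_svc_handler().
 * On return the registers are restored from the (possibly modified)
 * struct and execution goes back to user mode via eret or eret_to_el0.
 */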
700LOCAL_FUNC el0_svc , :
701	/* get pointer to current thread context in x0 */
702	get_thread_ctx sp, 0, 1, 2
703	/* load saved kernel sp */
704	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
705	/* Keep pointer to initial record in x1 */
706	mov	x1, sp
707	/* Switch to SP_EL0 and restore kernel sp */
708	msr	spsel, #0
709	mov	x2, sp	/* Save SP_EL0 */
710	mov	sp, x0
711
712	/* Make room for struct thread_svc_regs */
713	sub	sp, sp, #THREAD_SVC_REG_SIZE
714	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]
715
716	/* Restore x0-x3 */
717	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
718	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]
719
720	/* Prepare the argument for the handler */
721	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
722	mrs	x0, elr_el1
723	mrs	x1, spsr_el1
724	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
725	mov	x0, sp
726
727	/*
728	 * Unmask native interrupts, SError, and debug exceptions since we have
729	 * nothing left in sp_el1. Note that the SVC handler is expected to
730	 * re-enable foreign interrupts by itself.
731	 */
732#if defined(CFG_ARM_GICV3)
733	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
734#else
735	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
736#endif
737
738	/* Call the handler */
739	bl	tee_svc_handler
740
741	/* Mask all maskable exceptions since we're switching back to sp_el1 */
742	msr	daifset, #DAIFBIT_ALL
743
744	/*
745	 * Save the kernel sp we had at the beginning of this function.
746	 * This matters when this TA has called another TA because
747	 * __thread_enter_user_mode() also saves the stack pointer in this
748	 * field.
749	 */
750	msr	spsel, #1
751	get_thread_ctx sp, 0, 1, 2
752	msr	spsel, #0
753	add	x1, sp, #THREAD_SVC_REG_SIZE
754	str	x1, [x0, #THREAD_CTX_KERN_SP]
755
756	/* Restore registers to the required state and return */
757	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
758	msr	elr_el1, x0
759	msr	spsr_el1, x1
760	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
761	mov	x30, sp
762	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
763	mov	sp, x0
764	b_if_spsr_is_el0 w1, 1f
765	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
766	ldr	x30, [x30, #THREAD_SVC_REG_X30]
767
768	eret
769
7701:	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
771	ldr	x30, [x30, #THREAD_SVC_REG_X30]
772
773	msr	spsel, #1
774	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
775	b	eret_to_el0
776END_FUNC el0_svc
777
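/*
 * Descriptive note (not in the original): synchronous exception taken
 * from EL1, i.e. an abort in the core itself. Unless we're already nested
 * inside an abort the dedicated abort stack is selected; the register
 * state is saved in a struct thread_abt_regs and passed to
 * abort_handler().
 */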
778LOCAL_FUNC el1_sync_abort , :
779	mov	x0, sp
780	msr	spsel, #0
781	mov	x3, sp		/* Save original sp */
782
783	/*
784	 * Update core local flags.
785	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
786	 */
787	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
788	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
789	orr	w1, w1, #THREAD_CLF_ABORT
790	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
791			.Lsel_tmp_sp
792
793	/* Select abort stack */
794	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
795	b	.Lset_sp
796
797.Lsel_tmp_sp:
798	/* Select tmp stack */
799	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
800	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */
801
802.Lset_sp:
803	mov	sp, x2
804	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
805
806	/*
807	 * Save state on stack
808	 */
809	sub	sp, sp, #THREAD_ABT_REGS_SIZE
810	mrs	x2, spsr_el1
811	/* Store spsr, sp_el0 */
812	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
813	/* Store original x0, x1 */
814	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
815	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
816	/* Store original x2, x3 and x4 to x29 */
817	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
818	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
819	/* Store x30, elr_el1 */
820	mrs	x0, elr_el1
821	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
822
823	/*
824	 * Call handler
825	 */
826	mov	x0, #0
827	mov	x1, sp
828	bl	abort_handler
829
830	/*
831	 * Restore state from stack
832	 */
833	/* Load x30, elr_el1 */
834	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
835	msr	elr_el1, x0
836	/* Load x0 to x29 */
837	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
838	/* Switch to SP_EL1 */
839	msr	spsel, #1
840	/* Save x0 to x3 in CORE_LOCAL */
841	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
842	/* Restore spsr_el1 and sp_el0 */
843	mrs	x3, sp_el0
844	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
845	msr	spsr_el1, x0
846	msr	sp_el0, x1
847
848	/* Update core local flags */
849	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
850	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
851	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
852
853	/* Restore x0 to x3 */
854	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
855
856	/* Return from exception */
857	eret
858END_FUNC el1_sync_abort
859
860	/* sp_el0 in x3 */
861LOCAL_FUNC el0_sync_abort , :
862	/*
863	 * Update core local flags
864	 */
865	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
866	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
867	orr	w1, w1, #THREAD_CLF_ABORT
868	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
869
870	/*
871	 * Save state on stack
872	 */
873
874	/* load abt_stack_va_end */
875	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
876	/* Keep pointer to initial record in x0 */
877	mov	x0, sp
878	/* Switch to SP_EL0 */
879	msr	spsel, #0
880	mov	sp, x1
881	sub	sp, sp, #THREAD_ABT_REGS_SIZE
882	mrs	x2, spsr_el1
883	/* Store spsr, sp_el0 */
884	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
885	/* Store original x0, x1 */
886	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
887	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
888	/* Store original x2, x3 and x4 to x29 */
889	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
890	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
891	/* Store x30, elr_el1 */
892	mrs	x0, elr_el1
893	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
894
895	/*
896	 * Call handler
897	 */
898	mov	x0, #0
899	mov	x1, sp
900	bl	abort_handler
901
902	/*
903	 * Restore state from stack
904	 */
905
906	/* Load x30, elr_el1 */
907	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
908	msr	elr_el1, x0
909	/* Load x0 to x29 */
910	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
911	/* Switch to SP_EL1 */
912	msr	spsel, #1
913	/* Save x0 to x3 in CORE_LOCAL */
914	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
915	/* Restore spsr_el1 and sp_el0 */
916	mrs	x3, sp_el0
917	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
918	msr	spsr_el1, x0
919	msr	sp_el0, x1
920
921	/* Update core local flags */
922	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
923	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
924	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
925
926	/* Restore x2 to x3 */
927	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
928
929	b_if_spsr_is_el0 w0, 1f
930
931	/* Restore x0 to x1 */
932	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
933
934	/* Return from exception */
935	eret
9361:	b	eret_to_el0
937END_FUNC el0_sync_abort
938
939/* The handler of foreign interrupts. */
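/*
 * Descriptive note (not in the original): foreign interrupts are meant to
 * be handled by the normal world, so the current thread is suspended
 * (with its resume state saved in the thread context) and control returns
 * to the normal world with OPTEE_SMC_RETURN_RPC_FOREIGN_INTR in x1 and
 * the thread index in x4. The thread is resumed later through
 * thread_resume() when the normal world returns from the RPC.
 */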
940.macro foreign_intr_handler mode:req
941	/*
942	 * Update core local flags
943	 */
944	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
945	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
946	orr	w1, w1, #THREAD_CLF_TMP
947	.ifc	\mode\(),fiq
948	orr	w1, w1, #THREAD_CLF_FIQ
949	.else
950	orr	w1, w1, #THREAD_CLF_IRQ
951	.endif
952	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
953
954	/* get pointer to current thread context in x0 */
955	get_thread_ctx sp, 0, 1, 2
956	/* Keep original SP_EL0 */
957	mrs	x2, sp_el0
958
959	/* Store original sp_el0 */
960	str	x2, [x0, #THREAD_CTX_REGS_SP]
961	/* store x4..x30 */
962	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
963	/* Load original x0..x3 into x10..x13 */
964	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
965	/* Save original x0..x3 */
966	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13
967
968	/* load tmp_stack_va_end */
969	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
970	/* Switch to SP_EL0 */
971	msr	spsel, #0
972	mov	sp, x1
973
974	/*
975	 * Mark current thread as suspended
976	 */
977	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
978	mrs	x1, spsr_el1
979	mrs	x2, elr_el1
980	bl	thread_state_suspend
981	mov	w4, w0		/* Supply thread index */
982
983	/* Update core local flags */
984	/* Switch to SP_EL1 */
985	msr	spsel, #1
986	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
987	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
988	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
989	msr	spsel, #0
990
991	/*
992	 * Note that we're exiting with SP_EL0 selected since the entry
993	 * functions expect to have SP_EL0 selected with the tmp stack
994	 * set.
995	 */
996
997	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
998	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
999	mov	w2, #0
1000	mov	w3, #0
1001	/* w4 is already filled in above */
1002	smc	#0
1003	b	.	/* SMC should not return */
1004.endm
1005
1006/*
1007 * This struct is never used from C; it's only here to visualize the
1008 * layout.
1009 *
1010 * struct elx_nintr_rec {
1011 * 	uint64_t x[19 - 4]; x4..x18
1012 * 	uint64_t lr;
1013 * 	uint64_t sp_el0;
1014 * };
1015 */
1016#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
1017#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
1018#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
1019#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)
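/*
 * Note (not in the original): the extra 8 in ELX_NINTR_REC_LR makes the
 * record 144 bytes in total, presumably so that SP stays 16-byte aligned
 * after the "sub sp, sp, #ELX_NINTR_REC_SIZE" below.
 */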
1020
1021/* The handler of native interrupts. */
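/*
 * Descriptive note (not in the original): native interrupts are handled
 * right here in the secure world. The handler switches to the temporary
 * stack, saves the registers a C call may clobber (x4-x18, lr and the
 * original sp_el0) in an elx_nintr_rec and calls the registered
 * thread_nintr_handler without suspending the current thread.
 */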
1022.macro native_intr_handler mode:req
1023	/*
1024	 * Update core local flags
1025	 */
1026	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1027	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
1028	.ifc	\mode\(),fiq
1029	orr	w1, w1, #THREAD_CLF_FIQ
1030	.else
1031	orr	w1, w1, #THREAD_CLF_IRQ
1032	.endif
1033	orr	w1, w1, #THREAD_CLF_TMP
1034	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1035
1036	/* load tmp_stack_va_end */
1037	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
1038	/* Keep original SP_EL0 */
1039	mrs	x2, sp_el0
1040	/* Switch to SP_EL0 */
1041	msr	spsel, #0
1042	mov	sp, x1
1043
1044	/*
1045	 * Save on the stack the registers that can be clobbered by a
1046	 * call to a C function
1047	 */
1048	/* Make room for struct elx_nintr_rec */
1049	sub	sp, sp, #ELX_NINTR_REC_SIZE
1050	/* Store x4..x18 */
1051	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
1052	/* Store lr and original sp_el0 */
1053	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]
1054
1055	bl	thread_check_canaries
1056	adr	x16, thread_nintr_handler_ptr
1057	ldr	x16, [x16]
1058	blr	x16
1059
1060	/*
1061	 * Restore registers
1062	 */
1063	/* Restore x4..x18 */
1064	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
1065	/* Load lr and original sp_el0 */
1066	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
1067	/* Restore SP_EL0 */
1068	mov	sp, x2
1069	/* Switch back to SP_EL1 */
1070	msr	spsel, #1
1071
1072	/* Update core local flags */
1073	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
1074	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
1075	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
1076
1077	mrs	x0, spsr_el1
1078	/* Restore x2..x3 */
1079	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
1080	b_if_spsr_is_el0 w0, 1f
1081
1082	/* Restore x0..x1 */
1083	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
1084
1085	/* Return from exception */
1086	eret
10871:	b	eret_to_el0
1088.endm
1089
1090LOCAL_FUNC elx_irq , :
1091#if defined(CFG_ARM_GICV3)
1092	native_intr_handler	irq
1093#else
1094	foreign_intr_handler	irq
1095#endif
1096END_FUNC elx_irq
1097
1098LOCAL_FUNC elx_fiq , :
1099#if defined(CFG_ARM_GICV3)
1100	foreign_intr_handler	fiq
1101#else
1102	native_intr_handler	fiq
1103#endif
1104END_FUNC elx_fiq
1105