xref: /optee_os/core/arch/arm/kernel/thread_a64.S (revision c6c416f1bf4617feef23d592155ba7de69bceea9)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2015-2020, Linaro Limited
4 */
5
6#include <arm.h>
7#include <arm64_macros.S>
8#include <asm.S>
9#include <generated/asm-defines.h>
10#include <keep.h>
11#include <kernel/cache_helpers.h>
12#include <kernel/thread_defs.h>
13#include <mm/core_mmu.h>
14#include <smccc.h>
15
16#include "thread_private.h"
17
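	/*
	 * Load a pointer to the struct thread_ctx of the thread currently
	 * running on this core: reads the current thread index from
	 * \core_local and indexes the global threads[] array with it.
	 * \tmp0 and \tmp1 are clobbered.
	 */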
18	.macro get_thread_ctx core_local, res, tmp0, tmp1
19		ldrh	w\tmp0, [\core_local, \
20				#THREAD_CORE_LOCAL_CURR_THREAD]
21		ldr	x\res, =threads
22		mov	x\tmp1, #THREAD_CTX_SIZE
23		madd	x\res, x\tmp0, x\tmp1, x\res
24	.endm
25
26	.macro return_from_exception
27		eret
28		/* Guard against speculation past ERET */
29		dsb nsh
30		isb
31	.endm
32
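	/*
	 * Branch to \label if \reg (an SPSR value) indicates a return to
	 * AArch32 or to AArch64 EL0, i.e. a return to user mode.
	 */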
33	.macro b_if_spsr_is_el0 reg, label
34		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
35		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
36		b.eq	\label
37	.endm
38
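	/*
	 * With CFG_TA_PAUTH these macros clear/set SCTLR_EL1.EnIA when
	 * entering core code from user mode and before returning to it.
	 * With CFG_TA_BTI, SCTLR_EL1.BT0/BT1 are set while EnIA is cleared
	 * and cleared while EnIA is set.
	 */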
39	.macro disable_pauth reg
40#ifdef	CFG_TA_PAUTH
41		mrs	\reg, sctlr_el1
42		bic     \reg, \reg, #SCTLR_ENIA
43#ifdef CFG_TA_BTI
44		orr     \reg, \reg, #(SCTLR_BT0 | SCTLR_BT1)
45#endif
46		msr	sctlr_el1, \reg
47#endif
48	.endm
49
50	.macro enable_pauth reg
51#ifdef	CFG_TA_PAUTH
52		mrs	\reg, sctlr_el1
53		orr     \reg, \reg, #SCTLR_ENIA
54#ifdef CFG_TA_BTI
55		bic     \reg, \reg, #(SCTLR_BT0 | SCTLR_BT1)
56#endif
57		msr	sctlr_el1, \reg
58#endif
59	.endm
60
61/* void thread_resume(struct thread_ctx_regs *regs) */
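/*
 * Restore the state saved in *regs and return from exception, either back
 * to S-EL1 or, via eret_to_el0, to user mode depending on the saved SPSR.
 */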
62FUNC thread_resume , :
63	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
64	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
65	mov	sp, x1
66	msr	elr_el1, x2
67	msr	spsr_el1, x3
68	ldr	x1, [x0, THREAD_CTX_REGS_TPIDR_EL0]
69	msr	tpidr_el0, x1
70
71	b_if_spsr_is_el0 w3, 1f
72
73	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
74	ldr	x0, [x0, THREAD_CTX_REGS_X0]
75	return_from_exception
76
771:
78#ifdef	CFG_TA_PAUTH
79	/* Restore the APIA key before returning to EL0 */
80	load_xregs x0, THREAD_CTX_REGS_APIAKEY_HI, 1, 2
81	write_apiakeyhi	x1
82	write_apiakeylo	x2
83#endif
84
85	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
86	ldr	x0, [x0, THREAD_CTX_REGS_X0]
87
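	/* Stash x0/x1 in thread_core_local as expected by eret_to_el0 */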
88	msr	spsel, #1
89	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
90	b	eret_to_el0
91END_FUNC thread_resume
92
93FUNC thread_smc , :
94	smc	#0
95	ret
96END_FUNC thread_smc
97
98/* void thread_smccc(struct thread_smc_args *arg_res) */
99FUNC thread_smccc , :
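	/*
	 * Preserve the arg_res pointer across the call: x0..x7 carry both
	 * the SMCCC arguments and the results.
	 */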
100	push	x0, xzr
101	mov	x8, x0
102	load_xregs x8, 0, 0, 7
103#ifdef CFG_CORE_SEL2_SPMC
104	hvc	#0
105#else
106	smc	#0
107#endif
108	pop	x8, xzr
109	store_xregs x8, 0, 0, 7
110	ret
111END_FUNC thread_smccc
112
113FUNC thread_init_vbar , :
114	msr	vbar_el1, x0
115	ret
116END_FUNC thread_init_vbar
117DECLARE_KEEP_PAGER thread_init_vbar
118
119/*
120 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
121 *				     uint32_t *exit_status0,
122 *				     uint32_t *exit_status1);
123 *
124 * This function depends on being called with exceptions masked.
125 */
126FUNC __thread_enter_user_mode , :
127	/*
128	 * Create and fill in the struct thread_user_mode_rec
129	 */
130	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
131	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 0, 2
132	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
133
134	/*
135	 * Save kern sp in x19
136	 * Switch to SP_EL1
137	 */
138	mov	x19, sp
139	msr	spsel, #1
140
141	/*
142	 * Save the kernel stack pointer in the thread context
143	 */
144	/* get pointer to current thread context */
145	get_thread_ctx sp, 21, 20, 22
146	/*
147	 * Save the kernel stack pointer to ensure that el0_svc() uses the
148	 * correct stack pointer
149	 */
150	str	x19, [x21, #THREAD_CTX_KERN_SP]
151
152	/*
153	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
154	 */
155	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
156	msr	sp_el0, x1
157	msr	elr_el1, x2
158	msr	spsr_el1, x3
159
160#ifdef	CFG_TA_PAUTH
161	/* Load APIAKEY */
162	load_xregs x0, THREAD_CTX_REGS_APIAKEY_HI, 1, 2
163	write_apiakeyhi	x1
164	write_apiakeylo	x2
165#endif
166
167	/*
168	 * Save the values for x0 and x1 in struct thread_core_local to be
169	 * restored later just before the eret.
170	 */
171	load_xregs x0, THREAD_CTX_REGS_X0, 1, 2
172	store_xregs sp, THREAD_CORE_LOCAL_X0, 1, 2
173
174	/* Load the rest of the general purpose registers */
175	load_xregs x0, THREAD_CTX_REGS_X2, 2, 30
176
177	/* Jump into user mode */
178	b eret_to_el0
179END_FUNC __thread_enter_user_mode
180DECLARE_KEEP_PAGER __thread_enter_user_mode
181
182/*
183 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
184 * 		uint32_t exit_status1);
185 * See description in thread.h
186 */
187FUNC thread_unwind_user_mode , :
188	/* Store the exit status */
189	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 3, 5
190	str	w1, [x4]
191	str	w2, [x5]
192	/* Save x19..x30 */
193	store_xregs x3, THREAD_CTX_REGS_X19, 19, 30
194	/* Restore x19..x30 */
195	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
196	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
197	/* Return from the call of thread_enter_user_mode() */
198	ret
199END_FUNC thread_unwind_user_mode
200
201	/*
202	 * This macro verifies that a given vector doesn't exceed the
203	 * architectural limit of 32 instructions. It is meant to be placed
204	 * immediately after the last instruction in the vector and takes the
205	 * vector entry label as its parameter.
206	 */
207	.macro check_vector_size since
208	  .if (. - \since) > (32 * 4)
209	    .error "Vector exceeds 32 instructions"
210	  .endif
211	.endm
212
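	/*
	 * Runs first in each lower EL vector entry: switches TTBR0_EL1 back
	 * to the kernel ASID (and, with CFG_CORE_UNMAP_CORE_AT_EL0, back to
	 * the full kernel mapping and matching VBAR_EL1), then saves x0..x3
	 * in thread_core_local.
	 */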
213	.macro restore_mapping
214#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
215		/* Temporarily save x0, x1 */
216		msr	tpidr_el1, x0
217		msr	tpidrro_el0, x1
218
219		/* Update the mapping to use the full kernel mapping */
220		mrs	x0, ttbr0_el1
221		sub	x0, x0, #CORE_MMU_BASE_TABLE_OFFSET
222		/* switch to kernel mode ASID */
223		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
224		msr	ttbr0_el1, x0
225		isb
226
227		/* Jump into the full mapping and continue execution */
228		ldr	x0, =1f
229		br	x0
230	1:
231BTI(		bti	j)
232		/* Point to the vector into the full mapping */
233		adr_l	x0, thread_user_kcode_offset
234		ldr	x0, [x0]
235		mrs	x1, vbar_el1
236		add	x1, x1, x0
237		msr	vbar_el1, x1
238		isb
239
240#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
241		/*
242		 * Update the SP with thread_user_kdata_sp_offset as
243		 * described in init_user_kcode().
244		 */
245		adr_l	x0, thread_user_kdata_sp_offset
246		ldr	x0, [x0]
247		add	sp, sp, x0
248#endif
249
250		/* Restore x0, x1 */
251		mrs	x0, tpidr_el1
252		mrs	x1, tpidrro_el0
253		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
254#else
255		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
256		mrs	x0, ttbr0_el1
257		/* switch to kernel mode ASID */
258		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
259		msr	ttbr0_el1, x0
260		isb
261#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
262	.endm
263
264#define INV_INSN	0
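/*
 * The AArch64 exception vector table: 16 entries of at most 32
 * instructions (128 bytes) each, aligned on 2048 bytes. Unused space is
 * padded with invalid instructions (INV_INSN) and unhandled entries just
 * branch to themselves.
 */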
265FUNC thread_excp_vect , : , default, 2048, nobti
266	/* -----------------------------------------------------
267	 * EL1 with SP0 : 0x0 - 0x180
268	 * -----------------------------------------------------
269	 */
270	.balign	128, INV_INSN
271el1_sync_sp0:
272	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
273	b	el1_sync_abort
274	check_vector_size el1_sync_sp0
275
276	.balign	128, INV_INSN
277el1_irq_sp0:
278	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
279	b	elx_irq
280	check_vector_size el1_irq_sp0
281
282	.balign	128, INV_INSN
283el1_fiq_sp0:
284	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
285	b	elx_fiq
286	check_vector_size el1_fiq_sp0
287
288	.balign	128, INV_INSN
289el1_serror_sp0:
290	b	el1_serror_sp0
291	check_vector_size el1_serror_sp0
292
293	/* -----------------------------------------------------
294	 * Current EL with SP1: 0x200 - 0x380
295	 * -----------------------------------------------------
296	 */
297	.balign	128, INV_INSN
298el1_sync_sp1:
299	b	el1_sync_sp1
300	check_vector_size el1_sync_sp1
301
302	.balign	128, INV_INSN
303el1_irq_sp1:
304	b	el1_irq_sp1
305	check_vector_size el1_irq_sp1
306
307	.balign	128, INV_INSN
308el1_fiq_sp1:
309	b	el1_fiq_sp1
310	check_vector_size el1_fiq_sp1
311
312	.balign	128, INV_INSN
313el1_serror_sp1:
314	b	el1_serror_sp1
315	check_vector_size el1_serror_sp1
316
317	/* -----------------------------------------------------
318	 * Lower EL using AArch64 : 0x400 - 0x580
319	 * -----------------------------------------------------
320	 */
321	.balign	128, INV_INSN
322el0_sync_a64:
323	restore_mapping
324	/*
	 * PAuth is disabled later, in el0_svc or el0_sync_abort, else this
	 * entry would exceed 32 instructions and check_vector_size would
	 * fail.
	 */
325
326	mrs	x2, esr_el1
327	mrs	x3, sp_el0
328	lsr	x2, x2, #ESR_EC_SHIFT
329	cmp	x2, #ESR_EC_AARCH64_SVC
330	b.eq	el0_svc
331	b	el0_sync_abort
332	check_vector_size el0_sync_a64
333
334	.balign	128, INV_INSN
335el0_irq_a64:
336	restore_mapping
337	disable_pauth x1
338
339	b	elx_irq
340	check_vector_size el0_irq_a64
341
342	.balign	128, INV_INSN
343el0_fiq_a64:
344	restore_mapping
345	disable_pauth x1
346
347	b	elx_fiq
348	check_vector_size el0_fiq_a64
349
350	.balign	128, INV_INSN
351el0_serror_a64:
352	b   	el0_serror_a64
353	check_vector_size el0_serror_a64
354
355	/* -----------------------------------------------------
356	 * Lower EL using AArch32 : 0x600 - 0x780
357	 * -----------------------------------------------------
358	 */
359	.balign	128, INV_INSN
360el0_sync_a32:
361	restore_mapping
362
363	mrs	x2, esr_el1
364	mrs	x3, sp_el0
365	lsr	x2, x2, #ESR_EC_SHIFT
366	cmp	x2, #ESR_EC_AARCH32_SVC
367	b.eq	el0_svc
368	b	el0_sync_abort
369	check_vector_size el0_sync_a32
370
371	.balign	128, INV_INSN
372el0_irq_a32:
373	restore_mapping
374
375	b	elx_irq
376	check_vector_size el0_irq_a32
377
378	.balign	128, INV_INSN
379el0_fiq_a32:
380	restore_mapping
381
382	b	elx_fiq
383	check_vector_size el0_fiq_a32
384
385	.balign	128, INV_INSN
386el0_serror_a32:
387	b	el0_serror_a32
388	check_vector_size el0_serror_a32
389
390#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
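	/*
	 * Alternative vector used when CFG_CORE_WORKAROUND_SPECTRE_BP_SEC is
	 * enabled: entries taken from lower ELs first issue
	 * SMCCC_ARCH_WORKAROUND_1 so the firmware invalidates the branch
	 * predictor, then branch to the regular handlers above.
	 */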
391	.macro invalidate_branch_predictor
392		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
393		mov_imm	x0, SMCCC_ARCH_WORKAROUND_1
394		smc	#0
395		load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
396	.endm
397
398	.balign	2048, INV_INSN
399	.global thread_excp_vect_workaround
400thread_excp_vect_workaround:
401	/* -----------------------------------------------------
402	 * EL1 with SP0 : 0x0 - 0x180
403	 * -----------------------------------------------------
404	 */
405	.balign	128, INV_INSN
406workaround_el1_sync_sp0:
407	b	el1_sync_sp0
408	check_vector_size workaround_el1_sync_sp0
409
410	.balign	128, INV_INSN
411workaround_el1_irq_sp0:
412	b	el1_irq_sp0
413	check_vector_size workaround_el1_irq_sp0
414
415	.balign	128, INV_INSN
416workaround_el1_fiq_sp0:
417	b	el1_fiq_sp0
418	check_vector_size workaround_el1_fiq_sp0
419
420	.balign	128, INV_INSN
421workaround_el1_serror_sp0:
422	b	el1_serror_sp0
423	check_vector_size workaround_el1_serror_sp0
424
425	/* -----------------------------------------------------
426	 * Current EL with SP1: 0x200 - 0x380
427	 * -----------------------------------------------------
428	 */
429	.balign	128, INV_INSN
430workaround_el1_sync_sp1:
431	b	workaround_el1_sync_sp1
432	check_vector_size workaround_el1_sync_sp1
433
434	.balign	128, INV_INSN
435workaround_el1_irq_sp1:
436	b	workaround_el1_irq_sp1
437	check_vector_size workaround_el1_irq_sp1
438
439	.balign	128, INV_INSN
440workaround_el1_fiq_sp1:
441	b	workaround_el1_fiq_sp1
442	check_vector_size workaround_el1_fiq_sp1
443
444	.balign	128, INV_INSN
445workaround_el1_serror_sp1:
446	b	workaround_el1_serror_sp1
447	check_vector_size workaround_el1_serror_sp1
448
449	/* -----------------------------------------------------
450	 * Lower EL using AArch64 : 0x400 - 0x580
451	 * -----------------------------------------------------
452	 */
453	.balign	128, INV_INSN
454workaround_el0_sync_a64:
455	invalidate_branch_predictor
456	b	el0_sync_a64
457	check_vector_size workaround_el0_sync_a64
458
459	.balign	128, INV_INSN
460workaround_el0_irq_a64:
461	invalidate_branch_predictor
462	b	el0_irq_a64
463	check_vector_size workaround_el0_irq_a64
464
465	.balign	128, INV_INSN
466workaround_el0_fiq_a64:
467	invalidate_branch_predictor
468	b	el0_fiq_a64
469	check_vector_size workaround_el0_fiq_a64
470
471	.balign	128, INV_INSN
472workaround_el0_serror_a64:
473	b   	workaround_el0_serror_a64
474	check_vector_size workaround_el0_serror_a64
475
476	/* -----------------------------------------------------
477	 * Lower EL using AArch32 : 0x600 - 0x780
478	 * -----------------------------------------------------
479	 */
480	.balign	128, INV_INSN
481workaround_el0_sync_a32:
482	invalidate_branch_predictor
483	b	el0_sync_a32
484	check_vector_size workaround_el0_sync_a32
485
486	.balign	128, INV_INSN
487workaround_el0_irq_a32:
488	invalidate_branch_predictor
489	b	el0_irq_a32
490	check_vector_size workaround_el0_irq_a32
491
492	.balign	128, INV_INSN
493workaround_el0_fiq_a32:
494	invalidate_branch_predictor
495	b	el0_fiq_a32
496	check_vector_size workaround_el0_fiq_a32
497
498	.balign	128, INV_INSN
499workaround_el0_serror_a32:
500	b	workaround_el0_serror_a32
501	check_vector_size workaround_el0_serror_a32
502#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
503
504/*
505 * We're keeping this code in the same section as the vector to make sure
506 * that it's always available.
507 */
508eret_to_el0:
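	/*
	 * Return to user mode: expects x0/x1 to have been saved in
	 * THREAD_CORE_LOCAL_X0/X1 and the remaining GP registers to already
	 * hold their user values. Re-enables PAuth and switches to the user
	 * mode ASID (and, with CFG_CORE_UNMAP_CORE_AT_EL0, to the reduced
	 * kernel mapping and matching vector) before the eret.
	 */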
509	enable_pauth x1
510
511#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
512	/* Point to the vector into the reduced mapping */
513	adr_l	x0, thread_user_kcode_offset
514	ldr	x0, [x0]
515	mrs	x1, vbar_el1
516	sub	x1, x1, x0
517	msr	vbar_el1, x1
518	isb
519
520#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
521	/* Store the SP offset in tpidr_el1 to be used below to update SP */
522	adr_l	x1, thread_user_kdata_sp_offset
523	ldr	x1, [x1]
524	msr	tpidr_el1, x1
525#endif
526
527	/* Jump into the reduced mapping and continue execution */
528	ldr	x1, =1f
529	sub	x1, x1, x0
530	br	x1
5311:
532BTI(	bti	j)
533	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
534	msr	tpidrro_el0, x0
535
536	/* Update the mapping to exclude the full kernel mapping */
537	mrs	x0, ttbr0_el1
538	add	x0, x0, #CORE_MMU_BASE_TABLE_OFFSET
539	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
540	msr	ttbr0_el1, x0
541	isb
542
543#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
544	/*
545	 * Update the SP with thread_user_kdata_sp_offset as described in
546	 * init_user_kcode().
547	 */
548	mrs	x0, tpidr_el1
549	sub	sp, sp, x0
550#endif
551
552	mrs	x0, tpidrro_el0
553#else
554	mrs	x0, ttbr0_el1
555	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
556	msr	ttbr0_el1, x0
557	isb
558	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
559#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
560
561	return_from_exception
562
563	/*
564	 * void icache_inv_user_range(void *addr, size_t size);
565	 *
566	 * This function has to execute with the user space ASID active,
567	 * which means executing with the reduced mapping, so the code needs
568	 * to be located here together with the vector.
569	 */
570	.global icache_inv_user_range
571	.type icache_inv_user_range , %function
572icache_inv_user_range:
573	/* Mask all exceptions */
574	mrs	x6, daif	/* this register must be preserved */
575	msr	daifset, #DAIFBIT_ALL
576
577#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
578	/* Point to the vector into the reduced mapping */
579	adr_l	x2, thread_user_kcode_offset
580	ldr	x2, [x2]
581	mrs	x4, vbar_el1	/* this register must be preserved */
582	sub	x3, x4, x2
583	msr	vbar_el1, x3
584	isb
585
586	/* Jump into the reduced mapping and continue execution */
587	ldr	x3, =1f
588	sub	x3, x3, x2
589	br	x3
5901:
591BTI(	bti	j)
592	/* Update the mapping to exclude the full kernel mapping */
593	mrs	x5, ttbr0_el1	/* this register must be preserved */
594	add	x2, x5, #CORE_MMU_BASE_TABLE_OFFSET
595	orr	x2, x2, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
596	msr	ttbr0_el1, x2
597	isb
598
599#else
600	mrs	x5, ttbr0_el1	/* this register must be preserved */
601	orr	x2, x5, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
602	msr	ttbr0_el1, x2
603	isb
604#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
605
606	/*
607	 * Do the actual icache invalidation
608	 */
609
610	/* Calculate minimum icache line size, result in x2 */
611	mrs	x3, ctr_el0
612	and	x3, x3, #CTR_IMINLINE_MASK
613	mov	x2, #CTR_WORD_SIZE
614	lsl	x2, x2, x3
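	/* x2 = CTR_WORD_SIZE << CTR_EL0.IminLine = line size in bytes */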
615
616	add	x1, x0, x1
617	sub	x3, x2, #1
618	bic	x0, x0, x3
6191:
620	ic	ivau, x0
621	add	x0, x0, x2
622	cmp	x0, x1
623	b.lo    1b
624	dsb	ish
625
626#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
627	/* Update the mapping to use the full kernel mapping and ASID */
628	msr	ttbr0_el1, x5
629	isb
630
631	/* Jump into the full mapping and continue execution */
632	ldr	x0, =1f
633	br	x0
6341:
635BTI(	bti	j)
636	/* Point to the vector into the full mapping */
637	msr	vbar_el1, x4
638	isb
639#else
640	/* switch to kernel mode ASID */
641	msr	ttbr0_el1, x5
642	isb
643#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
644
645	msr	daif, x6	/* restore exceptions */
646	ret	/* End of icache_inv_user_range() */
647
648	/*
649	 * Make sure that literals are placed before the
650	 * thread_excp_vect_end label.
651	 */
652	.pool
653	.global thread_excp_vect_end
654thread_excp_vect_end:
655END_FUNC thread_excp_vect
656
657LOCAL_FUNC el0_svc , :
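	/*
	 * SVC from user mode: entered from the EL0 sync vectors with the
	 * original x0..x3 saved in thread_core_local. Builds a struct
	 * thread_svc_regs on the kernel stack, calls thread_svc_handler()
	 * and erets back, via eret_to_el0 when the (possibly updated) SPSR
	 * targets EL0.
	 */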
658	disable_pauth x1
659	/* get pointer to current thread context in x0 */
660	get_thread_ctx sp, 0, 1, 2
661	mrs	x1, tpidr_el0
662	str	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
663	/* load saved kernel sp */
664	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
665	/* Keep pointer to initial record in x1 */
666	mov	x1, sp
667	/* Switch to SP_EL0 and restore kernel sp */
668	msr	spsel, #0
669	mov	x2, sp	/* Save SP_EL0 */
670	mov	sp, x0
671
672	/* Make room for struct thread_svc_regs */
673	sub	sp, sp, #THREAD_SVC_REG_SIZE
674	stp	x30,x2, [sp, #THREAD_SVC_REG_X30]
675
676	/* Restore x0-x3 */
677	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
678	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]
679
680	/* Prepare the argument for the handler */
681	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
682	mrs	x0, elr_el1
683	mrs	x1, spsr_el1
684	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
685
686#ifdef CFG_TA_PAUTH
687	/* Save APIAKEY */
688	read_apiakeyhi	x0
689	read_apiakeylo	x1
690	store_xregs sp, THREAD_SVC_REG_APIAKEY_HI, 0, 1
691#endif
692
693	mov	x0, sp
694
695	/*
696	 * Unmask native interrupts, SError, and debug exceptions since we have
697	 * nothing left in sp_el1. Note that the SVC handler is expected to
698	 * re-enable foreign interrupts by itself.
699	 */
700#if defined(CFG_ARM_GICV3)
701	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
702#else
703	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
704#endif
705
706	/* Call the handler */
707	bl	thread_svc_handler
708
709	/* Mask all maskable exceptions since we're switching back to sp_el1 */
710	msr	daifset, #DAIFBIT_ALL
711
712	/*
713	 * Save the kernel sp we had at the beginning of this function.
714	 * This matters when this TA has called another TA, because
715	 * __thread_enter_user_mode() also saves the stack pointer in this
716	 * field.
717	 */
718	msr	spsel, #1
719	get_thread_ctx sp, 0, 1, 2
720	msr	spsel, #0
721	add	x1, sp, #THREAD_SVC_REG_SIZE
722	str	x1, [x0, #THREAD_CTX_KERN_SP]
723
724	/* Restore registers to the required state and return */
725	ldr	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
726	msr	tpidr_el0, x1
727	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
728	msr	elr_el1, x0
729	msr	spsr_el1, x1
730	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
731	mov	x30, sp
732	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
733	mov	sp, x0
734	b_if_spsr_is_el0 w1, 1f
735	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
736	ldr	x30, [x30, #THREAD_SVC_REG_X30]
737
738	return_from_exception
739
7401:
741#ifdef	CFG_TA_PAUTH
742	/* Restore APIAKEY */
743	load_xregs x30, THREAD_SVC_REG_APIAKEY_HI, 0, 1
744	write_apiakeyhi	x0
745	write_apiakeylo	x1
746#endif
747
748	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
749	ldr	x30, [x30, #THREAD_SVC_REG_X30]
750
751	msr	spsel, #1
752	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
753	b	eret_to_el0
754END_FUNC el0_svc
755
756LOCAL_FUNC el1_sync_abort , :
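	/*
	 * Synchronous exception taken from S-EL1: switches to the per core
	 * abort stack (or the tmp stack when the abort stack is already in
	 * use, i.e. a nested abort), saves a struct thread_abort_regs there
	 * and calls abort_handler().
	 */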
757	mov	x0, sp
758	msr	spsel, #0
759	mov	x3, sp		/* Save original sp */
760
761	/*
762	 * Update core local flags.
763	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
764	 */
765	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
766	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
767	orr	w1, w1, #THREAD_CLF_ABORT
768	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
769			.Lsel_tmp_sp
770
771	/* Select abort stack */
772	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
773	b	.Lset_sp
774
775.Lsel_tmp_sp:
776	/* Select tmp stack */
777	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
778	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */
779
780.Lset_sp:
781	mov	sp, x2
782	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
783
784	/*
785	 * Save state on stack
786	 */
787	sub	sp, sp, #THREAD_ABT_REGS_SIZE
788	mrs	x2, spsr_el1
789	/* Store spsr, sp_el0 */
790	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
791	/* Store original x0, x1 */
792	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
793	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
794	/* Store original x2, x3 and x4 to x29 */
795	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
796	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
797	/* Store x30, elr_el1 */
798	mrs	x0, elr_el1
799	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
800
801	/*
802	 * Call handler
803	 */
804	mov	x0, #0
805	mov	x1, sp
806	bl	abort_handler
807
808	/*
809	 * Restore state from stack
810	 */
811	/* Load x30, elr_el1 */
812	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
813	msr	elr_el1, x0
814	/* Load x0 to x29 */
815	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
816	/* Switch to SP_EL1 */
817	msr	spsel, #1
818	/* Save x0 to x3 in CORE_LOCAL */
819	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
820	/* Restore spsr_el1 and sp_el0 */
821	mrs	x3, sp_el0
822	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
823	msr	spsr_el1, x0
824	msr	sp_el0, x1
825
826	/* Update core local flags */
827	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
828	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
829	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
830
831	/* Restore x0 to x3 */
832	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
833
834	/* Return from exception */
835	return_from_exception
836END_FUNC el1_sync_abort
837
838	/* sp_el0 in x3 */
839LOCAL_FUNC el0_sync_abort , :
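	/*
	 * Non-SVC synchronous exception from user mode, with the original
	 * x0..x3 saved in thread_core_local by the vector. Saves a struct
	 * thread_abort_regs on the abort stack and calls abort_handler().
	 */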
840	disable_pauth x1
841	/*
842	 * Update core local flags
843	 */
844	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
845	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
846	orr	w1, w1, #THREAD_CLF_ABORT
847	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
848
849	/*
850	 * Save state on stack
851	 */
852
853	/* load abt_stack_va_end */
854	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
855	/* Keep pointer to initial record in x0 */
856	mov	x0, sp
857	/* Switch to SP_EL0 */
858	msr	spsel, #0
859	mov	sp, x1
860	sub	sp, sp, #THREAD_ABT_REGS_SIZE
861	mrs	x2, spsr_el1
862	/* Store spsr, sp_el0 */
863	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
864	/* Store original x0, x1 */
865	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
866	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
867	/* Store original x2, x3 and x4 to x29 */
868	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
869	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
870	/* Store x30, elr_el1 */
871	mrs	x0, elr_el1
872	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
873
874	/*
875	 * Call handler
876	 */
877	mov	x0, #0
878	mov	x1, sp
879	bl	abort_handler
880
881	/*
882	 * Restore state from stack
883	 */
884
885	/* Load x30, elr_el1 */
886	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
887	msr	elr_el1, x0
888	/* Load x0 to x29 */
889	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
890	/* Switch to SP_EL1 */
891	msr	spsel, #1
892	/* Save x0 to x3 in CORE_LOCAL */
893	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
894	/* Restore spsr_el1 and sp_el0 */
895	mrs	x3, sp_el0
896	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
897	msr	spsr_el1, x0
898	msr	sp_el0, x1
899
900	/* Update core local flags */
901	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
902	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
903	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
904
905	/* Restore x2 to x3 */
906	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
907
908	b_if_spsr_is_el0 w0, 1f
909
910	/* Restore x0 to x1 */
911	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
912
913	/* Return from exception */
914	return_from_exception
9151:	b	eret_to_el0
916END_FUNC el0_sync_abort
917
918/* The handler of foreign interrupts. */
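/*
 * Stores sp_el0, tpidr_el0 and x0..x30 into the current thread's struct
 * thread_ctx_regs, marks the thread as suspended
 * (THREAD_FLAGS_EXIT_ON_FOREIGN_INTR) and exits to the normal world via
 * thread_foreign_intr_exit() so the interrupt can be handled there.
 */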
919.macro foreign_intr_handler mode:req
920	/*
921	 * Update core local flags
922	 */
923	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
924	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
925	orr	w1, w1, #THREAD_CLF_TMP
926	.ifc	\mode\(),fiq
927	orr	w1, w1, #THREAD_CLF_FIQ
928	.else
929	orr	w1, w1, #THREAD_CLF_IRQ
930	.endif
931	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
932
933	/* get pointer to current thread context in x0 */
934	get_thread_ctx sp, 0, 1, 2
935	/* Keep original SP_EL0 */
936	mrs	x2, sp_el0
937
938	/* Store original sp_el0 */
939	str	x2, [x0, #THREAD_CTX_REGS_SP]
940	/* Store tpidr_el0 */
941	mrs	x2, tpidr_el0
942	str	x2, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
943	/* Store x4..x30 */
944	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
945	/* Load original x0..x3 into x10..x13 */
946	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
947	/* Save original x0..x3 */
948	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13
949
950	/* load tmp_stack_va_end */
951	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
952	/* Switch to SP_EL0 */
953	msr	spsel, #0
954	mov	sp, x1
955
956#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
957	/*
958	 * Prevent leaking information about which entries have been used in
959	 * the cache. We're relying on the dispatcher in TF-A to take care of
960	 * the BTB.
961	 */
962	mov	x0, #DCACHE_OP_CLEAN_INV
963	bl	dcache_op_louis
964	ic	iallu
965#endif
966	/*
967	 * Mark current thread as suspended
968	 */
969	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
970	mrs	x1, spsr_el1
971	mrs	x2, elr_el1
972	bl	thread_state_suspend
973
974	/* Update core local flags */
975	/* Switch to SP_EL1 */
976	msr	spsel, #1
977	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
978	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
979	orr	w1, w1, #THREAD_CLF_TMP
980	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
981	msr	spsel, #0
982
983	/*
984	 * Note that we're exiting with SP_EL0 selected since the entry
985	 * functions expect to have SP_EL0 selected with the tmp stack
986	 * set.
987	 */
988
989	/* Passing thread index in w0 */
990	b	thread_foreign_intr_exit
991.endm
992
993/*
994 * This struct is never used from C; it's only here to visualize the
995 * layout.
996 *
997 * struct elx_nintr_rec {
998 * 	uint64_t x[19 - 4]; x4..x18
999 * 	uint64_t lr;
1000 * 	uint64_t sp_el0;
1001 * };
1002 */
1003#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
1004#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
1005#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
1006#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)
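/*
 * With these defines x4..x18 are stored at offsets 0..112, lr at 128 and
 * sp_el0 at 136, for a total of 144 bytes; the unused 8 bytes after x18
 * keep ELX_NINTR_REC_SIZE a multiple of 16 as required for SP.
 */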
1007
1008/* The handler of native interrupts. */
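/*
 * Handles the interrupt inside OP-TEE: saves x4..x18, lr and sp_el0 on the
 * tmp stack (x0..x3 are already in thread_core_local), calls
 * thread_check_canaries() and itr_core_handler(), then returns to the
 * interrupted context, which may be S-EL1 or, via eret_to_el0, user mode.
 */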
1009.macro native_intr_handler mode:req
1010	/*
1011	 * Update core local flags
1012	 */
1013	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1014	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
1015	.ifc	\mode\(),fiq
1016	orr	w1, w1, #THREAD_CLF_FIQ
1017	.else
1018	orr	w1, w1, #THREAD_CLF_IRQ
1019	.endif
1020	orr	w1, w1, #THREAD_CLF_TMP
1021	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1022
1023	/* load tmp_stack_va_end */
1024	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
1025	/* Keep original SP_EL0 */
1026	mrs	x2, sp_el0
1027	/* Switch to SP_EL0 */
1028	msr	spsel, #0
1029	mov	sp, x1
1030
1031	/*
1032	 * Save registers on stack that can be corrupted by a call to
1033	 * a C function
1034	 */
1035	/* Make room for struct elx_nintr_rec */
1036	sub	sp, sp, #ELX_NINTR_REC_SIZE
1037	/* Store x4..x18 */
1038	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
1039	/* Store lr and original sp_el0 */
1040	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]
1041
1042	bl	thread_check_canaries
1043	bl	itr_core_handler
1044
1045	/*
1046	 * Restore registers
1047	 */
1048	/* Restore x4..x18 */
1049	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
1050	/* Load  lr and original sp_el0 */
1051	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
1052	/* Restore SP_EL0 */
1053	mov	sp, x2
1054	/* Switch back to SP_EL1 */
1055	msr	spsel, #1
1056
1057	/* Update core local flags */
1058	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
1059	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
1060	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
1061
1062	mrs	x0, spsr_el1
1063	/* Restore x2..x3 */
1064	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
1065	b_if_spsr_is_el0 w0, 1f
1066
1067	/* Restore x0..x1 */
1068	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
1069
1070	/* Return from exception */
1071	return_from_exception
10721:	b	eret_to_el0
1073.endm
1074
1075LOCAL_FUNC elx_irq , :
1076#if defined(CFG_ARM_GICV3)
1077	native_intr_handler	irq
1078#else
1079	foreign_intr_handler	irq
1080#endif
1081END_FUNC elx_irq
1082
1083LOCAL_FUNC elx_fiq , :
1084#if defined(CFG_ARM_GICV3)
1085	foreign_intr_handler	fiq
1086#else
1087	native_intr_handler	fiq
1088#endif
1089END_FUNC elx_fiq
1090
1091BTI(emit_aarch64_feature_1_and     GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
1092