/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2020, Linaro Limited
 */

#include <arm64_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>
#include <smccc.h>

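	/*
	 * Compute the address of the current thread's struct thread_ctx:
	 * the current thread index is read from struct thread_core_local
	 * (pointed to by \core_local) and used to index the global
	 * threads[] array. The result is left in x\res.
	 */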
	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldrh	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		adr_l	x\res, threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm

	.macro return_from_exception
		eret
		/* Guard against speculation past ERET */
		dsb nsh
		isb
	.endm

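	/*
	 * Branch to \label if the SPSR value in \reg indicates a return to
	 * EL0: either an AArch32 return state or an AArch64 state with the
	 * EL field equal to 0.
	 */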
	.macro b_if_spsr_is_el0 reg, label
		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
		b.eq	\label
	.endm

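	/*
	 * The two macros below toggle SCTLR_EL1.EnIA to turn pointer
	 * authentication with the instruction A-key off or on. When
	 * CFG_TA_BTI is enabled the SCTLR_EL1.BT0/BT1 bits are updated to
	 * match.
	 */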
	.macro disable_pauth reg
#ifdef	CFG_TA_PAUTH
		mrs	\reg, sctlr_el1
		bic     \reg, \reg, #SCTLR_ENIA
#ifdef CFG_TA_BTI
		orr     \reg, \reg, #(SCTLR_BT0 | SCTLR_BT1)
#endif
		msr	sctlr_el1, \reg
#endif
	.endm

	.macro enable_pauth reg
#ifdef	CFG_TA_PAUTH
		mrs	\reg, sctlr_el1
		orr     \reg, \reg, #SCTLR_ENIA
#ifdef CFG_TA_BTI
		bic     \reg, \reg, #(SCTLR_BT0 | SCTLR_BT1)
#endif
		msr	sctlr_el1, \reg
#endif
	.endm

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3
	ldr	x1, [x0, THREAD_CTX_REGS_TPIDR_EL0]
	msr	tpidr_el0, x1

	b_if_spsr_is_el0 w3, 1f

	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]
	return_from_exception

1:
#ifdef	CFG_TA_PAUTH
	/* Restore PAC keys before return to el0 */
	load_xregs x0, THREAD_CTX_REGS_APIAKEY_HI, 1, 2
	write_apiakeyhi	x1
	write_apiakeylo	x2
#endif

	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC thread_resume

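/*
 * Issue a raw SMC: arguments and results are passed directly in x0..x7
 * according to the SMC calling convention.
 */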
FUNC thread_smc , :
	smc	#0
	ret
END_FUNC thread_smc

/* void thread_smccc(struct thread_smc_args *arg_res) */
FUNC thread_smccc , :
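	/*
	 * Keep the pointer argument across the conduit call; xzr is pushed
	 * as padding to keep the stack 16-byte aligned.
	 */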
	push	x0, xzr
	mov	x8, x0
	load_xregs x8, 0, 0, 7
#ifdef CFG_CORE_SEL2_SPMC
	hvc	#0
#else
	smc	#0
#endif
	pop	x8, xzr
	store_xregs x8, 0, 0, 7
	ret
END_FUNC thread_smccc

FUNC thread_init_vbar , :
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar
DECLARE_KEEP_PAGER thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 *
 * This function depends on being called with exceptions masked.
 */
FUNC __thread_enter_user_mode , :
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 0, 2
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Save kernel sp in x19
	 * Switch to SP_EL1
	 */
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save the kernel stack pointer to ensure that el0_svc() uses
	 * the correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	msr	sp_el0, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3

#ifdef	CFG_TA_PAUTH
	/* Load APIAKEY */
	load_xregs x0, THREAD_CTX_REGS_APIAKEY_HI, 1, 2
	write_apiakeyhi	x1
	write_apiakeylo	x2
#endif

	/*
	 * Save the values for x0 and x1 in struct thread_core_local to be
	 * restored later just before the eret.
	 */
	load_xregs x0, THREAD_CTX_REGS_X0, 1, 2
	store_xregs sp, THREAD_CORE_LOCAL_X0, 1, 2

	/* Load the rest of the general purpose registers */
	load_xregs x0, THREAD_CTX_REGS_X2, 2, 30

	/* Jump into user mode */
	b eret_to_el0
END_FUNC __thread_enter_user_mode
DECLARE_KEEP_PAGER __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 * 		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 3, 5
	str	w1, [x4]
	str	w2, [x5]
	/* Save x19..x30 */
	store_xregs x3, THREAD_CTX_REGS_X19, 19, 30
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes
	 * the vector entry label as its parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

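	/*
	 * Restore the kernel mapping after an exception from user mode:
	 * TTBR0_EL1 is switched back to the kernel mode ASID and, with
	 * CFG_CORE_UNMAP_CORE_AT_EL0, execution and VBAR_EL1 are moved from
	 * the reduced trampoline mapping into the full kernel mapping.
	 * On exit x0..x3 are saved in struct thread_core_local.
	 */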
	.macro restore_mapping
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/* Temporarily save x0, x1 */
		msr	tpidr_el1, x0
		msr	tpidrro_el0, x1

		/* Update the mapping to use the full kernel mapping */
		mrs	x0, ttbr0_el1
		sub	x0, x0, #CORE_MMU_BASE_TABLE_OFFSET
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb

		/* Jump into the full mapping and continue execution */
		adr	x0, 1f
		ldr	x1, [sp, #THREAD_CORE_LOCAL_KCODE_OFFSET]
		add	x0, x0, x1
		br	x0
	1:
BTI(		bti	j)
		/* Point to the vector into the full mapping */
		adr_l	x0, thread_user_kcode_offset
		ldr	x0, [x0]
		mrs	x1, vbar_el1
		add	x1, x1, x0
		msr	vbar_el1, x1
		isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
		/*
		 * Update the SP with thread_user_kdata_sp_offset as
		 * described in init_user_kcode().
		 */
		adr_l	x0, thread_user_kdata_sp_offset
		ldr	x0, [x0]
		add	sp, sp, x0
#endif

		/* Restore x0, x1 */
		mrs	x0, tpidr_el1
		mrs	x1, tpidrro_el0
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
#else
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mrs	x0, ttbr0_el1
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	.endm

#define INV_INSN	0
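/*
 * Exception vector table for EL1. Each of the 16 entries is at most
 * 32 instructions (128 bytes) and unused space is filled with invalid
 * instructions (INV_INSN). Entries that are not expected to be taken
 * just loop on themselves.
 */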
FUNC thread_excp_vect , : , default, 2048, nobti
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
el1_sync_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size el1_sync_sp0

	.balign	128, INV_INSN
el1_irq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el1_irq_sp0

	.balign	128, INV_INSN
el1_fiq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el1_fiq_sp0

	.balign	128, INV_INSN
el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
el1_sync_sp1:
	b	el1_sync_sp1
	check_vector_size el1_sync_sp1

	.balign	128, INV_INSN
el1_irq_sp1:
	b	el1_irq_sp1
	check_vector_size el1_irq_sp1

	.balign	128, INV_INSN
el1_fiq_sp1:
	b	el1_fiq_sp1
	check_vector_size el1_fiq_sp1

	.balign	128, INV_INSN
el1_serror_sp1:
	b	el1_serror_sp1
	check_vector_size el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
el0_sync_a64:
	restore_mapping
	/* PAuth is disabled later, otherwise check_vector_size would fail */

	b	el0_sync_a64_finish
	check_vector_size el0_sync_a64

	.balign	128, INV_INSN
el0_irq_a64:
	restore_mapping
	disable_pauth x1

	b	elx_irq
	check_vector_size el0_irq_a64

	.balign	128, INV_INSN
el0_fiq_a64:
	restore_mapping
	disable_pauth x1

	b	elx_fiq
	check_vector_size el0_fiq_a64

	.balign	128, INV_INSN
el0_serror_a64:
	b	el0_serror_a64
	check_vector_size el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
el0_sync_a32:
	restore_mapping

	b	el0_sync_a32_finish
	check_vector_size el0_sync_a32

	.balign	128, INV_INSN
el0_irq_a32:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a32

	.balign	128, INV_INSN
el0_fiq_a32:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a32

	.balign	128, INV_INSN
el0_serror_a32:
	b	el0_serror_a32
	check_vector_size el0_serror_a32

#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
	.macro invalidate_branch_predictor
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mov_imm	x0, SMCCC_ARCH_WORKAROUND_1
		smc	#0
		load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	.endm

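	/*
	 * Alternative vector table used when
	 * CFG_CORE_WORKAROUND_SPECTRE_BP_SEC is enabled: exceptions from a
	 * lower EL first invalidate the branch predictor
	 * (SMCCC_ARCH_WORKAROUND_1) and then branch to the regular vector
	 * entries above.
	 */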
	.balign	2048, INV_INSN
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
workaround_el1_sync_sp0:
	b	el1_sync_sp0
	check_vector_size workaround_el1_sync_sp0

	.balign	128, INV_INSN
workaround_el1_irq_sp0:
	b	el1_irq_sp0
	check_vector_size workaround_el1_irq_sp0

	.balign	128, INV_INSN
workaround_el1_fiq_sp0:
	b	el1_fiq_sp0
	check_vector_size workaround_el1_fiq_sp0

	.balign	128, INV_INSN
workaround_el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size workaround_el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
workaround_el1_sync_sp1:
	b	workaround_el1_sync_sp1
	check_vector_size workaround_el1_sync_sp1

	.balign	128, INV_INSN
workaround_el1_irq_sp1:
	b	workaround_el1_irq_sp1
	check_vector_size workaround_el1_irq_sp1

	.balign	128, INV_INSN
workaround_el1_fiq_sp1:
	b	workaround_el1_fiq_sp1
	check_vector_size workaround_el1_fiq_sp1

	.balign	128, INV_INSN
workaround_el1_serror_sp1:
	b	workaround_el1_serror_sp1
	check_vector_size workaround_el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
workaround_el0_sync_a64:
	invalidate_branch_predictor
	b	el0_sync_a64
	check_vector_size workaround_el0_sync_a64

	.balign	128, INV_INSN
workaround_el0_irq_a64:
	invalidate_branch_predictor
	b	el0_irq_a64
	check_vector_size workaround_el0_irq_a64

	.balign	128, INV_INSN
workaround_el0_fiq_a64:
	invalidate_branch_predictor
	b	el0_fiq_a64
	check_vector_size workaround_el0_fiq_a64

	.balign	128, INV_INSN
workaround_el0_serror_a64:
	b	workaround_el0_serror_a64
	check_vector_size workaround_el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
workaround_el0_sync_a32:
	invalidate_branch_predictor
	b	el0_sync_a32
	check_vector_size workaround_el0_sync_a32

	.balign	128, INV_INSN
workaround_el0_irq_a32:
	invalidate_branch_predictor
	b	el0_irq_a32
	check_vector_size workaround_el0_irq_a32

	.balign	128, INV_INSN
workaround_el0_fiq_a32:
	invalidate_branch_predictor
	b	el0_fiq_a32
	check_vector_size workaround_el0_fiq_a32

	.balign	128, INV_INSN
workaround_el0_serror_a32:
	b	workaround_el0_serror_a32
	check_vector_size workaround_el0_serror_a32
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

/*
 * We're keeping this code in the same section as the vector to make sure
 * that it's always available.
 */
eret_to_el0:
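	/*
	 * Exception return to EL0: re-enable PAuth, switch TTBR0_EL1 to the
	 * user mode ASID (and, with CFG_CORE_UNMAP_CORE_AT_EL0, move
	 * VBAR_EL1 and execution into the reduced mapping), restore x0/x1
	 * from struct thread_core_local and eret.
	 */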
	enable_pauth x1

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point to the vector into the reduced mapping */
	adr_l	x0, thread_user_kcode_offset
	ldr	x0, [x0]
	mrs	x1, vbar_el1
	sub	x1, x1, x0
	msr	vbar_el1, x1
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/* Store the SP offset in tpidr_el1 to be used below to update SP */
	adr_l	x1, thread_user_kdata_sp_offset
	ldr	x1, [x1]
	msr	tpidr_el1, x1
#endif

	/* Jump into the reduced mapping and continue execution */
	adr_l	x1, 1f
	sub	x1, x1, x0
	br	x1
1:
BTI(	bti	j)
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	msr	tpidrro_el0, x0

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x0, ttbr0_el1
	add	x0, x0, #CORE_MMU_BASE_TABLE_OFFSET
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/*
	 * Update the SP with thread_user_kdata_sp_offset as described in
	 * init_user_kcode().
	 */
	mrs	x0, tpidr_el1
	sub	sp, sp, x0
#endif

	mrs	x0, tpidrro_el0
#else
	mrs	x0, ttbr0_el1
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	return_from_exception

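/*
 * Synchronous exceptions from EL0: decode ESR_EL1 and route SVCs to
 * el0_svc, everything else to el0_sync_abort. SP_EL0 is passed along
 * in x3.
 */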
el0_sync_a64_finish:
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort

el0_sync_a32_finish:
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active,
	 * which means executing with the reduced mapping, so the code
	 * needs to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	/* Mask all exceptions */
	mrs	x6, daif	/* this register must be preserved */
	msr	daifset, #DAIFBIT_ALL

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point to the vector into the reduced mapping */
	adr_l	x7, thread_user_kcode_offset
	ldr	x7, [x7]	/* this register must be preserved */
	mrs	x4, vbar_el1	/* this register must be preserved */
	sub	x3, x4, x7
	msr	vbar_el1, x3
	isb

	/* Jump into the reduced mapping and continue execution */
	adr	x3, 1f
	sub	x3, x3, x7
	br	x3
1:
BTI(	bti	j)
	/* Update the mapping to exclude the full kernel mapping */
	mrs	x5, ttbr0_el1	/* this register must be preserved */
	add	x2, x5, #CORE_MMU_BASE_TABLE_OFFSET
	orr	x2, x2, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x2
	isb

#else
	mrs	x5, ttbr0_el1	/* this register must be preserved */
	orr	x2, x5, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in x2 */
	mrs	x3, ctr_el0
	and	x3, x3, #CTR_IMINLINE_MASK
	mov	x2, #CTR_WORD_SIZE
	lsl	x2, x2, x3

	add	x1, x0, x1
	sub	x3, x2, #1
	bic	x0, x0, x3
1:
	ic	ivau, x0
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	ish

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Update the mapping to use the full kernel mapping and ASID */
	msr	ttbr0_el1, x5
	isb

	/* Jump into the full mapping and continue execution */
	adr	x0, 1f
	add	x0, x0, x7
	br	x0
1:
BTI(	bti	j)
	/* Point to the vector into the full mapping */
	msr	vbar_el1, x4
	isb
#else
	/* switch to kernel mode ASID */
	msr	ttbr0_el1, x5
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	msr	daif, x6	/* restore exceptions */
	ret	/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect

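/*
 * SVC from user mode: save the user context in a struct thread_svc_regs
 * frame on the kernel stack, call thread_svc_handler() with native
 * interrupts unmasked and then restore the context, returning to EL0
 * via eret_to_el0 (or directly if the SPSR says the caller wasn't EL0).
 */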
LOCAL_FUNC el0_svc , :
	disable_pauth x1
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	mrs	x1, tpidr_el0
	str	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
	/* load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1

#ifdef CFG_TA_PAUTH
	/* Save APIAKEY */
	read_apiakeyhi	x0
	read_apiakeylo	x1
	store_xregs sp, THREAD_SVC_REG_APIAKEY_HI, 0, 1
#endif

	mov	x0, sp

	/*
	 * Unmask native interrupts, SError, and debug exceptions since we
	 * have nothing left in sp_el1. Note that the SVC handler is expected
	 * to re-enable foreign interrupts by itself.
	 */
#if defined(CFG_ARM_GICV3)
	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
#else
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
#endif

	/* Call the handler */
	bl	thread_svc_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This is needed when this TA has called another TA because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	ldr	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
	msr	tpidr_el0, x1
	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
	mov	sp, x0
	b_if_spsr_is_el0 w1, 1f
	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	return_from_exception

1:
#ifdef	CFG_TA_PAUTH
	/* Restore APIAKEY */
	load_xregs x30, THREAD_SVC_REG_APIAKEY_HI, 0, 1
	write_apiakeyhi	x0
	write_apiakeylo	x1
#endif

	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC el0_svc

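/*
 * Synchronous exception taken from EL1 (e.g. an abort while executing
 * kernel code): the state is saved in a struct thread_abort_regs frame
 * on the abort stack (or on the tmp stack for a nested abort) and
 * abort_handler() is called.
 */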
LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	return_from_exception
END_FUNC el1_sync_abort

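/*
 * Synchronous exception from EL0 that isn't an SVC (typically an abort):
 * the state is saved in a struct thread_abort_regs frame on the abort
 * stack and abort_handler() is called.
 */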
	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	disable_pauth x1
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x2 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3

	b_if_spsr_is_el0 w0, 1f

	/* Restore x0 to x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	return_from_exception
1:	b	eret_to_el0
END_FUNC el0_sync_abort

/* Handler for foreign interrupts. */
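/*
 * A foreign interrupt is handled by the normal world: the current thread
 * is saved and suspended with thread_state_suspend() and the core exits
 * via thread_foreign_intr_exit(), to be resumed later through
 * thread_resume().
 */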
.macro foreign_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* Store tpidr_el0 */
	mrs	x2, tpidr_el0
	str	x2, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
	/* Store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which cache entries have been
	 * used. We're relying on the dispatcher in TF-A to take care of
	 * the BTB.
	 */
	mov	x0, #DCACHE_OP_CLEAN_INV
	bl	dcache_op_louis
	ic	iallu
#endif
	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * functions expect to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	/* Passing thread index in w0 */
	b	thread_foreign_intr_exit
.endm

/*
 * This struct is never used from C; it's only here to visualize the
 * layout.
 *
 * struct elx_nintr_rec {
 * 	uint64_t x[19 - 4]; x4..x18
 * 	uint64_t lr;
 * 	uint64_t sp_el0;
 * };
 */
#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)

/* Handler for native interrupts. */
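/*
 * A native interrupt is handled inside OP-TEE: the registers that a C
 * call may clobber are saved on the tmp stack, itr_core_handler() is
 * called and execution then returns to the interrupted context.
 */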
.macro native_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save the registers that may be clobbered by a call to a C
	 * function on the stack
	 */
	/* Make room for struct elx_nintr_rec */
	sub	sp, sp, #ELX_NINTR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]

	bl	thread_check_canaries
	bl	itr_core_handler

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	mrs	x0, spsr_el1
	/* Restore x2..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
	b_if_spsr_is_el0 w0, 1f

	/* Restore x0..x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	return_from_exception
1:	b	eret_to_el0
.endm

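/*
 * With CFG_ARM_GICV3 the native (secure) interrupt is signalled as IRQ
 * and the foreign one as FIQ; without GICv3 it's the other way around.
 */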
LOCAL_FUNC elx_irq , :
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
END_FUNC elx_fiq

BTI(emit_aarch64_feature_1_and     GNU_PROPERTY_AARCH64_FEATURE_1_BTI)