xref: /optee_os/core/arch/arm/kernel/thread_a64.S (revision 91d4649de98c6beeb8217d40f1fafa50720fe785)
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2022, Linaro Limited
 */

#include <arm64_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>
#include <smccc.h>

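	/*
	 * get_thread_ctx - return a pointer to the currently running
	 * thread's struct thread_ctx in x\res.
	 *
	 * Roughly, in C (a sketch, field names follow the asm-defines
	 * offsets used below):
	 *   res = threads + core_local->curr_thread;
	 *
	 * \core_local holds a pointer to this core's struct
	 * thread_core_local, x\tmp0 and x\tmp1 are clobbered as scratch
	 * registers.
	 */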
	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldrh	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		adr_l	x\res, threads
		ldr     x\res, [x\res]
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm

	.macro return_from_exception
		eret
		/* Guard against speculation past ERET */
		dsb nsh
		isb
	.endm

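	/*
	 * b_if_spsr_is_el0 - branch to \label if the SPSR value in \reg
	 * describes an exception taken from EL0, that is, either from
	 * AArch32 or from AArch64 with the EL field equal to 0.
	 */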
	.macro b_if_spsr_is_el0 reg, label
		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
		b.eq	\label
	.endm

	.macro pauth_el0_to_el1 reg
		/*
		 * If pauth is only enabled in one of core or TA (xor) we
		 * need to update sctlr.
		 */
#if (defined(CFG_TA_PAUTH) && !defined(CFG_CORE_PAUTH)) || \
    (!defined(CFG_TA_PAUTH) && defined(CFG_CORE_PAUTH))
		mrs	\reg, sctlr_el1
		/* Flip the SCTLR_ENIA bit */
		eor     \reg, \reg, #SCTLR_ENIA
		msr	sctlr_el1, \reg
#endif
	.endm

	.macro pauth_el1_to_el0 reg
		/*
		 * If pauth is only enabled in one of core or TA (xor) we
		 * need to update sctlr.
		 */
#if (defined(CFG_TA_PAUTH) && !defined(CFG_CORE_PAUTH)) || \
    (!defined(CFG_TA_PAUTH) && defined(CFG_CORE_PAUTH))
		mrs	\reg, sctlr_el1
		/* Flip the SCTLR_ENIA bit */
		eor     \reg, \reg, #SCTLR_ENIA
		msr	sctlr_el1, \reg
#endif
	.endm

/* void thread_resume(struct thread_ctx_regs *regs) */
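/*
 * Restores the register state saved in @regs and resumes execution at the
 * saved ELR/SPSR with an eret. If SPSR says the thread was interrupted in
 * user mode, the return goes through eret_to_el0 so that the user-mode
 * ASID (and, with CFG_CORE_UNMAP_CORE_AT_EL0, the reduced mapping) is
 * restored first.
 */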
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3
	ldr	x1, [x0, THREAD_CTX_REGS_TPIDR_EL0]
	msr	tpidr_el0, x1

#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
	load_xregs x0, THREAD_CTX_REGS_APIAKEY_HI, 1, 2
	write_apiakeyhi	x1
	write_apiakeylo	x2
#endif
	b_if_spsr_is_el0 w3, 1f

#if defined(CFG_CORE_PAUTH) || defined(CFG_TA_PAUTH)
	/* SCTLR or the APIA key has changed */
	isb
#endif
	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]
	return_from_exception

1:
	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC thread_resume

#ifdef CFG_CORE_SEL2_SPMC
FUNC thread_hvc , :
	hvc	#0
	ret
END_FUNC thread_hvc
#endif

FUNC thread_smc , :
	smc	#0
	ret
END_FUNC thread_smc

/* void thread_smccc(struct thread_smc_args *arg_res) */
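/*
 * Issues an SMC (or an HVC when the SPMC runs at S-EL2) using the eight
 * 64-bit values in *arg_res as arguments a0..a7, and writes the returned
 * a0..a7 back into the same structure. The struct is simply treated as
 * eight consecutive 64-bit words here.
 */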
FUNC thread_smccc , :
	push	x0, xzr
	mov	x8, x0
	load_xregs x8, 0, 0, 7
#ifdef CFG_CORE_SEL2_SPMC
	hvc	#0
#else
	smc	#0
#endif
	pop	x8, xzr
	store_xregs x8, 0, 0, 7
	ret
END_FUNC thread_smccc

FUNC thread_init_vbar , :
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar
DECLARE_KEEP_PAGER thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 *
 * This function depends on being called with exceptions masked.
 */
FUNC __thread_enter_user_mode , :
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 0, 2
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Save kern sp in x19
	 * Switch to SP_EL1
	 */
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save the kernel stack pointer to ensure that el0_svc() uses the
	 * correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	msr	sp_el0, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3

#ifdef	CFG_TA_PAUTH
	/* Load APIAKEY */
	load_xregs x0, THREAD_CTX_REGS_APIAKEY_HI, 1, 2
	write_apiakeyhi	x1
	write_apiakeylo	x2
#endif

	/*
	 * Save the values for x0 and x1 in struct thread_core_local to be
	 * restored later just before the eret.
	 */
	load_xregs x0, THREAD_CTX_REGS_X0, 1, 2
	store_xregs sp, THREAD_CORE_LOCAL_X0, 1, 2

	/* Load the rest of the general purpose registers */
	load_xregs x0, THREAD_CTX_REGS_X2, 2, 30

	/* Jump into user mode */
	b eret_to_el0
END_FUNC __thread_enter_user_mode
DECLARE_KEEP_PAGER __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 * 		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 3, 5
	str	w1, [x4]
	str	w2, [x5]
	/* Save x19..x30 */
	store_xregs x3, THREAD_CTX_REGS_X19, 19, 30
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes the
	 * vector entry as its parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

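	/*
	 * restore_mapping - entered from an EL0 exception vector with SP_EL1
	 * selected. Switches TTBR0_EL1 back to the kernel ASID and, when
	 * CFG_CORE_UNMAP_CORE_AT_EL0 is enabled, branches from the reduced
	 * (user-visible) mapping into the full kernel mapping and points
	 * VBAR_EL1 at the vector copy in that mapping. On exit x0..x3 have
	 * been saved in struct thread_core_local and can be used as scratch
	 * registers.
	 */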
	.macro restore_mapping
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/* Temporarily save x0, x1 */
		msr	tpidr_el1, x0
		msr	tpidrro_el0, x1

		/* Update the mapping to use the full kernel mapping */
		mrs	x0, ttbr0_el1
		sub_imm	x0, __CORE_MMU_BASE_TABLE_OFFSET
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb

		/* Jump into the full mapping and continue execution */
		adr	x0, 1f
		ldr	x1, [sp, #THREAD_CORE_LOCAL_KCODE_OFFSET]
		add	x0, x0, x1
		br	x0
	1:
BTI(		bti	j)
		/* Point to the vector into the full mapping */
		adr_l	x0, thread_user_kcode_offset
		ldr	x0, [x0]
		mrs	x1, vbar_el1
		add	x1, x1, x0
		msr	vbar_el1, x1
		isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
		/*
		 * Update the SP with thread_user_kdata_sp_offset as
		 * described in init_user_kcode().
		 */
		adr_l	x0, thread_user_kdata_sp_offset
		ldr	x0, [x0]
		add	sp, sp, x0
#endif

		/* Restore x0, x1 */
		mrs	x0, tpidr_el1
		mrs	x1, tpidrro_el0
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
#else
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mrs	x0, ttbr0_el1
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	.endm

#define INV_INSN	0
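/*
 * The vector table below follows the AArch64 layout: 16 entries of at most
 * 32 instructions (128 bytes) each, grouped as current EL with SP_EL0
 * (offset 0x000), current EL with SP_ELx (0x200), lower EL using AArch64
 * (0x400) and lower EL using AArch32 (0x600), each group in the order
 * synchronous, IRQ, FIQ, SError. Unused entries spin on themselves.
 */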
FUNC thread_excp_vect , : , default, 2048, nobti
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
el1_sync_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size el1_sync_sp0

	.balign	128, INV_INSN
el1_irq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el1_irq_sp0

	.balign	128, INV_INSN
el1_fiq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el1_fiq_sp0

	.balign	128, INV_INSN
el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
el1_sync_sp1:
	b	el1_sync_sp1
	check_vector_size el1_sync_sp1

	.balign	128, INV_INSN
el1_irq_sp1:
	b	el1_irq_sp1
	check_vector_size el1_irq_sp1

	.balign	128, INV_INSN
el1_fiq_sp1:
	b	el1_fiq_sp1
	check_vector_size el1_fiq_sp1

	.balign	128, INV_INSN
el1_serror_sp1:
	b	el1_serror_sp1
	check_vector_size el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
el0_sync_a64:
	restore_mapping
	/* PAuth is switched later, otherwise check_vector_size would fail */

	b	el0_sync_a64_finish
	check_vector_size el0_sync_a64

	.balign	128, INV_INSN
el0_irq_a64:
	restore_mapping
	pauth_el0_to_el1 x1

	b	elx_irq
	check_vector_size el0_irq_a64

	.balign	128, INV_INSN
el0_fiq_a64:
	restore_mapping
	pauth_el0_to_el1 x1

	b	elx_fiq
	check_vector_size el0_fiq_a64

	.balign	128, INV_INSN
el0_serror_a64:
	b	el0_serror_a64
	check_vector_size el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
el0_sync_a32:
	restore_mapping

	b	el0_sync_a32_finish
	check_vector_size el0_sync_a32

	.balign	128, INV_INSN
el0_irq_a32:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a32

	.balign	128, INV_INSN
el0_fiq_a32:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a32

	.balign	128, INV_INSN
el0_serror_a32:
	b	el0_serror_a32
	check_vector_size el0_serror_a32

#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
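	/*
	 * Spectre-v2 mitigation: ask the firmware to invalidate the branch
	 * predictor via the SMCCC_ARCH_WORKAROUND_1 fast call before handling
	 * an exception taken from a lower EL. x0..x3 are parked in struct
	 * thread_core_local around the SMC since the call clobbers them.
	 */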
	.macro invalidate_branch_predictor
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mov_imm	x0, SMCCC_ARCH_WORKAROUND_1
		smc	#0
		load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	.endm

	.balign	2048, INV_INSN
	.global thread_excp_vect_wa_spectre_v2
thread_excp_vect_wa_spectre_v2:
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_v2_el1_sync_sp0:
	b	el1_sync_sp0
	check_vector_size wa_spectre_v2_el1_sync_sp0

	.balign	128, INV_INSN
wa_spectre_v2_el1_irq_sp0:
	b	el1_irq_sp0
	check_vector_size wa_spectre_v2_el1_irq_sp0

	.balign	128, INV_INSN
wa_spectre_v2_el1_fiq_sp0:
	b	el1_fiq_sp0
	check_vector_size wa_spectre_v2_el1_fiq_sp0

	.balign	128, INV_INSN
wa_spectre_v2_el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size wa_spectre_v2_el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_v2_el1_sync_sp1:
	b	wa_spectre_v2_el1_sync_sp1
	check_vector_size wa_spectre_v2_el1_sync_sp1

	.balign	128, INV_INSN
wa_spectre_v2_el1_irq_sp1:
	b	wa_spectre_v2_el1_irq_sp1
	check_vector_size wa_spectre_v2_el1_irq_sp1

	.balign	128, INV_INSN
wa_spectre_v2_el1_fiq_sp1:
	b	wa_spectre_v2_el1_fiq_sp1
	check_vector_size wa_spectre_v2_el1_fiq_sp1

	.balign	128, INV_INSN
wa_spectre_v2_el1_serror_sp1:
	b	wa_spectre_v2_el1_serror_sp1
	check_vector_size wa_spectre_v2_el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_v2_el0_sync_a64:
	invalidate_branch_predictor
	b	el0_sync_a64
	check_vector_size wa_spectre_v2_el0_sync_a64

	.balign	128, INV_INSN
wa_spectre_v2_el0_irq_a64:
	invalidate_branch_predictor
	b	el0_irq_a64
	check_vector_size wa_spectre_v2_el0_irq_a64

	.balign	128, INV_INSN
wa_spectre_v2_el0_fiq_a64:
	invalidate_branch_predictor
	b	el0_fiq_a64
	check_vector_size wa_spectre_v2_el0_fiq_a64

	.balign	128, INV_INSN
wa_spectre_v2_el0_serror_a64:
	b	wa_spectre_v2_el0_serror_a64
	check_vector_size wa_spectre_v2_el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_v2_el0_sync_a32:
	invalidate_branch_predictor
	b	el0_sync_a32
	check_vector_size wa_spectre_v2_el0_sync_a32

	.balign	128, INV_INSN
wa_spectre_v2_el0_irq_a32:
	invalidate_branch_predictor
	b	el0_irq_a32
	check_vector_size wa_spectre_v2_el0_irq_a32

	.balign	128, INV_INSN
wa_spectre_v2_el0_fiq_a32:
	invalidate_branch_predictor
	b	el0_fiq_a32
	check_vector_size wa_spectre_v2_el0_fiq_a32

	.balign	128, INV_INSN
wa_spectre_v2_el0_serror_a32:
	b	wa_spectre_v2_el0_serror_a32
	check_vector_size wa_spectre_v2_el0_serror_a32

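	/*
	 * Spectre-BHB mitigation: overwrite the branch history by executing
	 * a tight loop of taken branches. The iteration count is read from
	 * struct thread_core_local (bhb_loop_count) since the required
	 * number of iterations is CPU dependent.
	 */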
	.macro discard_branch_history
		str	x0, [sp, #THREAD_CORE_LOCAL_X0]
		ldrb	w0, [sp, #THREAD_CORE_LOCAL_BHB_LOOP_COUNT]
	1:	b	2f
	2:	subs	w0, w0, #1
		bne	1b
		dsb	sy
		isb
		ldr	x0, [sp, #THREAD_CORE_LOCAL_X0]
	.endm

	.balign	2048, INV_INSN
	.global thread_excp_vect_wa_spectre_bhb
thread_excp_vect_wa_spectre_bhb:
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_bhb_el1_sync_sp0:
	b	el1_sync_sp0
	check_vector_size wa_spectre_bhb_el1_sync_sp0

	.balign	128, INV_INSN
wa_spectre_bhb_el1_irq_sp0:
	b	el1_irq_sp0
	check_vector_size wa_spectre_bhb_el1_irq_sp0

	.balign	128, INV_INSN
wa_spectre_bhb_el1_fiq_sp0:
	b	el1_fiq_sp0
	check_vector_size wa_spectre_bhb_el1_fiq_sp0

	.balign	128, INV_INSN
wa_spectre_bhb_el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size wa_spectre_bhb_el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_bhb_el1_sync_sp1:
	b	wa_spectre_bhb_el1_sync_sp1
	check_vector_size wa_spectre_bhb_el1_sync_sp1

	.balign	128, INV_INSN
wa_spectre_bhb_el1_irq_sp1:
	b	wa_spectre_bhb_el1_irq_sp1
	check_vector_size wa_spectre_bhb_el1_irq_sp1

	.balign	128, INV_INSN
wa_spectre_bhb_el1_fiq_sp1:
	b	wa_spectre_bhb_el1_fiq_sp1
	check_vector_size wa_spectre_bhb_el1_fiq_sp1

	.balign	128, INV_INSN
wa_spectre_bhb_el1_serror_sp1:
	b	wa_spectre_bhb_el1_serror_sp1
	check_vector_size wa_spectre_bhb_el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_bhb_el0_sync_a64:
	discard_branch_history
	b	el0_sync_a64
	check_vector_size wa_spectre_bhb_el0_sync_a64

	.balign	128, INV_INSN
wa_spectre_bhb_el0_irq_a64:
	discard_branch_history
	b	el0_irq_a64
	check_vector_size wa_spectre_bhb_el0_irq_a64

	.balign	128, INV_INSN
wa_spectre_bhb_el0_fiq_a64:
	discard_branch_history
	b	el0_fiq_a64
	check_vector_size wa_spectre_bhb_el0_fiq_a64

	.balign	128, INV_INSN
wa_spectre_bhb_el0_serror_a64:
	b	wa_spectre_bhb_el0_serror_a64
	check_vector_size wa_spectre_bhb_el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_bhb_el0_sync_a32:
	discard_branch_history
	b	el0_sync_a32
	check_vector_size wa_spectre_bhb_el0_sync_a32

	.balign	128, INV_INSN
wa_spectre_bhb_el0_irq_a32:
	discard_branch_history
	b	el0_irq_a32
	check_vector_size wa_spectre_bhb_el0_irq_a32

	.balign	128, INV_INSN
wa_spectre_bhb_el0_fiq_a32:
	discard_branch_history
	b	el0_fiq_a32
	check_vector_size wa_spectre_bhb_el0_fiq_a32

	.balign	128, INV_INSN
wa_spectre_bhb_el0_serror_a32:
	b	wa_spectre_bhb_el0_serror_a32
	check_vector_size wa_spectre_bhb_el0_serror_a32
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

/*
 * We're keeping this code in the same section as the vector to make sure
 * that it's always available.
 */
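/*
 * eret_to_el0 - common exit path back to user mode. Expects x0 and x1 to
 * have been stashed in struct thread_core_local (SP_EL1). It switches
 * TTBR0_EL1 to the user-mode ASID and, with CFG_CORE_UNMAP_CORE_AT_EL0,
 * moves VBAR_EL1 and the execution point over to the reduced mapping
 * before restoring x0/x1 and doing the final eret.
 */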
eret_to_el0:
	pauth_el1_to_el0 x1

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point to the vector into the reduced mapping */
	adr_l	x0, thread_user_kcode_offset
	ldr	x0, [x0]
	mrs	x1, vbar_el1
	sub	x1, x1, x0
	msr	vbar_el1, x1
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/* Store the SP offset in tpidr_el1 to be used below to update SP */
	adr_l	x1, thread_user_kdata_sp_offset
	ldr	x1, [x1]
	msr	tpidr_el1, x1
#endif

	/* Jump into the reduced mapping and continue execution */
	adr_l	x1, 1f
	sub	x1, x1, x0
	br	x1
1:
BTI(	bti	j)
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	msr	tpidrro_el0, x0

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x0, ttbr0_el1
	add_imm	x0, __CORE_MMU_BASE_TABLE_OFFSET
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/*
	 * Update the SP with thread_user_kdata_sp_offset as described in
	 * init_user_kcode().
	 */
	mrs	x0, tpidr_el1
	sub	sp, sp, x0
#endif

	mrs	x0, tpidrro_el0
#else
	mrs	x0, ttbr0_el1
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	return_from_exception

el0_sync_a64_finish:
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort

el0_sync_a32_finish:
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active,
	 * which means executing with the reduced mapping, so the code
	 * needs to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	/* Mask all exceptions */
	mrs	x6, daif	/* this register must be preserved */
	msr	daifset, #DAIFBIT_ALL

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point to the vector into the reduced mapping */
	adr_l	x7, thread_user_kcode_offset
	ldr	x7, [x7]	/* this register must be preserved */
	mrs	x4, vbar_el1	/* this register must be preserved */
	sub	x3, x4, x7
	msr	vbar_el1, x3
	isb

	/* Jump into the reduced mapping and continue execution */
	adr	x3, 1f
	sub	x3, x3, x7
	br	x3
1:
BTI(	bti	j)
	/* Update the mapping to exclude the full kernel mapping */
	mrs	x5, ttbr0_el1	/* this register must be preserved */
	orr	x2, x5, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	add_imm	x2, __CORE_MMU_BASE_TABLE_OFFSET
	msr	ttbr0_el1, x2
	isb

#else
	mrs	x5, ttbr0_el1	/* this register must be preserved */
	orr	x2, x5, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in x2 */
	mrs	x3, ctr_el0
	and	x3, x3, #CTR_IMINLINE_MASK
	mov	x2, #CTR_WORD_SIZE
	lsl	x2, x2, x3

	add	x1, x0, x1
	sub	x3, x2, #1
	bic	x0, x0, x3
1:
	ic	ivau, x0
	add	x0, x0, x2
	cmp	x0, x1
	b.lo    1b
	dsb	ish

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Update the mapping to use the full kernel mapping and ASID */
	msr	ttbr0_el1, x5
	isb

	/* Jump into the full mapping and continue execution */
	adr	x0, 1f
	add	x0, x0, x7
	br	x0
1:
BTI(	bti	j)
	/* Point to the vector into the full mapping */
	msr	vbar_el1, x4
	isb
#else
	/* switch to kernel mode ASID */
	msr	ttbr0_el1, x5
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	msr	daif, x6	/* restore exceptions */
	ret	/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect

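/*
 * el0_svc - handle an SVC (system call) from user mode. Saves the user
 * state in a struct thread_scall_regs on the thread's kernel stack, calls
 * thread_scall_handler() with native interrupts unmasked, and then returns
 * either to EL0 through eret_to_el0 or, if the (possibly modified) SPSR
 * now targets EL1, directly with an eret.
 */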
LOCAL_FUNC el0_svc , :
	pauth_el0_to_el1 x1
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	mrs	x1, tpidr_el0
	str	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
	/* load saved kernel sp */
	ldr	x3, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x3

	/* Make room for struct thread_scall_regs */
	sub	sp, sp, #THREAD_SCALL_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SCALL_REG_X30]

#ifdef CFG_TA_PAUTH
	/* Save APIAKEY */
	read_apiakeyhi	x2
	read_apiakeylo	x3
	stp	x2, x3, [sp, #THREAD_SCALL_REG_APIAKEY_HI]
#endif

#ifdef CFG_CORE_PAUTH
	ldp	x2, x3, [x0, #THREAD_CTX_KEYS]
	write_apiakeyhi	x2
	write_apiakeylo	x3
#endif
#if defined(CFG_CORE_PAUTH) || defined(CFG_TA_PAUTH)
	/* SCTLR or the APIA key has changed */
	isb
#endif

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SCALL_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SCALL_REG_ELR, 0, 1

	mov	x0, sp

	/*
	 * Unmask native interrupts, SError, and debug exceptions since we
	 * have nothing left in sp_el1. Note that the SVC handler is expected
	 * to re-enable foreign interrupts by itself.
	 */
#if defined(CFG_CORE_IRQ_IS_NATIVE_INTR)
	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
#else
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
#endif

	/* Call the handler */
	bl	thread_scall_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This is needed when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SCALL_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	ldr	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
	msr	tpidr_el0, x1
	load_xregs sp, THREAD_SCALL_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SCALL_REG_X2, 2, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SCALL_REG_SP_EL0]
	mov	sp, x0
	b_if_spsr_is_el0 w1, 1f
	ldp	x0, x1, [x30, THREAD_SCALL_REG_X0]
	ldr	x30, [x30, #THREAD_SCALL_REG_X30]

	return_from_exception

1:
#ifdef	CFG_TA_PAUTH
	/* Restore APIAKEY */
	load_xregs x30, THREAD_SCALL_REG_APIAKEY_HI, 0, 1
	write_apiakeyhi	x0
	write_apiakeylo	x1
#endif

	ldp	x0, x1, [x30, THREAD_SCALL_REG_X0]
	ldr	x30, [x30, #THREAD_SCALL_REG_X30]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC el0_svc

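/*
 * el1_sync_abort - synchronous exception taken from EL1 (the core itself).
 * Switches to the per-core abort stack, or to the tmp stack if the abort
 * happened while already on the abort stack, saves a struct thread_abt_regs
 * there and calls abort_handler() before restoring the state and returning.
 */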
LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* We have an abort while using the abort stack, select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x1, elr_el1
	stp	x30, x1, [sp, #THREAD_ABT_REG_X30]

#if defined(CFG_CORE_PAUTH)
	read_apiakeyhi	x2
	read_apiakeylo	x3
	stp	x2, x3, [sp, #THREAD_ABT_REGS_APIAKEY_HI]
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_KEYS]
	write_apiakeyhi	x2
	write_apiakeylo	x3
	isb
#endif

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

#if defined(CFG_CORE_PAUTH)
	ldp	x0, x1, [x3, #THREAD_ABT_REGS_APIAKEY_HI]
	write_apiakeyhi	x0
	write_apiakeylo	x1
	isb
#endif

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	return_from_exception
END_FUNC el1_sync_abort

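/*
 * el0_sync_abort - synchronous exception from user mode that is not an SVC
 * (data/prefetch aborts, undefined instructions, etc.). Entered from
 * el0_sync_a64_finish/el0_sync_a32_finish. Saves a struct thread_abt_regs
 * on the per-core abort stack and calls abort_handler().
 */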
	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	pauth_el0_to_el1 x1
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x1, elr_el1
	stp	x30, x1, [sp, #THREAD_ABT_REG_X30]

#if defined(CFG_TA_PAUTH)
	read_apiakeyhi	x2
	read_apiakeylo	x3
	stp	x2, x3, [sp, #THREAD_ABT_REGS_APIAKEY_HI]
#endif

#if defined(CFG_CORE_PAUTH)
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_KEYS]
	write_apiakeyhi	x2
	write_apiakeylo	x3
#endif

#if defined(CFG_CORE_PAUTH) || defined(CFG_TA_PAUTH)
	/* SCTLR or the APIA key has changed */
	isb
#endif

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
	ldp	x1, x2, [x3, #THREAD_ABT_REGS_APIAKEY_HI]
	write_apiakeyhi	x1
	write_apiakeylo	x2
#endif

	/* Restore x2 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3

	b_if_spsr_is_el0 w0, 1f

#if defined(CFG_CORE_PAUTH)
	/* the APIA key has changed */
	isb
#endif

	/* Restore x0 to x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	return_from_exception
1:	b	eret_to_el0
END_FUNC el0_sync_abort

/* The handler of foreign interrupts. */
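/*
 * A foreign interrupt (a normal world interrupt) is not handled by OP-TEE
 * itself: the thread state is saved into struct thread_ctx, the thread is
 * marked suspended with thread_state_suspend() and execution leaves the
 * core via thread_foreign_intr_exit() so the normal world can service the
 * interrupt. The saved context is resumed later when the normal world
 * returns.
 */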
.macro foreign_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* Store tpidr_el0 */
	mrs	x2, tpidr_el0
	str	x2, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
	/* Store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
	/* Save APIAKEY */
	read_apiakeyhi	x1
	read_apiakeylo	x2
	store_xregs x0, THREAD_CTX_REGS_APIAKEY_HI, 1, 2
#endif
#if defined(CFG_CORE_PAUTH)
	ldp	x1, x2, [sp, #THREAD_CORE_LOCAL_KEYS]
	write_apiakeyhi	x1
	write_apiakeylo	x2
	isb
#endif

#ifdef CFG_CORE_FFA
	/* x0 is still pointing to the current thread_ctx */
	/* load curr_thread_ctx->tsd.rpc_target_info into w19 */
	ldr	w19, [x0, #THREAD_CTX_TSD_RPC_TARGET_INFO]
	/* load curr_thread_ctx->flags into w20 */
	ldr	w20, [x0, #THREAD_CTX_FLAGS]
#endif

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which cache entries have been
	 * used. We're relying on the dispatcher in TF-A to take care of
	 * the BTB.
	 */
	mov	x0, #DCACHE_OP_CLEAN_INV
	bl	dcache_op_louis
	ic	iallu
#endif
	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * functions expect to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	/* Passing thread index in w0 */
#ifdef CFG_CORE_FFA
	mov	w1, w19		/* rpc_target_info */
	mov	w2, w20		/* flags */
#endif
	b	thread_foreign_intr_exit
.endm

/*
 * This struct is never used from C; it's only here to visualize the
 * layout.
 *
 * struct elx_nintr_rec {
 * 	uint64_t x[19 - 4]; x4..x18
 * 	uint64_t lr;
 * 	uint64_t sp_el0;
 * #if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
 * 	uint64_t apiakey_hi;
 * 	uint64_t apiakey_lo;
 * #endif
 * };
 */
#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
#define ELX_NINTR_REG_APIAKEY_HI	(8 + ELX_NINTR_REC_SP_EL0)
#define ELX_NINTR_REG_APIAKEY_LO	(8 + ELX_NINTR_REG_APIAKEY_HI)
#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REG_APIAKEY_LO)
#else
#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)
#endif


/* The handler of native interrupts. */
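/*
 * Native interrupts are handled entirely within OP-TEE: the registers that
 * a C call may clobber (x4..x18 and lr, see elx_nintr_rec above) plus the
 * original sp_el0 are stashed on the per-core tmp stack,
 * interrupt_main_handler() is called with SP_EL0 selected, and the
 * interrupted context is then restored and resumed in place.
 */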
.macro native_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save registers on the temp stack that can be corrupted by a call
	 * to a C function.
	 *
	 * Note that we're temporarily using x1 to access the temp stack
	 * until we're ready to switch to sp_el0 and update sp.
	 */
	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Make room for struct elx_nintr_rec */
	sub	x1, x1, #ELX_NINTR_REC_SIZE
	/* Store lr and original sp_el0 */
	mrs	x2, sp_el0
	stp	x30, x2, [x1, #ELX_NINTR_REC_LR]
	/* Store x4..x18 */
	store_xregs x1, ELX_NINTR_REC_X(4), 4, 18

#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
	read_apiakeyhi	x2
	read_apiakeylo	x3
	stp	x2, x3, [x1, #ELX_NINTR_REG_APIAKEY_HI]
#if defined(CFG_CORE_PAUTH)
	ldp	x2, x3, [sp, #THREAD_CORE_LOCAL_KEYS]
	write_apiakeyhi	x2
	write_apiakeylo	x3
#endif
	/* SCTLR or the APIA key has changed */
	isb
#endif

	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	bl	thread_check_canaries
	bl	interrupt_main_handler

	/*
	 * Restore registers
	 */

#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
	ldp	x0, x1, [sp, #ELX_NINTR_REG_APIAKEY_HI]
	write_apiakeyhi	x0
	write_apiakeylo	x1
#endif

	/* Restore x4..x18 */
	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	mrs	x0, spsr_el1

	/* Restore x2..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
	b_if_spsr_is_el0 w0, 1f

#if defined(CFG_CORE_PAUTH)
	/* APIA key has changed */
	isb
#endif

	/* Restore x0..x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	return_from_exception
1:	b	eret_to_el0
.endm

LOCAL_FUNC elx_irq , :
#if defined(CFG_CORE_IRQ_IS_NATIVE_INTR)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
#if defined(CFG_CORE_IRQ_IS_NATIVE_INTR)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
END_FUNC elx_fiq

BTI(emit_aarch64_feature_1_and     GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
