xref: /optee_os/core/arch/arm/kernel/thread_a64.S (revision 6cfa381e534b362afbd103f526b132048e54ba47)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2015-2022, Linaro Limited
4 */
5
6#include <arm64_macros.S>
7#include <arm.h>
8#include <asm.S>
9#include <generated/asm-defines.h>
10#include <keep.h>
11#include <kernel/cache_helpers.h>
12#include <kernel/thread.h>
13#include <kernel/thread_private.h>
14#include <mm/core_mmu.h>
15#include <smccc.h>
16
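	/*
	 * Resolve the struct thread_ctx of the thread currently running
	 * on this core. Roughly, as a C sketch of the macro below (not
	 * actual code; threads is the thread context array referenced
	 * via adr_l):
	 *
	 *	res = (vaddr_t)threads +
	 *	      core_local->curr_thread * THREAD_CTX_SIZE;
	 */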
17	.macro get_thread_ctx core_local, res, tmp0, tmp1
18		ldrh	w\tmp0, [\core_local, \
19				#THREAD_CORE_LOCAL_CURR_THREAD]
20		adr_l	x\res, threads
21		mov	x\tmp1, #THREAD_CTX_SIZE
22		madd	x\res, x\tmp0, x\tmp1, x\res
23	.endm
24
25	.macro return_from_exception
26		eret
27		/* Guard against speculation past ERET */
28		dsb nsh
29		isb
30	.endm
31
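	/*
	 * Branch to \label if \reg (an SPSR value) indicates a return to
	 * user space: either the RW bit says AArch32, or the AArch64 EL
	 * field is 0 (EL0).
	 */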
32	.macro b_if_spsr_is_el0 reg, label
33		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
34		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
35		b.eq	\label
36	.endm
37
38	.macro pauth_el0_to_el1 reg
39		/*
40		 * If pauth is enabled in only one of core or TA (xor), we
41		 * need to update SCTLR.
42		 */
43#if (defined(CFG_TA_PAUTH) && !defined(CFG_CORE_PAUTH)) || \
44    (!defined(CFG_TA_PAUTH) && defined(CFG_CORE_PAUTH))
45		mrs	\reg, sctlr_el1
46		/* Flip the SCTLR_ENIA bit */
47		eor     \reg, \reg, #SCTLR_ENIA
48		msr	sctlr_el1, \reg
49#endif
50	.endm
51
52	.macro pauth_el1_to_el0 reg
53		/*
54		 * If pauth is enabled in only one of core or TA (xor), we
55		 * need to update SCTLR.
56		 */
57#if (defined(CFG_TA_PAUTH) && !defined(CFG_CORE_PAUTH)) || \
58    (!defined(CFG_TA_PAUTH) && defined(CFG_CORE_PAUTH))
59		mrs	\reg, sctlr_el1
60		/* Flip the SCTLR_ENIA bit */
61		eor     \reg, \reg, #SCTLR_ENIA
62		msr	sctlr_el1, \reg
63#endif
64	.endm
65
66/* void thread_resume(struct thread_ctx_regs *regs) */
67FUNC thread_resume , :
68	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
69	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
70	mov	sp, x1
71	msr	elr_el1, x2
72	msr	spsr_el1, x3
73	ldr	x1, [x0, THREAD_CTX_REGS_TPIDR_EL0]
74	msr	tpidr_el0, x1
75
76#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
77	load_xregs x0, THREAD_CTX_REGS_APIAKEY_HI, 1, 2
78	write_apiakeyhi	x1
79	write_apiakeylo	x2
80#endif
81	b_if_spsr_is_el0 w3, 1f
82
83#if defined(CFG_CORE_PAUTH) || defined(CFG_TA_PAUTH)
84	/* SCTLR or the APIA key has changed */
85	isb
86#endif
87	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
88	ldr	x0, [x0, THREAD_CTX_REGS_X0]
89	return_from_exception
90
911:
92	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
93	ldr	x0, [x0, THREAD_CTX_REGS_X0]
94
95	msr	spsel, #1
96	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
97	b	eret_to_el0
98END_FUNC thread_resume
99
100#ifdef CFG_CORE_SEL2_SPMC
101FUNC thread_hvc , :
102	hvc	#0
103	ret
104END_FUNC thread_hvc
105#endif
106
107FUNC thread_smc , :
108	smc	#0
109	ret
110END_FUNC thread_smc
111
112/* void thread_smccc(struct thread_smc_args *arg_res) */
113FUNC thread_smccc , :
114	push	x0, xzr
115	mov	x8, x0
116	load_xregs x8, 0, 0, 7
117#ifdef CFG_CORE_SEL2_SPMC
118	hvc	#0
119#else
120	smc	#0
121#endif
122	pop	x8, xzr
123	store_xregs x8, 0, 0, 7
124	ret
125END_FUNC thread_smccc
126
127FUNC thread_init_vbar , :
128	msr	vbar_el1, x0
129	ret
130END_FUNC thread_init_vbar
131DECLARE_KEEP_PAGER thread_init_vbar
132
133/*
134 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
135 *				     uint32_t *exit_status0,
136 *				     uint32_t *exit_status1);
137 *
138 * This function depends on being called with exceptions masked.
139 */
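/*
 * Note: the return back to the caller happens via thread_unwind_user_mode(),
 * which writes the exit status words through the pointers saved below (a
 * summary of the flow in this file, not a formal contract).
 */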
140FUNC __thread_enter_user_mode , :
141	/*
142	 * Create and fill in the struct thread_user_mode_rec
143	 */
144	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
145	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 0, 2
146	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
147
148	/*
149	 * Save kern sp in x19
150	 * Switch to SP_EL1
151	 */
152	mov	x19, sp
153	msr	spsel, #1
154
155	/*
156	 * Save the kernel stack pointer in the thread context
157	 */
158	/* get pointer to current thread context */
159	get_thread_ctx sp, 21, 20, 22
160	/*
161	 * Save the kernel stack pointer to ensure that el0_svc() uses
162	 * the correct stack pointer
163	 */
164	str	x19, [x21, #THREAD_CTX_KERN_SP]
165
166	/*
167	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
168	 */
169	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
170	msr	sp_el0, x1
171	msr	elr_el1, x2
172	msr	spsr_el1, x3
173
174#ifdef	CFG_TA_PAUTH
175	/* Load APIAKEY */
176	load_xregs x0, THREAD_CTX_REGS_APIAKEY_HI, 1, 2
177	write_apiakeyhi	x1
178	write_apiakeylo	x2
179#endif
180
181	/*
182	 * Save the values for x0 and x1 in struct thread_core_local to be
183	 * restored later just before the eret.
184	 */
185	load_xregs x0, THREAD_CTX_REGS_X0, 1, 2
186	store_xregs sp, THREAD_CORE_LOCAL_X0, 1, 2
187
188	/* Load the rest of the general purpose registers */
189	load_xregs x0, THREAD_CTX_REGS_X2, 2, 30
190
191	/* Jump into user mode */
192	b eret_to_el0
193END_FUNC __thread_enter_user_mode
194DECLARE_KEEP_PAGER __thread_enter_user_mode
195
196/*
197 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
198 * 		uint32_t exit_status1);
199 * See description in thread.h
200 */
201FUNC thread_unwind_user_mode , :
202	/* Store the exit status */
203	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 3, 5
204	str	w1, [x4]
205	str	w2, [x5]
206	/* Save x19..x30 */
207	store_xregs x3, THREAD_CTX_REGS_X19, 19, 30
208	/* Restore x19..x30 */
209	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
210	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
211	/* Return from the call of thread_enter_user_mode() */
212	ret
213END_FUNC thread_unwind_user_mode
214
215	/*
216	 * This macro verifies that a given vector doesn't exceed the
217	 * architectural limit of 32 instructions. This is meant to be placed
218	 * immediately after the last instruction in the vector. It takes the
219	 * vector entry as the parameter
220	 */
221	.macro check_vector_size since
222	  .if (. - \since) > (32 * 4)
223	    .error "Vector exceeds 32 instructions"
224	  .endif
225	.endm
226
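	/*
	 * Restore the kernel mode ASID and, with CFG_CORE_UNMAP_CORE_AT_EL0,
	 * switch back from the reduced user-mode mapping to the full kernel
	 * mapping (eret_to_el0 below does the reverse). In both cases x0-x3
	 * end up saved in struct thread_core_local.
	 */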
227	.macro restore_mapping
228#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
229		/* Temporarily save x0, x1 */
230		msr	tpidr_el1, x0
231		msr	tpidrro_el0, x1
232
233		/* Update the mapping to use the full kernel mapping */
234		mrs	x0, ttbr0_el1
235		sub_imm	x0, __CORE_MMU_BASE_TABLE_OFFSET
236		/* switch to kernel mode ASID */
237		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
238		msr	ttbr0_el1, x0
239		isb
240
241		/* Jump into the full mapping and continue execution */
242		adr	x0, 1f
243		ldr	x1, [sp, #THREAD_CORE_LOCAL_KCODE_OFFSET]
244		add	x0, x0, x1
245		br	x0
246	1:
247BTI(		bti	j)
248		/* Point to the vector into the full mapping */
249		adr_l	x0, thread_user_kcode_offset
250		ldr	x0, [x0]
251		mrs	x1, vbar_el1
252		add	x1, x1, x0
253		msr	vbar_el1, x1
254		isb
255
256#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
257		/*
258		 * Update the SP with thread_user_kdata_sp_offset as
259		 * described in init_user_kcode().
260		 */
261		adr_l	x0, thread_user_kdata_sp_offset
262		ldr	x0, [x0]
263		add	sp, sp, x0
264#endif
265
266		/* Restore x0, x1 */
267		mrs	x0, tpidr_el1
268		mrs	x1, tpidrro_el0
269		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
270#else
271		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
272		mrs	x0, ttbr0_el1
273		/* switch to kernel mode ASID */
274		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
275		msr	ttbr0_el1, x0
276		isb
277#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
278	.endm
279
280#define INV_INSN	0
281FUNC thread_excp_vect , : , default, 2048, nobti
282	/* -----------------------------------------------------
283	 * EL1 with SP0 : 0x0 - 0x180
284	 * -----------------------------------------------------
285	 */
286	.balign	128, INV_INSN
287el1_sync_sp0:
288	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
289	b	el1_sync_abort
290	check_vector_size el1_sync_sp0
291
292	.balign	128, INV_INSN
293el1_irq_sp0:
294	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
295	b	elx_irq
296	check_vector_size el1_irq_sp0
297
298	.balign	128, INV_INSN
299el1_fiq_sp0:
300	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
301	b	elx_fiq
302	check_vector_size el1_fiq_sp0
303
304	.balign	128, INV_INSN
305el1_serror_sp0:
306	b	el1_serror_sp0
307	check_vector_size el1_serror_sp0
308
309	/* -----------------------------------------------------
310	 * Current EL with SP1: 0x200 - 0x380
311	 * -----------------------------------------------------
312	 */
313	.balign	128, INV_INSN
314el1_sync_sp1:
315	b	el1_sync_sp1
316	check_vector_size el1_sync_sp1
317
318	.balign	128, INV_INSN
319el1_irq_sp1:
320	b	el1_irq_sp1
321	check_vector_size el1_irq_sp1
322
323	.balign	128, INV_INSN
324el1_fiq_sp1:
325	b	el1_fiq_sp1
326	check_vector_size el1_fiq_sp1
327
328	.balign	128, INV_INSN
329el1_serror_sp1:
330	b	el1_serror_sp1
331	check_vector_size el1_serror_sp1
332
333	/* -----------------------------------------------------
334	 * Lower EL using AArch64 : 0x400 - 0x580
335	 * -----------------------------------------------------
336	 */
337	.balign	128, INV_INSN
338el0_sync_a64:
339	restore_mapping
340	/* PAuth will be disabled later, otherwise check_vector_size will fail */
341
342	b	el0_sync_a64_finish
343	check_vector_size el0_sync_a64
344
345	.balign	128, INV_INSN
346el0_irq_a64:
347	restore_mapping
348	pauth_el0_to_el1 x1
349
350	b	elx_irq
351	check_vector_size el0_irq_a64
352
353	.balign	128, INV_INSN
354el0_fiq_a64:
355	restore_mapping
356	pauth_el0_to_el1 x1
357
358	b	elx_fiq
359	check_vector_size el0_fiq_a64
360
361	.balign	128, INV_INSN
362el0_serror_a64:
363	b   	el0_serror_a64
364	check_vector_size el0_serror_a64
365
366	/* -----------------------------------------------------
367	 * Lower EL using AArch32 : 0x600 - 0x780
368	 * -----------------------------------------------------
369	 */
370	.balign	128, INV_INSN
371el0_sync_a32:
372	restore_mapping
373
374	b 	el0_sync_a32_finish
375	check_vector_size el0_sync_a32
376
377	.balign	128, INV_INSN
378el0_irq_a32:
379	restore_mapping
380
381	b	elx_irq
382	check_vector_size el0_irq_a32
383
384	.balign	128, INV_INSN
385el0_fiq_a32:
386	restore_mapping
387
388	b	elx_fiq
389	check_vector_size el0_fiq_a32
390
391	.balign	128, INV_INSN
392el0_serror_a32:
393	b	el0_serror_a32
394	check_vector_size el0_serror_a32
395
396#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
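	/*
	 * Invalidate the branch predictor with the SMCCC_ARCH_WORKAROUND_1
	 * call to the secure monitor (Spectre v2 / CVE-2017-5715 mitigation),
	 * preserving x0-x3 in struct thread_core_local across the SMC.
	 */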
397	.macro invalidate_branch_predictor
398		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
399		mov_imm	x0, SMCCC_ARCH_WORKAROUND_1
400		smc	#0
401		load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
402	.endm
403
404	.balign	2048, INV_INSN
405	.global thread_excp_vect_wa_spectre_v2
406thread_excp_vect_wa_spectre_v2:
407	/* -----------------------------------------------------
408	 * EL1 with SP0 : 0x0 - 0x180
409	 * -----------------------------------------------------
410	 */
411	.balign	128, INV_INSN
412wa_spectre_v2_el1_sync_sp0:
413	b	el1_sync_sp0
414	check_vector_size wa_spectre_v2_el1_sync_sp0
415
416	.balign	128, INV_INSN
417wa_spectre_v2_el1_irq_sp0:
418	b	el1_irq_sp0
419	check_vector_size wa_spectre_v2_el1_irq_sp0
420
421	.balign	128, INV_INSN
422wa_spectre_v2_el1_fiq_sp0:
423	b	el1_fiq_sp0
424	check_vector_size wa_spectre_v2_el1_fiq_sp0
425
426	.balign	128, INV_INSN
427wa_spectre_v2_el1_serror_sp0:
428	b	el1_serror_sp0
429	check_vector_size wa_spectre_v2_el1_serror_sp0
430
431	/* -----------------------------------------------------
432	 * Current EL with SP1: 0x200 - 0x380
433	 * -----------------------------------------------------
434	 */
435	.balign	128, INV_INSN
436wa_spectre_v2_el1_sync_sp1:
437	b	wa_spectre_v2_el1_sync_sp1
438	check_vector_size wa_spectre_v2_el1_sync_sp1
439
440	.balign	128, INV_INSN
441wa_spectre_v2_el1_irq_sp1:
442	b	wa_spectre_v2_el1_irq_sp1
443	check_vector_size wa_spectre_v2_el1_irq_sp1
444
445	.balign	128, INV_INSN
446wa_spectre_v2_el1_fiq_sp1:
447	b	wa_spectre_v2_el1_fiq_sp1
448	check_vector_size wa_spectre_v2_el1_fiq_sp1
449
450	.balign	128, INV_INSN
451wa_spectre_v2_el1_serror_sp1:
452	b	wa_spectre_v2_el1_serror_sp1
453	check_vector_size wa_spectre_v2_el1_serror_sp1
454
455	/* -----------------------------------------------------
456	 * Lower EL using AArch64 : 0x400 - 0x580
457	 * -----------------------------------------------------
458	 */
459	.balign	128, INV_INSN
460wa_spectre_v2_el0_sync_a64:
461	invalidate_branch_predictor
462	b	el0_sync_a64
463	check_vector_size wa_spectre_v2_el0_sync_a64
464
465	.balign	128, INV_INSN
466wa_spectre_v2_el0_irq_a64:
467	invalidate_branch_predictor
468	b	el0_irq_a64
469	check_vector_size wa_spectre_v2_el0_irq_a64
470
471	.balign	128, INV_INSN
472wa_spectre_v2_el0_fiq_a64:
473	invalidate_branch_predictor
474	b	el0_fiq_a64
475	check_vector_size wa_spectre_v2_el0_fiq_a64
476
477	.balign	128, INV_INSN
478wa_spectre_v2_el0_serror_a64:
479	b   	wa_spectre_v2_el0_serror_a64
480	check_vector_size wa_spectre_v2_el0_serror_a64
481
482	/* -----------------------------------------------------
483	 * Lower EL using AArch32 : 0x600 - 0x780
484	 * -----------------------------------------------------
485	 */
486	.balign	128, INV_INSN
487wa_spectre_v2_el0_sync_a32:
488	invalidate_branch_predictor
489	b	el0_sync_a32
490	check_vector_size wa_spectre_v2_el0_sync_a32
491
492	.balign	128, INV_INSN
493wa_spectre_v2_el0_irq_a32:
494	invalidate_branch_predictor
495	b	el0_irq_a32
496	check_vector_size wa_spectre_v2_el0_irq_a32
497
498	.balign	128, INV_INSN
499wa_spectre_v2_el0_fiq_a32:
500	invalidate_branch_predictor
501	b	el0_fiq_a32
502	check_vector_size wa_spectre_v2_el0_fiq_a32
503
504	.balign	128, INV_INSN
505wa_spectre_v2_el0_serror_a32:
506	b	wa_spectre_v2_el0_serror_a32
507	check_vector_size wa_spectre_v2_el0_serror_a32
508
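	/*
	 * Discard the branch history by executing a number of tight branch
	 * pairs, the loop count being read from thread_core_local. This is
	 * the loop workaround for Spectre-BHB (CVE-2022-23960).
	 */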
509	.macro discard_branch_history
510		str	x0, [sp, #THREAD_CORE_LOCAL_X0]
511		ldrb	w0, [sp, #THREAD_CORE_LOCAL_BHB_LOOP_COUNT]
512	1:	b	2f
513	2:	subs	w0, w0, #1
514		bne	1b
515		dsb	sy
516		isb
517		ldr	x0, [sp, #THREAD_CORE_LOCAL_X0]
518	.endm
519
520	.balign	2048, INV_INSN
521	.global thread_excp_vect_wa_spectre_bhb
522thread_excp_vect_wa_spectre_bhb:
523	/* -----------------------------------------------------
524	 * EL1 with SP0 : 0x0 - 0x180
525	 * -----------------------------------------------------
526	 */
527	.balign	128, INV_INSN
528wa_spectre_bhb_el1_sync_sp0:
529	b	el1_sync_sp0
530	check_vector_size wa_spectre_bhb_el1_sync_sp0
531
532	.balign	128, INV_INSN
533wa_spectre_bhb_el1_irq_sp0:
534	b	el1_irq_sp0
535	check_vector_size wa_spectre_bhb_el1_irq_sp0
536
537	.balign	128, INV_INSN
538wa_spectre_bhb_el1_fiq_sp0:
539	b	el1_fiq_sp0
540	check_vector_size wa_spectre_bhb_el1_fiq_sp0
541
542	.balign	128, INV_INSN
543wa_spectre_bhb_el1_serror_sp0:
544	b	el1_serror_sp0
545	check_vector_size wa_spectre_bhb_el1_serror_sp0
546
547	/* -----------------------------------------------------
548	 * Current EL with SP1: 0x200 - 0x380
549	 * -----------------------------------------------------
550	 */
551	.balign	128, INV_INSN
552wa_spectre_bhb_el1_sync_sp1:
553	b	wa_spectre_bhb_el1_sync_sp1
554	check_vector_size wa_spectre_bhb_el1_sync_sp1
555
556	.balign	128, INV_INSN
557wa_spectre_bhb_el1_irq_sp1:
558	b	wa_spectre_bhb_el1_irq_sp1
559	check_vector_size wa_spectre_bhb_el1_irq_sp1
560
561	.balign	128, INV_INSN
562wa_spectre_bhb_el1_fiq_sp1:
563	b	wa_spectre_bhb_el1_fiq_sp1
564	check_vector_size wa_spectre_bhb_el1_fiq_sp1
565
566	.balign	128, INV_INSN
567wa_spectre_bhb_el1_serror_sp1:
568	b	wa_spectre_bhb_el1_serror_sp1
569	check_vector_size wa_spectre_bhb_el1_serror_sp1
570
571	/* -----------------------------------------------------
572	 * Lower EL using AArch64 : 0x400 - 0x580
573	 * -----------------------------------------------------
574	 */
575	.balign	128, INV_INSN
576wa_spectre_bhb_el0_sync_a64:
577	discard_branch_history
578	b	el0_sync_a64
579	check_vector_size wa_spectre_bhb_el0_sync_a64
580
581	.balign	128, INV_INSN
582wa_spectre_bhb_el0_irq_a64:
583	discard_branch_history
584	b	el0_irq_a64
585	check_vector_size wa_spectre_bhb_el0_irq_a64
586
587	.balign	128, INV_INSN
588wa_spectre_bhb_el0_fiq_a64:
589	discard_branch_history
590	b	el0_fiq_a64
591	check_vector_size wa_spectre_bhb_el0_fiq_a64
592
593	.balign	128, INV_INSN
594wa_spectre_bhb_el0_serror_a64:
595	b   	wa_spectre_bhb_el0_serror_a64
596	check_vector_size wa_spectre_bhb_el0_serror_a64
597
598	/* -----------------------------------------------------
599	 * Lower EL using AArch32 : 0x600 - 0x780
600	 * -----------------------------------------------------
601	 */
602	.balign	128, INV_INSN
603wa_spectre_bhb_el0_sync_a32:
604	discard_branch_history
605	b	el0_sync_a32
606	check_vector_size wa_spectre_bhb_el0_sync_a32
607
608	.balign	128, INV_INSN
609wa_spectre_bhb_el0_irq_a32:
610	discard_branch_history
611	b	el0_irq_a32
612	check_vector_size wa_spectre_bhb_el0_irq_a32
613
614	.balign	128, INV_INSN
615wa_spectre_bhb_el0_fiq_a32:
616	discard_branch_history
617	b	el0_fiq_a32
618	check_vector_size wa_spectre_bhb_el0_fiq_a32
619
620	.balign	128, INV_INSN
621wa_spectre_bhb_el0_serror_a32:
622	b	wa_spectre_bhb_el0_serror_a32
623	check_vector_size wa_spectre_bhb_el0_serror_a32
624#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
625
626/*
627 * We're keeping this code in the same section as the vector to make sure
628 * that it's always available.
629 */
630eret_to_el0:
631	pauth_el1_to_el0 x1
632
633#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
634	/* Point to the vector into the reduced mapping */
635	adr_l	x0, thread_user_kcode_offset
636	ldr	x0, [x0]
637	mrs	x1, vbar_el1
638	sub	x1, x1, x0
639	msr	vbar_el1, x1
640	isb
641
642#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
643	/* Store the SP offset in tpidr_el1 to be used below to update SP */
644	adr_l	x1, thread_user_kdata_sp_offset
645	ldr	x1, [x1]
646	msr	tpidr_el1, x1
647#endif
648
649	/* Jump into the reduced mapping and continue execution */
650	adr_l	x1, 1f
651	sub	x1, x1, x0
652	br	x1
6531:
654BTI(	bti	j)
655	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
656	msr	tpidrro_el0, x0
657
658	/* Update the mapping to exclude the full kernel mapping */
659	mrs	x0, ttbr0_el1
660	add_imm	x0, __CORE_MMU_BASE_TABLE_OFFSET
661	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
662	msr	ttbr0_el1, x0
663	isb
664
665#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
666	/*
667	 * Update the SP with thread_user_kdata_sp_offset as described in
668	 * init_user_kcode().
669	 */
670	mrs	x0, tpidr_el1
671	sub	sp, sp, x0
672#endif
673
674	mrs	x0, tpidrro_el0
675#else
676	mrs	x0, ttbr0_el1
677	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
678	msr	ttbr0_el1, x0
679	isb
680	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
681#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
682
683	return_from_exception
684
685el0_sync_a64_finish:
686	mrs	x2, esr_el1
687	mrs	x3, sp_el0
688	lsr	x2, x2, #ESR_EC_SHIFT
689	cmp	x2, #ESR_EC_AARCH64_SVC
690	b.eq	el0_svc
691	b	el0_sync_abort
692
693el0_sync_a32_finish:
694	mrs	x2, esr_el1
695	mrs	x3, sp_el0
696	lsr	x2, x2, #ESR_EC_SHIFT
697	cmp	x2, #ESR_EC_AARCH32_SVC
698	b.eq	el0_svc
699	b	el0_sync_abort
700
701	/*
702	 * void icache_inv_user_range(void *addr, size_t size);
703	 *
704	 * This function has to execute with the user space ASID active;
705	 * this means executing with the reduced mapping, and the code needs
706	 * to be located here together with the vector.
707	 */
708	.global icache_inv_user_range
709	.type icache_inv_user_range , %function
710icache_inv_user_range:
711	/* Mask all exceptions */
712	mrs	x6, daif	/* this register must be preserved */
713	msr	daifset, #DAIFBIT_ALL
714
715#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
716	/* Point to the vector into the reduced mapping */
717	adr_l	x7, thread_user_kcode_offset
718	ldr	x7, [x7]	/* this register must be preserved */
719	mrs	x4, vbar_el1	/* this register must be preserved */
720	sub	x3, x4, x7
721	msr	vbar_el1, x3
722	isb
723
724	/* Jump into the reduced mapping and continue execution */
725	adr	x3, 1f
726	sub	x3, x3, x7
727	br	x3
7281:
729BTI(	bti	j)
730	/* Update the mapping to exclude the full kernel mapping */
731	mrs	x5, ttbr0_el1	/* this register must be preserved */
732	orr	x2, x5, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
733	add_imm	x2, __CORE_MMU_BASE_TABLE_OFFSET
734	msr	ttbr0_el1, x2
735	isb
736
737#else
738	mrs	x5, ttbr0_el1	/* this register must be preserved */
739	orr	x2, x5, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
740	msr	ttbr0_el1, x2
741	isb
742#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
743
744	/*
745	 * Do the actual icache invalidation
746	 */
747
748	/* Calculate minimum icache line size, result in x2 */
749	mrs	x3, ctr_el0
750	and	x3, x3, #CTR_IMINLINE_MASK
751	mov	x2, #CTR_WORD_SIZE
752	lsl	x2, x2, x3
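	/*
	 * That is, roughly: x2 = CTR_WORD_SIZE << CTR_EL0.IminLine, the
	 * smallest instruction cache line size in bytes.
	 */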
753
754	add	x1, x0, x1
755	sub	x3, x2, #1
756	bic	x0, x0, x3
7571:
758	ic	ivau, x0
759	add	x0, x0, x2
760	cmp	x0, x1
761	b.lo    1b
762	dsb	ish
763
764#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
765	/* Update the mapping to use the full kernel mapping and ASID */
766	msr	ttbr0_el1, x5
767	isb
768
769	/* Jump into the full mapping and continue execution */
770	adr	x0, 1f
771	add	x0, x0, x7
772	br	x0
7731:
774BTI(	bti	j)
775	/* Point to the vector into the full mapping */
776	msr	vbar_el1, x4
777	isb
778#else
779	/* switch to kernel mode ASID */
780	msr	ttbr0_el1, x5
781	isb
782#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
783
784	msr	daif, x6	/* restore exceptions */
785	ret	/* End of icache_inv_user_range() */
786
787	/*
788	 * Make sure that literals are placed before the
789	 * thread_excp_vect_end label.
790	 */
791	.pool
792	.global thread_excp_vect_end
793thread_excp_vect_end:
794END_FUNC thread_excp_vect
795
796LOCAL_FUNC el0_svc , :
797	pauth_el0_to_el1 x1
798	/* get pointer to current thread context in x0 */
799	get_thread_ctx sp, 0, 1, 2
800	mrs	x1, tpidr_el0
801	str	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
802	/* load saved kernel sp */
803	ldr	x3, [x0, #THREAD_CTX_KERN_SP]
804	/* Keep pointer to initial record in x1 */
805	mov	x1, sp
806	/* Switch to SP_EL0 and restore kernel sp */
807	msr	spsel, #0
808	mov	x2, sp	/* Save SP_EL0 */
809	mov	sp, x3
810
811	/* Make room for struct thread_scall_regs */
812	sub	sp, sp, #THREAD_SCALL_REG_SIZE
813	stp	x30, x2, [sp, #THREAD_SCALL_REG_X30]
814
815#ifdef CFG_TA_PAUTH
816	/* Save APIAKEY */
817	read_apiakeyhi	x2
818	read_apiakeylo	x3
819	stp	x2, x3, [sp, #THREAD_SCALL_REG_APIAKEY_HI]
820#endif
821
822#ifdef CFG_CORE_PAUTH
823	ldp	x2, x3, [x0, #THREAD_CTX_KEYS]
824	write_apiakeyhi	x2
825	write_apiakeylo	x3
826#endif
827#if defined(CFG_CORE_PAUTH) || defined(CFG_TA_PAUTH)
828	/* SCTLR or the APIA key has changed */
829	isb
830#endif
831
832	/* Restore x0-x3 */
833	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
834	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]
835
836	/* Prepare the argument for the handler */
837	store_xregs sp, THREAD_SCALL_REG_X0, 0, 14
838	mrs	x0, elr_el1
839	mrs	x1, spsr_el1
840	store_xregs sp, THREAD_SCALL_REG_ELR, 0, 1
841
842	mov	x0, sp
843
844	/*
845	 * Unmask native interrupts, SError, and debug exceptions since we have
846	 * nothing left in sp_el1. Note that the SVC handler is expected to
847	 * re-enable foreign interrupts by itself.
848	 */
849#if defined(CFG_CORE_IRQ_IS_NATIVE_INTR)
850	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
851#else
852	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
853#endif
854
855	/* Call the handler */
856	bl	thread_scall_handler
857
858	/* Mask all maskable exceptions since we're switching back to sp_el1 */
859	msr	daifset, #DAIFBIT_ALL
860
861	/*
862	 * Save the kernel sp we had at the beginning of this function.
863	 * This is needed when this TA has called another TA because
864	 * __thread_enter_user_mode() also saves the stack pointer in this
865	 * field.
866	 */
867	msr	spsel, #1
868	get_thread_ctx sp, 0, 1, 2
869	msr	spsel, #0
870	add	x1, sp, #THREAD_SCALL_REG_SIZE
871	str	x1, [x0, #THREAD_CTX_KERN_SP]
872
873	/* Restore registers to the required state and return */
874	ldr	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
875	msr	tpidr_el0, x1
876	load_xregs sp, THREAD_SCALL_REG_ELR, 0, 1
877	msr	elr_el1, x0
878	msr	spsr_el1, x1
879	load_xregs sp, THREAD_SCALL_REG_X2, 2, 14
880	mov	x30, sp
881	ldr	x0, [x30, #THREAD_SCALL_REG_SP_EL0]
882	mov	sp, x0
883	b_if_spsr_is_el0 w1, 1f
884	ldp	x0, x1, [x30, THREAD_SCALL_REG_X0]
885	ldr	x30, [x30, #THREAD_SCALL_REG_X30]
886
887	return_from_exception
888
8891:
890#ifdef	CFG_TA_PAUTH
891	/* Restore APIAKEY */
892	load_xregs x30, THREAD_SCALL_REG_APIAKEY_HI, 0, 1
893	write_apiakeyhi	x0
894	write_apiakeylo	x1
895#endif
896
897	ldp	x0, x1, [x30, THREAD_SCALL_REG_X0]
898	ldr	x30, [x30, #THREAD_SCALL_REG_X30]
899
900	msr	spsel, #1
901	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
902	b	eret_to_el0
903END_FUNC el0_svc
904
905LOCAL_FUNC el1_sync_abort , :
906	mov	x0, sp
907	msr	spsel, #0
908	mov	x3, sp		/* Save original sp */
909
910	/*
911	 * Update core local flags.
912	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
913	 */
914	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
915	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
916	orr	w1, w1, #THREAD_CLF_ABORT
917	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
918			.Lsel_tmp_sp
919
920	/* Select abort stack */
921	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
922	b	.Lset_sp
923
924	/* We got an abort while using the abort stack; select the tmp stack */
925	/* We have an abort while using the abort stack, select tmp stack */
926	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
927	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */
928
929.Lset_sp:
930	mov	sp, x2
931	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
932
933	/*
934	 * Save state on stack
935	 */
936	sub	sp, sp, #THREAD_ABT_REGS_SIZE
937	mrs	x2, spsr_el1
938	/* Store spsr, sp_el0 */
939	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
940	/* Store original x0, x1 */
941	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
942	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
943	/* Store original x2, x3 and x4 to x29 */
944	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
945	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
946	/* Store x30, elr_el1 */
947	mrs	x1, elr_el1
948	stp	x30, x1, [sp, #THREAD_ABT_REG_X30]
949
950#if defined(CFG_CORE_PAUTH)
951	read_apiakeyhi	x2
952	read_apiakeylo	x3
953	stp	x2, x3, [sp, #THREAD_ABT_REGS_APIAKEY_HI]
954	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_KEYS]
955	write_apiakeyhi	x2
956	write_apiakeylo	x3
957	isb
958#endif
959
960	/*
961	 * Call handler
962	 */
963	mov	x0, #0
964	mov	x1, sp
965	bl	abort_handler
966
967	/*
968	 * Restore state from stack
969	 */
970	/* Load x30, elr_el1 */
971	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
972	msr	elr_el1, x0
973	/* Load x0 to x29 */
974	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
975	/* Switch to SP_EL1 */
976	msr	spsel, #1
977	/* Save x0 to x3 in CORE_LOCAL */
978	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
979	/* Restore spsr_el1 and sp_el0 */
980	mrs	x3, sp_el0
981	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
982	msr	spsr_el1, x0
983	msr	sp_el0, x1
984
985	/* Update core local flags */
986	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
987	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
988	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
989
990#if defined(CFG_CORE_PAUTH)
991	ldp	x0, x1, [x3, #THREAD_ABT_REGS_APIAKEY_HI]
992	write_apiakeyhi	x0
993	write_apiakeylo	x1
994	isb
995#endif
996
997	/* Restore x0 to x3 */
998	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
999
1000	/* Return from exception */
1001	return_from_exception
1002END_FUNC el1_sync_abort
1003
1004	/* sp_el0 in x3 */
1005LOCAL_FUNC el0_sync_abort , :
1006	pauth_el0_to_el1 x1
1007	/*
1008	 * Update core local flags
1009	 */
1010	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1011	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
1012	orr	w1, w1, #THREAD_CLF_ABORT
1013	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1014
1015	/*
1016	 * Save state on stack
1017	 */
1018
1019	/* load abt_stack_va_end */
1020	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
1021	/* Keep pointer to initial record in x0 */
1022	mov	x0, sp
1023	/* Switch to SP_EL0 */
1024	msr	spsel, #0
1025	mov	sp, x1
1026	sub	sp, sp, #THREAD_ABT_REGS_SIZE
1027	mrs	x2, spsr_el1
1028	/* Store spsr, sp_el0 */
1029	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
1030	/* Store original x0, x1 */
1031	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
1032	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
1033	/* Store original x2, x3 and x4 to x29 */
1034	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
1035	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
1036	/* Store x30, elr_el1 */
1037	mrs	x1, elr_el1
1038	stp	x30, x1, [sp, #THREAD_ABT_REG_X30]
1039
1040#if defined(CFG_TA_PAUTH)
1041	read_apiakeyhi	x2
1042	read_apiakeylo	x3
1043	stp	x2, x3, [sp, #THREAD_ABT_REGS_APIAKEY_HI]
1044#endif
1045
1046#if defined(CFG_CORE_PAUTH)
1047	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_KEYS]
1048	write_apiakeyhi	x2
1049	write_apiakeylo	x3
1050#endif
1051
1052#if defined(CFG_CORE_PAUTH) || defined(CFG_TA_PAUTH)
1053	/* SCTLR or the APIA key has changed */
1054	isb
1055#endif
1056
1057	/*
1058	 * Call handler
1059	 */
1060	mov	x0, #0
1061	mov	x1, sp
1062	bl	abort_handler
1063
1064	/*
1065	 * Restore state from stack
1066	 */
1067
1068	/* Load x30, elr_el1 */
1069	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
1070	msr	elr_el1, x0
1071	/* Load x0 to x29 */
1072	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
1073	/* Switch to SP_EL1 */
1074	msr	spsel, #1
1075	/* Save x0 to x3 in CORE_LOCAL */
1076	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
1077	/* Restore spsr_el1 and sp_el0 */
1078	mrs	x3, sp_el0
1079	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
1080	msr	spsr_el1, x0
1081	msr	sp_el0, x1
1082
1083	/* Update core local flags */
1084	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1085	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
1086	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1087
1088#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
1089	ldp	x1, x2, [x3, #THREAD_ABT_REGS_APIAKEY_HI]
1090	write_apiakeyhi	x1
1091	write_apiakeylo	x2
1092#endif
1093
1094	/* Restore x2 to x3 */
1095	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
1096
1097	b_if_spsr_is_el0 w0, 1f
1098
1099#if defined(CFG_CORE_PAUTH)
1100	/* the APIA key has changed */
1101	isb
1102#endif
1103
1104	/* Restore x0 to x1 */
1105	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
1106
1107	/* Return from exception */
1108	return_from_exception
11091:	b	eret_to_el0
1110END_FUNC el0_sync_abort
1111
1112/* The handler of foreign interrupts. */
1113.macro foreign_intr_handler mode:req
1114	/*
1115	 * Update core local flags
1116	 */
1117	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1118	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
1119	orr	w1, w1, #THREAD_CLF_TMP
1120	.ifc	\mode\(),fiq
1121	orr	w1, w1, #THREAD_CLF_FIQ
1122	.else
1123	orr	w1, w1, #THREAD_CLF_IRQ
1124	.endif
1125	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1126
1127	/* get pointer to current thread context in x0 */
1128	get_thread_ctx sp, 0, 1, 2
1129	/* Keep original SP_EL0 */
1130	mrs	x2, sp_el0
1131
1132	/* Store original sp_el0 */
1133	str	x2, [x0, #THREAD_CTX_REGS_SP]
1134	/* Store tpidr_el0 */
1135	mrs	x2, tpidr_el0
1136	str	x2, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
1137	/* Store x4..x30 */
1138	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
1139	/* Load original x0..x3 into x10..x13 */
1140	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
1141	/* Save original x0..x3 */
1142	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13
1143
1144#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
1145	/* Save APIAKEY */
1146	read_apiakeyhi	x1
1147	read_apiakeylo	x2
1148	store_xregs x0, THREAD_CTX_REGS_APIAKEY_HI, 1, 2
1149#endif
1150#if defined(CFG_CORE_PAUTH)
1151	ldp	x1, x2, [sp, #THREAD_CORE_LOCAL_KEYS]
1152	write_apiakeyhi	x1
1153	write_apiakeylo	x2
1154	isb
1155#endif
1156	/* load tmp_stack_va_end */
1157	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
1158	/* Switch to SP_EL0 */
1159	msr	spsel, #0
1160	mov	sp, x1
1161
1162#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
1163	/*
1164	 * Prevent leaking information about which cache entries have been
1165	 * used. We're relying on the dispatcher in TF-A to take care of
1166	 * the BTB.
1167	 */
1168	mov	x0, #DCACHE_OP_CLEAN_INV
1169	bl	dcache_op_louis
1170	ic	iallu
1171#endif
1172	/*
1173	 * Mark current thread as suspended
1174	 */
1175	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
1176	mrs	x1, spsr_el1
1177	mrs	x2, elr_el1
1178	bl	thread_state_suspend
1179
1180	/* Update core local flags */
1181	/* Switch to SP_EL1 */
1182	msr	spsel, #1
1183	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1184	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
1185	orr	w1, w1, #THREAD_CLF_TMP
1186	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1187	msr	spsel, #0
1188
1189	/*
1190	 * Note that we're exiting with SP_EL0 selected since the entry
1191	 * functions expect to have SP_EL0 selected with the tmp stack
1192	 * set.
1193	 */
1194
1195	/* Passing thread index in w0 */
1196	b	thread_foreign_intr_exit
1197.endm
1198
1199/*
1200 * This struct is never used from C; it's only here to visualize the
1201 * layout.
1202 *
1203 * struct elx_nintr_rec {
1204 * 	uint64_t x[19 - 4]; x4..x18
1205 * 	uint64_t lr;
1206 * 	uint64_t sp_el0;
1207 * #if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
1208 * 	uint64_t apiakey_hi;
1209 * 	uint64_t apiakey_lo;
1210 * #endif
1211 * };
1212 */
1213#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
1214#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
1215#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
1216#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
1217#define ELX_NINTR_REG_APIAKEY_HI	(8 + ELX_NINTR_REC_SP_EL0)
1218#define ELX_NINTR_REG_APIAKEY_LO	(8 + ELX_NINTR_REG_APIAKEY_HI)
1219#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REG_APIAKEY_LO)
1220#else
1221#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)
1222#endif
1223
1224
1225/* The handler of native interrupts. */
1226.macro native_intr_handler mode:req
1227	/*
1228	 * Update core local flags
1229	 */
1230	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1231	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
1232	.ifc	\mode\(),fiq
1233	orr	w1, w1, #THREAD_CLF_FIQ
1234	.else
1235	orr	w1, w1, #THREAD_CLF_IRQ
1236	.endif
1237	orr	w1, w1, #THREAD_CLF_TMP
1238	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1239
1240	/*
1241	 * Save, on the temp stack, the registers that a call to a C
1242	 * function may clobber.
1243	 *
1244	 * Note that we're temporarily using x1 to access the temp stack
1245	 * until we're ready to switch to sp_el0 and update sp.
1246	 */
1247	/* load tmp_stack_va_end */
1248	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
1249	/* Make room for struct elx_nintr_rec */
1250	sub	x1, x1, #ELX_NINTR_REC_SIZE
1251	/* Store lr and original sp_el0 */
1252	mrs	x2, sp_el0
1253	stp	x30, x2, [x1, #ELX_NINTR_REC_LR]
1254	/* Store x4..x18 */
1255	store_xregs x1, ELX_NINTR_REC_X(4), 4, 18
1256
1257#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
1258	read_apiakeyhi	x2
1259	read_apiakeylo	x3
1260	stp	x2, x3, [x1, #ELX_NINTR_REG_APIAKEY_HI]
1261#if defined(CFG_CORE_PAUTH)
1262	ldp	x2, x3, [sp, #THREAD_CORE_LOCAL_KEYS]
1263	write_apiakeyhi	x2
1264	write_apiakeylo	x3
1265#endif
1266	/* SCTLR or the APIA key has changed */
1267	isb
1268#endif
1269
1270	/* Switch to SP_EL0 */
1271	msr	spsel, #0
1272	mov	sp, x1
1273
1274	bl	thread_check_canaries
1275	bl	interrupt_main_handler
1276
1277	/*
1278	 * Restore registers
1279	 */
1280
1281#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
1282	ldp	x0, x1, [sp, #ELX_NINTR_REG_APIAKEY_HI]
1283	write_apiakeyhi	x0
1284	write_apiakeylo	x1
1285#endif
1286
1287	/* Restore x4..x18 */
1288	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
1289	/* Load lr and original sp_el0 */
1290	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
1291	/* Restore SP_EL0 */
1292	mov	sp, x2
1293	/* Switch back to SP_EL1 */
1294	msr	spsel, #1
1295
1296	/* Update core local flags */
1297	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
1298	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
1299	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
1300
1301	mrs	x0, spsr_el1
1302
1303	/* Restore x2..x3 */
1304	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
1305	b_if_spsr_is_el0 w0, 1f
1306
1307#if defined(CFG_CORE_PAUTH)
1308	/* APIA key has changed */
1309	isb
1310#endif
1311
1312	/* Restore x0..x1 */
1313	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
1314
1315	/* Return from exception */
1316	return_from_exception
13171:	b	eret_to_el0
1318.endm
1319
1320LOCAL_FUNC elx_irq , :
1321#if defined(CFG_CORE_IRQ_IS_NATIVE_INTR)
1322	native_intr_handler	irq
1323#else
1324	foreign_intr_handler	irq
1325#endif
1326END_FUNC elx_irq
1327
1328LOCAL_FUNC elx_fiq , :
1329#if defined(CFG_CORE_IRQ_IS_NATIVE_INTR)
1330	foreign_intr_handler	fiq
1331#else
1332	native_intr_handler	fiq
1333#endif
1334END_FUNC elx_fiq
1335
1336BTI(emit_aarch64_feature_1_and     GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
1337