/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2022, Linaro Limited
 */

#include <arm64_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>
#include <smccc.h>

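	/*
	 * Compute the address of the current thread's struct thread_ctx:
	 * x<res> = threads + curr_thread * THREAD_CTX_SIZE, where
	 * curr_thread is read from the struct thread_core_local pointed
	 * to by \core_local.
	 */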
	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldrh	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		adr_l	x\res, threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm

	.macro return_from_exception
		eret
		/* Guard against speculation past ERET */
		dsb nsh
		isb
	.endm

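	/*
	 * Branch to \label if the SPSR value in \reg describes an
	 * exception taken from EL0, that is, either AArch32 state
	 * (SPSR_MODE_RW_32 set) or AArch64 with the EL field equal to 0.
	 */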
	.macro b_if_spsr_is_el0 reg, label
		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
		b.eq	\label
	.endm

	.macro pauth_el0_to_el1 reg
		/*
		 * If pauth is only enabled in one of core or TA (xor) we
		 * need to update sctlr.
		 */
#if (defined(CFG_TA_PAUTH) && !defined(CFG_CORE_PAUTH)) || \
    (!defined(CFG_TA_PAUTH) && defined(CFG_CORE_PAUTH))
		mrs	\reg, sctlr_el1
		/* Flip the SCTLR_ENIA bit */
		eor     \reg, \reg, #SCTLR_ENIA
		msr	sctlr_el1, \reg
#endif
	.endm

	.macro pauth_el1_to_el0 reg
		/*
		 * If pauth is only enabled in one of core or TA (xor) we
		 * need to update sctlr.
		 */
#if (defined(CFG_TA_PAUTH) && !defined(CFG_CORE_PAUTH)) || \
    (!defined(CFG_TA_PAUTH) && defined(CFG_CORE_PAUTH))
		mrs	\reg, sctlr_el1
		/* Flip the SCTLR_ENIA bit */
		eor     \reg, \reg, #SCTLR_ENIA
		msr	sctlr_el1, \reg
#endif
	.endm

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3
	ldr	x1, [x0, THREAD_CTX_REGS_TPIDR_EL0]
	msr	tpidr_el0, x1

#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
	load_xregs x0, THREAD_CTX_REGS_APIAKEY_HI, 1, 2
	write_apiakeyhi	x1
	write_apiakeylo	x2
#endif
	b_if_spsr_is_el0 w3, 1f

#if defined(CFG_CORE_PAUTH) || defined(CFG_TA_PAUTH)
	/* SCTLR or the APIA key has changed */
	isb
#endif
	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]
	return_from_exception

1:
	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC thread_resume

FUNC thread_smc , :
	smc	#0
	ret
END_FUNC thread_smc

/* void thread_smccc(struct thread_smc_args *arg_res) */
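/*
 * Issues the SMCCC call described by *arg_res and stores the returned
 * x0..x7 back into the same struct. The conduit is HVC when the SPMC
 * runs at S-EL2 (CFG_CORE_SEL2_SPMC), otherwise SMC.
 */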
FUNC thread_smccc , :
	push	x0, xzr
	mov	x8, x0
	load_xregs x8, 0, 0, 7
#ifdef CFG_CORE_SEL2_SPMC
	hvc	#0
#else
	smc	#0
#endif
	pop	x8, xzr
	store_xregs x8, 0, 0, 7
	ret
END_FUNC thread_smccc

FUNC thread_init_vbar , :
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar
DECLARE_KEEP_PAGER thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 *
 * This function depends on being called with exceptions masked.
 */
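/*
 * Returns when the user mode thread exits, that is, when
 * thread_unwind_user_mode() below restores the x19..x30 saved here and
 * returns to the caller of this function, with the exit statuses
 * written through *exit_status0 and *exit_status1.
 */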
FUNC __thread_enter_user_mode , :
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 0, 2
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Save kern sp in x19
	 * Switch to SP_EL1
	 */
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save kernel stack pointer to ensure that el0_svc() uses
	 * correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	msr	sp_el0, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3

#ifdef	CFG_TA_PAUTH
	/* Load APIAKEY */
	load_xregs x0, THREAD_CTX_REGS_APIAKEY_HI, 1, 2
	write_apiakeyhi	x1
	write_apiakeylo	x2
#endif

	/*
	 * Save the values for x0 and x1 in struct thread_core_local to be
	 * restored later just before the eret.
	 */
	load_xregs x0, THREAD_CTX_REGS_X0, 1, 2
	store_xregs sp, THREAD_CORE_LOCAL_X0, 1, 2

	/* Load the rest of the general purpose registers */
	load_xregs x0, THREAD_CTX_REGS_X2, 2, 30

	/* Jump into user mode */
	b eret_to_el0
END_FUNC __thread_enter_user_mode
DECLARE_KEEP_PAGER __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 * 		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 3, 5
	str	w1, [x4]
	str	w2, [x5]
	/* Save x19..x30 */
	store_xregs x3, THREAD_CTX_REGS_X19, 19, 30
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. This is meant to be placed
	 * immediately after the last instruction in the vector. It takes the
	 * vector entry as the parameter
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

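	/*
	 * Restore the EL1 view of the world after an exception from EL0:
	 * switch ttbr0_el1 back to the kernel mode ASID and, when
	 * CFG_CORE_UNMAP_CORE_AT_EL0 is enabled, jump from the reduced
	 * trampoline mapping into the full kernel mapping, updating
	 * vbar_el1 (and SP for the Spectre workaround) accordingly.
	 * In both configurations x0..x3 end up saved in struct
	 * thread_core_local.
	 */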
	.macro restore_mapping
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/* Temporarily save x0, x1 */
		msr	tpidr_el1, x0
		msr	tpidrro_el0, x1

		/* Update the mapping to use the full kernel mapping */
		mrs	x0, ttbr0_el1
		sub	x0, x0, #CORE_MMU_BASE_TABLE_OFFSET
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb

		/* Jump into the full mapping and continue execution */
		adr	x0, 1f
		ldr	x1, [sp, #THREAD_CORE_LOCAL_KCODE_OFFSET]
		add	x0, x0, x1
		br	x0
	1:
BTI(		bti	j)
		/* Point to the vector into the full mapping */
		adr_l	x0, thread_user_kcode_offset
		ldr	x0, [x0]
		mrs	x1, vbar_el1
		add	x1, x1, x0
		msr	vbar_el1, x1
		isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
		/*
		 * Update the SP with thread_user_kdata_sp_offset as
		 * described in init_user_kcode().
		 */
		adr_l	x0, thread_user_kdata_sp_offset
		ldr	x0, [x0]
		add	sp, sp, x0
#endif

		/* Restore x0, x1 */
		mrs	x0, tpidr_el1
		mrs	x1, tpidrro_el0
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
#else
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mrs	x0, ttbr0_el1
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	.endm

#define INV_INSN	0
FUNC thread_excp_vect , : , default, 2048, nobti
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
el1_sync_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size el1_sync_sp0

	.balign	128, INV_INSN
el1_irq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el1_irq_sp0

	.balign	128, INV_INSN
el1_fiq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el1_fiq_sp0

	.balign	128, INV_INSN
el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
el1_sync_sp1:
	b	el1_sync_sp1
	check_vector_size el1_sync_sp1

	.balign	128, INV_INSN
el1_irq_sp1:
	b	el1_irq_sp1
	check_vector_size el1_irq_sp1

	.balign	128, INV_INSN
el1_fiq_sp1:
	b	el1_fiq_sp1
	check_vector_size el1_fiq_sp1

	.balign	128, INV_INSN
el1_serror_sp1:
	b	el1_serror_sp1
	check_vector_size el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
el0_sync_a64:
	restore_mapping
	/* PAuth will be disabled later else check_vector_size will fail */

	b	el0_sync_a64_finish
	check_vector_size el0_sync_a64

	.balign	128, INV_INSN
el0_irq_a64:
	restore_mapping
	pauth_el0_to_el1 x1

	b	elx_irq
	check_vector_size el0_irq_a64

	.balign	128, INV_INSN
el0_fiq_a64:
	restore_mapping
	pauth_el0_to_el1 x1

	b	elx_fiq
	check_vector_size el0_fiq_a64

	.balign	128, INV_INSN
el0_serror_a64:
	b   	el0_serror_a64
	check_vector_size el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
el0_sync_a32:
	restore_mapping

	b 	el0_sync_a32_finish
	check_vector_size el0_sync_a32

	.balign	128, INV_INSN
el0_irq_a32:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a32

	.balign	128, INV_INSN
el0_fiq_a32:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a32

	.balign	128, INV_INSN
el0_serror_a32:
	b	el0_serror_a32
	check_vector_size el0_serror_a32

#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
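	/*
	 * Invalidate the branch predictor by calling into the secure
	 * monitor with SMCCC_ARCH_WORKAROUND_1 (Spectre variant 2
	 * mitigation). x0..x3 are preserved via struct thread_core_local.
	 */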
	.macro invalidate_branch_predictor
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mov_imm	x0, SMCCC_ARCH_WORKAROUND_1
		smc	#0
		load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	.endm

	.balign	2048, INV_INSN
	.global thread_excp_vect_wa_spectre_v2
thread_excp_vect_wa_spectre_v2:
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_v2_el1_sync_sp0:
	b	el1_sync_sp0
	check_vector_size wa_spectre_v2_el1_sync_sp0

	.balign	128, INV_INSN
wa_spectre_v2_el1_irq_sp0:
	b	el1_irq_sp0
	check_vector_size wa_spectre_v2_el1_irq_sp0

	.balign	128, INV_INSN
wa_spectre_v2_el1_fiq_sp0:
	b	el1_fiq_sp0
	check_vector_size wa_spectre_v2_el1_fiq_sp0

	.balign	128, INV_INSN
wa_spectre_v2_el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size wa_spectre_v2_el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_v2_el1_sync_sp1:
	b	wa_spectre_v2_el1_sync_sp1
	check_vector_size wa_spectre_v2_el1_sync_sp1

	.balign	128, INV_INSN
wa_spectre_v2_el1_irq_sp1:
	b	wa_spectre_v2_el1_irq_sp1
	check_vector_size wa_spectre_v2_el1_irq_sp1

	.balign	128, INV_INSN
wa_spectre_v2_el1_fiq_sp1:
	b	wa_spectre_v2_el1_fiq_sp1
	check_vector_size wa_spectre_v2_el1_fiq_sp1

	.balign	128, INV_INSN
wa_spectre_v2_el1_serror_sp1:
	b	wa_spectre_v2_el1_serror_sp1
	check_vector_size wa_spectre_v2_el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_v2_el0_sync_a64:
	invalidate_branch_predictor
	b	el0_sync_a64
	check_vector_size wa_spectre_v2_el0_sync_a64

	.balign	128, INV_INSN
wa_spectre_v2_el0_irq_a64:
	invalidate_branch_predictor
	b	el0_irq_a64
	check_vector_size wa_spectre_v2_el0_irq_a64

	.balign	128, INV_INSN
wa_spectre_v2_el0_fiq_a64:
	invalidate_branch_predictor
	b	el0_fiq_a64
	check_vector_size wa_spectre_v2_el0_fiq_a64

	.balign	128, INV_INSN
wa_spectre_v2_el0_serror_a64:
	b   	wa_spectre_v2_el0_serror_a64
	check_vector_size wa_spectre_v2_el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_v2_el0_sync_a32:
	invalidate_branch_predictor
	b	el0_sync_a32
	check_vector_size wa_spectre_v2_el0_sync_a32

	.balign	128, INV_INSN
wa_spectre_v2_el0_irq_a32:
	invalidate_branch_predictor
	b	el0_irq_a32
	check_vector_size wa_spectre_v2_el0_irq_a32

	.balign	128, INV_INSN
wa_spectre_v2_el0_fiq_a32:
	invalidate_branch_predictor
	b	el0_fiq_a32
	check_vector_size wa_spectre_v2_el0_fiq_a32

	.balign	128, INV_INSN
wa_spectre_v2_el0_serror_a32:
	b	wa_spectre_v2_el0_serror_a32
	check_vector_size wa_spectre_v2_el0_serror_a32

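	/*
	 * Discard the branch history (Spectre-BHB mitigation) before
	 * handling an exception from EL0 by executing a CPU specific
	 * number of taken branches, read from
	 * THREAD_CORE_LOCAL_BHB_LOOP_COUNT in struct thread_core_local.
	 */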
	.macro discard_branch_history
		str	x0, [sp, #THREAD_CORE_LOCAL_X0]
		ldrb	w0, [sp, #THREAD_CORE_LOCAL_BHB_LOOP_COUNT]
	1:	b	2f
	2:	subs	w0, w0, #1
		bne	1b
		dsb	sy
		isb
		ldr	x0, [sp, #THREAD_CORE_LOCAL_X0]
	.endm

	.balign	2048, INV_INSN
	.global thread_excp_vect_wa_spectre_bhb
thread_excp_vect_wa_spectre_bhb:
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_bhb_el1_sync_sp0:
	b	el1_sync_sp0
	check_vector_size wa_spectre_bhb_el1_sync_sp0

	.balign	128, INV_INSN
wa_spectre_bhb_el1_irq_sp0:
	b	el1_irq_sp0
	check_vector_size wa_spectre_bhb_el1_irq_sp0

	.balign	128, INV_INSN
wa_spectre_bhb_el1_fiq_sp0:
	b	el1_fiq_sp0
	check_vector_size wa_spectre_bhb_el1_fiq_sp0

	.balign	128, INV_INSN
wa_spectre_bhb_el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size wa_spectre_bhb_el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_bhb_el1_sync_sp1:
	b	wa_spectre_bhb_el1_sync_sp1
	check_vector_size wa_spectre_bhb_el1_sync_sp1

	.balign	128, INV_INSN
wa_spectre_bhb_el1_irq_sp1:
	b	wa_spectre_bhb_el1_irq_sp1
	check_vector_size wa_spectre_bhb_el1_irq_sp1

	.balign	128, INV_INSN
wa_spectre_bhb_el1_fiq_sp1:
	b	wa_spectre_bhb_el1_fiq_sp1
	check_vector_size wa_spectre_bhb_el1_fiq_sp1

	.balign	128, INV_INSN
wa_spectre_bhb_el1_serror_sp1:
	b	wa_spectre_bhb_el1_serror_sp1
	check_vector_size wa_spectre_bhb_el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_bhb_el0_sync_a64:
	discard_branch_history
	b	el0_sync_a64
	check_vector_size wa_spectre_bhb_el0_sync_a64

	.balign	128, INV_INSN
wa_spectre_bhb_el0_irq_a64:
	discard_branch_history
	b	el0_irq_a64
	check_vector_size wa_spectre_bhb_el0_irq_a64

	.balign	128, INV_INSN
wa_spectre_bhb_el0_fiq_a64:
	discard_branch_history
	b	el0_fiq_a64
	check_vector_size wa_spectre_bhb_el0_fiq_a64

	.balign	128, INV_INSN
wa_spectre_bhb_el0_serror_a64:
	b   	wa_spectre_bhb_el0_serror_a64
	check_vector_size wa_spectre_bhb_el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_bhb_el0_sync_a32:
	discard_branch_history
	b	el0_sync_a32
	check_vector_size wa_spectre_bhb_el0_sync_a32

	.balign	128, INV_INSN
wa_spectre_bhb_el0_irq_a32:
	discard_branch_history
	b	el0_irq_a32
	check_vector_size wa_spectre_bhb_el0_irq_a32

	.balign	128, INV_INSN
wa_spectre_bhb_el0_fiq_a32:
	discard_branch_history
	b	el0_fiq_a32
	check_vector_size wa_spectre_bhb_el0_fiq_a32

	.balign	128, INV_INSN
wa_spectre_bhb_el0_serror_a32:
	b	wa_spectre_bhb_el0_serror_a32
	check_vector_size wa_spectre_bhb_el0_serror_a32
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

/*
 * We're keeping this code in the same section as the vector to make sure
 * that it's always available.
 */
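/*
 * eret_to_el0 expects x0 and x1 to be saved in struct thread_core_local
 * and the rest of the return state already set up. It switches to the
 * user mode ASID (and to the reduced kernel mapping when
 * CFG_CORE_UNMAP_CORE_AT_EL0 is enabled), restores x0 and x1, and erets
 * into EL0.
 */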
eret_to_el0:
	pauth_el1_to_el0 x1

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point to the vector into the reduced mapping */
	adr_l	x0, thread_user_kcode_offset
	ldr	x0, [x0]
	mrs	x1, vbar_el1
	sub	x1, x1, x0
	msr	vbar_el1, x1
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/* Store the SP offset in tpidr_el1 to be used below to update SP */
	adr_l	x1, thread_user_kdata_sp_offset
	ldr	x1, [x1]
	msr	tpidr_el1, x1
#endif

	/* Jump into the reduced mapping and continue execution */
	adr_l	x1, 1f
	sub	x1, x1, x0
	br	x1
1:
BTI(	bti	j)
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	msr	tpidrro_el0, x0

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x0, ttbr0_el1
	add	x0, x0, #CORE_MMU_BASE_TABLE_OFFSET
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/*
	 * Update the SP with thread_user_kdata_sp_offset as described in
	 * init_user_kcode().
	 */
	mrs	x0, tpidr_el1
	sub	sp, sp, x0
#endif

	mrs	x0, tpidrro_el0
#else
	mrs	x0, ttbr0_el1
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	return_from_exception

el0_sync_a64_finish:
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort

el0_sync_a32_finish:
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active,
	 * which means executing with the reduced mapping, so the code
	 * needs to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	/* Mask all exceptions */
	mrs	x6, daif	/* this register must be preserved */
	msr	daifset, #DAIFBIT_ALL

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point to the vector into the reduced mapping */
	adr_l	x7, thread_user_kcode_offset
	ldr	x7, [x7]	/* this register must be preserved */
	mrs	x4, vbar_el1	/* this register must be preserved */
	sub	x3, x4, x7
	msr	vbar_el1, x3
	isb

	/* Jump into the reduced mapping and continue execution */
	adr	x3, 1f
	sub	x3, x3, x7
	br	x3
1:
BTI(	bti	j)
	/* Update the mapping to exclude the full kernel mapping */
	mrs	x5, ttbr0_el1	/* this register must be preserved */
	add	x2, x5, #CORE_MMU_BASE_TABLE_OFFSET
	orr	x2, x2, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x2
	isb

#else
	mrs	x5, ttbr0_el1	/* this register must be preserved */
	orr	x2, x5, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in x2 */
	mrs	x3, ctr_el0
	and	x3, x3, #CTR_IMINLINE_MASK
	mov	x2, #CTR_WORD_SIZE
	lsl	x2, x2, x3

	add	x1, x0, x1
	sub	x3, x2, #1
	bic	x0, x0, x3
1:
	ic	ivau, x0
	add	x0, x0, x2
	cmp	x0, x1
	b.lo    1b
	dsb	ish

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Update the mapping to use the full kernel mapping and ASID */
	msr	ttbr0_el1, x5
	isb

	/* Jump into the full mapping and continue execution */
	adr	x0, 1f
	add	x0, x0, x7
	br	x0
1:
BTI(	bti	j)
	/* Point to the vector into the full mapping */
	msr	vbar_el1, x4
	isb
#else
	/* switch to kernel mode ASID */
	msr	ttbr0_el1, x5
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	msr	daif, x6	/* restore exceptions */
	ret	/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect

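/*
 * el0_svc handles an SVC from user mode: the caller's state is saved in
 * a struct thread_scall_regs on the kernel stack of the thread,
 * thread_scall_handler() is called with native interrupts unmasked, and
 * the registers are then restored from the same struct before returning
 * with an eret, either directly to EL1 or through eret_to_el0 when the
 * saved spsr indicates EL0.
 */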
LOCAL_FUNC el0_svc , :
	pauth_el0_to_el1 x1
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	mrs	x1, tpidr_el0
	str	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
	/* load saved kernel sp */
	ldr	x3, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x3

	/* Make room for struct thread_scall_regs */
	sub	sp, sp, #THREAD_SCALL_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SCALL_REG_X30]

#ifdef CFG_TA_PAUTH
	/* Save APIAKEY */
	read_apiakeyhi	x2
	read_apiakeylo	x3
	stp	x2, x3, [sp, #THREAD_SCALL_REG_APIAKEY_HI]
#endif

#ifdef CFG_CORE_PAUTH
	ldp	x2, x3, [x0, #THREAD_CTX_KEYS]
	write_apiakeyhi	x2
	write_apiakeylo	x3
#endif
#if defined(CFG_CORE_PAUTH) || defined(CFG_TA_PAUTH)
	/* SCTLR or the APIA key has changed */
	isb
#endif

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SCALL_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SCALL_REG_ELR, 0, 1

	mov	x0, sp

	/*
	 * Unmask native interrupts, Serror, and debug exceptions since we have
	 * nothing left in sp_el1. Note that the SVC handler is expected to
	 * re-enable foreign interrupts by itself.
	 */
#if defined(CFG_ARM_GICV3)
	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
#else
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
#endif

	/* Call the handler */
	bl	thread_scall_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This matters when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SCALL_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	ldr	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
	msr	tpidr_el0, x1
	load_xregs sp, THREAD_SCALL_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SCALL_REG_X2, 2, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SCALL_REG_SP_EL0]
	mov	sp, x0
	b_if_spsr_is_el0 w1, 1f
	ldp	x0, x1, [x30, THREAD_SCALL_REG_X0]
	ldr	x30, [x30, #THREAD_SCALL_REG_X30]

	return_from_exception

1:
#ifdef	CFG_TA_PAUTH
	/* Restore APIAKEY */
	load_xregs x30, THREAD_SCALL_REG_APIAKEY_HI, 0, 1
	write_apiakeyhi	x0
	write_apiakeylo	x1
#endif

	ldp	x0, x1, [x30, THREAD_SCALL_REG_X0]
	ldr	x30, [x30, #THREAD_SCALL_REG_X30]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC el0_svc

LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* We have an abort while using the abort stack, select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x1, elr_el1
	stp	x30, x1, [sp, #THREAD_ABT_REG_X30]

#if defined(CFG_CORE_PAUTH)
	read_apiakeyhi	x2
	read_apiakeylo	x3
	stp	x2, x3, [sp, #THREAD_ABT_REGS_APIAKEY_HI]
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_KEYS]
	write_apiakeyhi	x2
	write_apiakeylo	x3
	isb
#endif

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

#if defined(CFG_CORE_PAUTH)
	ldp	x0, x1, [x3, #THREAD_ABT_REGS_APIAKEY_HI]
	write_apiakeyhi	x0
	write_apiakeylo	x1
	isb
#endif

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	return_from_exception
END_FUNC el1_sync_abort

	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	pauth_el0_to_el1 x1
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x1, elr_el1
	stp	x30, x1, [sp, #THREAD_ABT_REG_X30]

#if defined(CFG_TA_PAUTH)
	read_apiakeyhi	x2
	read_apiakeylo	x3
	stp	x2, x3, [sp, #THREAD_ABT_REGS_APIAKEY_HI]
#endif

#if defined(CFG_CORE_PAUTH)
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_KEYS]
	write_apiakeyhi	x2
	write_apiakeylo	x3
#endif

#if defined(CFG_CORE_PAUTH) || defined(CFG_TA_PAUTH)
	/* SCTLR or the APIA key has changed */
	isb
#endif

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
	ldp	x1, x2, [x3, #THREAD_ABT_REGS_APIAKEY_HI]
	write_apiakeyhi	x1
	write_apiakeylo	x2
#endif

	/* Restore x2 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3

	b_if_spsr_is_el0 w0, 1f

#if defined(CFG_CORE_PAUTH)
	/* the APIA key has changed */
	isb
#endif

	/* Restore x0 to x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	return_from_exception
1:	b	eret_to_el0
END_FUNC el0_sync_abort

/* The handler of foreign interrupts. */
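/*
 * A foreign interrupt is handled by the normal world: the interrupted
 * thread state is saved in its struct thread_ctx and the thread is
 * marked as suspended with thread_state_suspend(), after which
 * execution leaves the core via thread_foreign_intr_exit() with the
 * thread index in w0.
 */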
.macro foreign_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* Store tpidr_el0 */
	mrs	x2, tpidr_el0
	str	x2, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
	/* Store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
	/* Save APIAKEY */
	read_apiakeyhi	x1
	read_apiakeylo	x2
	store_xregs x0, THREAD_CTX_REGS_APIAKEY_HI, 1, 2
#endif
#if defined(CFG_CORE_PAUTH)
	ldp	x1, x2, [sp, #THREAD_CORE_LOCAL_KEYS]
	write_apiakeyhi	x1
	write_apiakeylo	x2
	isb
#endif
	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which cache entries have
	 * been used. We're relying on the dispatcher in TF-A to take
	 * care of the BTB.
	 */
	mov	x0, #DCACHE_OP_CLEAN_INV
	bl	dcache_op_louis
	ic	iallu
#endif
	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * functions expect to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	/* Passing thread index in w0 */
	b	thread_foreign_intr_exit
.endm

/*
 * This struct is never used from C; it's only here to visualize the
 * layout.
 *
 * struct elx_nintr_rec {
 * 	uint64_t x[19 - 4]; x4..x18
 * 	uint64_t lr;
 * 	uint64_t sp_el0;
 * #if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
 * 	uint64_t apiakey_hi;
 * 	uint64_t apiakey_lo;
 * #endif
 * };
 */
#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
#define ELX_NINTR_REG_APIAKEY_HI	(8 + ELX_NINTR_REC_SP_EL0)
#define ELX_NINTR_REG_APIAKEY_LO	(8 + ELX_NINTR_REG_APIAKEY_HI)
#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REG_APIAKEY_LO)
#else
#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)
#endif


/* The handler of native interrupts. */
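/*
 * A native interrupt is handled inside OP-TEE: the registers that may
 * be clobbered are saved in a struct elx_nintr_rec on the tmp stack,
 * itr_core_handler() is called, and the interrupted context is then
 * resumed with an eret.
 */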
.macro native_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save the registers that a call to a C function may clobber on
	 * the temp stack.
	 *
	 * Note that we're temporarily using x1 to access the temp stack
	 * until we're ready to switch to sp_el0 and update sp.
	 */
	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Make room for struct elx_nintr_rec */
	sub	x1, x1, #ELX_NINTR_REC_SIZE
	/* Store lr and original sp_el0 */
	mrs	x2, sp_el0
	stp	x30, x2, [x1, #ELX_NINTR_REC_LR]
	/* Store x4..x18 */
	store_xregs x1, ELX_NINTR_REC_X(4), 4, 18

#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
	read_apiakeyhi	x2
	read_apiakeylo	x3
	stp	x2, x3, [x1, #ELX_NINTR_REG_APIAKEY_HI]
#if defined(CFG_CORE_PAUTH)
	ldp	x2, x3, [sp, #THREAD_CORE_LOCAL_KEYS]
	write_apiakeyhi	x2
	write_apiakeylo	x3
#endif
	/* SCTLR or the APIA key has changed */
	isb
#endif

	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	bl	thread_check_canaries
	bl	itr_core_handler

	/*
	 * Restore registers
	 */

#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
	ldp	x0, x1, [sp, #ELX_NINTR_REG_APIAKEY_HI]
	write_apiakeyhi	x0
	write_apiakeylo	x1
#endif

	/* Restore x4..x18 */
	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	mrs	x0, spsr_el1

	/* Restore x2..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
	b_if_spsr_is_el0 w0, 1f

#if defined(CFG_CORE_PAUTH)
	/* APIA key has changed */
	isb
#endif

	/* Restore x0..x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	return_from_exception
1:	b	eret_to_el0
.endm

LOCAL_FUNC elx_irq , :
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
END_FUNC elx_fiq

BTI(emit_aarch64_feature_1_and     GNU_PROPERTY_AARCH64_FEATURE_1_BTI)