/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2017, Linaro Limited
 */

#include <arm.h>
#include <arm64_macros.S>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread_defs.h>
#include <mm/core_mmu.h>
#include <smccc.h>

#include "thread_private.h"

	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldr	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		ldr	x\res, =threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm
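
	/*
	 * Illustration only (not generated code): the macro above computes
	 *
	 *	res = (vaddr_t)threads + curr_thread * THREAD_CTX_SIZE;
	 *
	 * i.e. a pointer to threads[curr_thread], where curr_thread is read
	 * from the core local storage passed in \core_local.
	 */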

	.macro b_if_spsr_is_el0 reg, label
		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
		b.eq	\label
	.endm
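
	/*
	 * b_if_spsr_is_el0 branches to \label when the saved SPSR describes
	 * a return to EL0: the tbnz takes the branch for an AArch32 context
	 * (mode RW bit set) and the tst/b.eq pair takes it when the AArch64
	 * EL field is 0 (EL0t). Any other mode falls through.
	 */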

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3

	b_if_spsr_is_el0 w3, 1f

	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]
	eret

1:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC thread_resume

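/*
 * thread_smc() is a thin wrapper around the SMC instruction: it traps to
 * the secure monitor at EL3 and returns whatever comes back. Per the SMC
 * Calling Convention, arguments are passed in x0-x7 and results are
 * returned in x0-x3, so no register shuffling is needed here.
 */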
FUNC thread_smc , :
	smc	#0
	ret
END_FUNC thread_smc

FUNC thread_init_vbar , :
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
 *               unsigned long user_func, unsigned long spsr,
 *               uint32_t *exit_status0, uint32_t *exit_status1)
 *
 */
FUNC __thread_enter_user_mode , :
	ldr	x8, [sp]
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Switch to SP_EL1
	 * Disable exceptions
	 * Save kern sp in x19
	 */
	msr	daifset, #DAIFBIT_ALL
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save kernel stack pointer to ensure that el0_svc() uses
	 * correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	msr	spsr_el1, x6
	/* Set user sp */
	mov	x13, x4		/* Used when running TA in Aarch32 */
	msr	sp_el0, x4	/* Used when running TA in Aarch64 */
	/* Set user function */
	msr	elr_el1, x5
	/* Set frame pointer (user stack can't be unwound past this point) */
	mov x29, #0

	/* Jump into user mode */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b eret_to_el0
END_FUNC __thread_enter_user_mode
KEEP_PAGER __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 * 		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
	str	w1, [x3]
	str	w2, [x4]
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes
	 * the vector entry label as its parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm
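
	/*
	 * Each vector entry is at most 32 instructions (32 * 4 = 128 bytes),
	 * which is why the entries below are aligned with
	 * ".align 7, INV_INSN" (2^7 = 128) and each one ends with
	 * check_vector_size.
	 */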

	.macro restore_mapping
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/* Temporarily save x0, x1 */
		msr	tpidr_el1, x0
		msr	tpidrro_el0, x1

		/* Update the mapping to use the full kernel mapping */
		mrs	x0, ttbr0_el1
		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb

		/* Jump into the full mapping and continue execution */
		ldr	x0, =1f
		br	x0
	1:

		/* Point to the vector into the full mapping */
		adr	x0, thread_user_kcode_offset
		ldr	x0, [x0]
		mrs	x1, vbar_el1
		add	x1, x1, x0
		msr	vbar_el1, x1
		isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
		/*
		 * Update the SP with thread_user_kdata_sp_offset as
		 * described in init_user_kcode().
		 */
		adr	x0, thread_user_kdata_sp_offset
		ldr	x0, [x0]
		add	sp, sp, x0
#endif

		/* Restore x0, x1 */
		mrs	x0, tpidr_el1
		mrs	x1, tpidrro_el0
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
#else
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mrs	x0, ttbr0_el1
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	.endm
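
	/*
	 * In rough pseudo code (illustration only) restore_mapping does:
	 *
	 *	ttbr0 = read_ttbr0_el1();
	 *	if (CFG_CORE_UNMAP_CORE_AT_EL0)
	 *		ttbr0 -= CORE_MMU_L1_TBL_OFFSET; /* full kernel tables */
	 *	ttbr0 &= ~BIT(TTBR_ASID_SHIFT);		 /* kernel ASID */
	 *	write_ttbr0_el1(ttbr0);
	 *
	 * and, when the core is unmapped at EL0, additionally jumps into the
	 * full mapping and moves VBAR_EL1 back to the full-mapping vector.
	 */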

#define INV_INSN	0
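
/*
 * The gaps between the vector entries below are filled with INV_INSN
 * (0x00000000), which is not a valid AArch64 instruction, so execution
 * that falls into the padding traps instead of running stray code.
 */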
	.section .text.thread_excp_vect
	.align	11, INV_INSN
FUNC thread_excp_vect , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el1_sync_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size el1_sync_sp0

	.align	7, INV_INSN
el1_irq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el1_irq_sp0

	.align	7, INV_INSN
el1_fiq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el1_fiq_sp0

	.align	7, INV_INSN
el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el1_sync_sp1:
	b	el1_sync_sp1
	check_vector_size el1_sync_sp1

	.align	7, INV_INSN
el1_irq_sp1:
	b	el1_irq_sp1
	check_vector_size el1_irq_sp1

	.align	7, INV_INSN
el1_fiq_sp1:
	b	el1_fiq_sp1
	check_vector_size el1_fiq_sp1

	.align	7, INV_INSN
el1_serror_sp1:
	b	el1_serror_sp1
	check_vector_size el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el0_sync_a64:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a64

	.align	7, INV_INSN
el0_irq_a64:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a64

	.align	7, INV_INSN
el0_fiq_a64:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a64

	.align	7, INV_INSN
el0_serror_a64:
	b	el0_serror_a64
	check_vector_size el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el0_sync_a32:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7, INV_INSN
el0_irq_a32:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7, INV_INSN
el0_fiq_a32:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7, INV_INSN
el0_serror_a32:
	b	el0_serror_a32
	check_vector_size el0_serror_a32

#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
	.macro invalidate_branch_predictor
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mov_imm	x0, SMCCC_ARCH_WORKAROUND_1
		smc	#0
		load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	.endm

	.align	11, INV_INSN
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el1_sync_sp0:
	b	el1_sync_sp0
	check_vector_size workaround_el1_sync_sp0

	.align	7, INV_INSN
workaround_el1_irq_sp0:
	b	el1_irq_sp0
	check_vector_size workaround_el1_irq_sp0

	.align	7, INV_INSN
workaround_el1_fiq_sp0:
	b	el1_fiq_sp0
	check_vector_size workaround_el1_fiq_sp0

	.align	7, INV_INSN
workaround_el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size workaround_el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el1_sync_sp1:
	b	workaround_el1_sync_sp1
	check_vector_size workaround_el1_sync_sp1

	.align	7, INV_INSN
workaround_el1_irq_sp1:
	b	workaround_el1_irq_sp1
	check_vector_size workaround_el1_irq_sp1

	.align	7, INV_INSN
workaround_el1_fiq_sp1:
	b	workaround_el1_fiq_sp1
	check_vector_size workaround_el1_fiq_sp1

	.align	7, INV_INSN
workaround_el1_serror_sp1:
	b	workaround_el1_serror_sp1
	check_vector_size workaround_el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el0_sync_a64:
	invalidate_branch_predictor
	b	el0_sync_a64
	check_vector_size workaround_el0_sync_a64

	.align	7, INV_INSN
workaround_el0_irq_a64:
	invalidate_branch_predictor
	b	el0_irq_a64
	check_vector_size workaround_el0_irq_a64

	.align	7, INV_INSN
workaround_el0_fiq_a64:
	invalidate_branch_predictor
	b	el0_fiq_a64
	check_vector_size workaround_el0_fiq_a64

	.align	7, INV_INSN
workaround_el0_serror_a64:
	b	workaround_el0_serror_a64
	check_vector_size workaround_el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el0_sync_a32:
	invalidate_branch_predictor
	b	el0_sync_a32
	check_vector_size workaround_el0_sync_a32

	.align	7, INV_INSN
workaround_el0_irq_a32:
	invalidate_branch_predictor
	b	el0_irq_a32
	check_vector_size workaround_el0_irq_a32

	.align	7, INV_INSN
workaround_el0_fiq_a32:
	invalidate_branch_predictor
	b	el0_fiq_a32
	check_vector_size workaround_el0_fiq_a32

	.align	7, INV_INSN
workaround_el0_serror_a32:
	b	workaround_el0_serror_a32
	check_vector_size workaround_el0_serror_a32
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

/*
 * We're keeping this code in the same section as the vector to make sure
 * that it's always available.
 */
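/*
 * eret_to_el0 is the mirror image of restore_mapping: with
 * CFG_CORE_UNMAP_CORE_AT_EL0 it points VBAR_EL1 at the reduced-mapping
 * copy of the vector, jumps into the reduced mapping and adds
 * CORE_MMU_L1_TBL_OFFSET to TTBR0_EL1; in all configurations it switches
 * to the user mode ASID and restores x0/x1 from the core local storage
 * before issuing eret.
 */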
eret_to_el0:

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point to the vector into the reduced mapping */
	adr	x0, thread_user_kcode_offset
	ldr	x0, [x0]
	mrs	x1, vbar_el1
	sub	x1, x1, x0
	msr	vbar_el1, x1
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/* Store the SP offset in tpidr_el1 to be used below to update SP */
	adr	x1, thread_user_kdata_sp_offset
	ldr	x1, [x1]
	msr	tpidr_el1, x1
#endif

	/* Jump into the reduced mapping and continue execution */
	ldr	x1, =1f
	sub	x1, x1, x0
	br	x1
1:

	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	msr	tpidrro_el0, x0

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x0, ttbr0_el1
	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/*
	 * Update the SP with thread_user_kdata_sp_offset as described in
	 * init_user_kcode().
	 */
	mrs	x0, tpidr_el1
	sub	sp, sp, x0
#endif

	mrs	x0, tpidrro_el0
#else
	mrs	x0, ttbr0_el1
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	eret

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active,
	 * which means executing with the reduced mapping, so the code
	 * needs to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	/* Mask all exceptions */
	mrs	x6, daif	/* this register must be preserved */
	msr	daifset, #DAIFBIT_ALL

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point to the vector into the reduced mapping */
	adr	x2, thread_user_kcode_offset
	ldr	x2, [x2]
	mrs	x4, vbar_el1	/* this register must be preserved */
	sub	x3, x4, x2
	msr	vbar_el1, x3
	isb

	/* Jump into the reduced mapping and continue execution */
	ldr	x3, =1f
	sub	x3, x3, x2
	br	x3
1:

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x5, ttbr0_el1	/* this register must be preserved */
	add	x2, x5, #CORE_MMU_L1_TBL_OFFSET
	orr	x2, x2, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x2
	isb

#else
	mrs	x5, ttbr0_el1	/* this register must be preserved */
	orr	x2, x5, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in x2 */
	mrs	x3, ctr_el0
	and	x3, x3, #CTR_IMINLINE_MASK
	mov	x2, #CTR_WORD_SIZE
	lsl	x2, x2, x3
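
	/*
	 * CTR_EL0.IminLine is the log2 of the smallest icache line size in
	 * words, so the line size in bytes is CTR_WORD_SIZE << IminLine
	 * (for example IminLine = 4 gives 4 << 4 = 64 bytes).
	 */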

	add	x1, x0, x1
	sub	x3, x2, #1
	bic	x0, x0, x3
1:
	ic	ivau, x0
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	ish

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Update the mapping to use the full kernel mapping and ASID */
	msr	ttbr0_el1, x5
	isb

	/* Jump into the full mapping and continue execution */
	ldr	x0, =1f
	br	x0
1:

	/* Point to the vector into the full mapping */
	msr	vbar_el1, x4
	isb
#else
	/* switch to kernel mode ASID */
	msr	ttbr0_el1, x5
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	msr	daif, x6	/* restore exceptions */
	ret	/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect

LOCAL_FUNC el0_svc , :
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to the initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	mov	x0, sp

	/*
	 * Unmask native interrupts, SError, and debug exceptions since we
	 * have nothing left in sp_el1. Note that the SVC handler is expected
	 * to re-enable foreign interrupts by itself.
	 */
#if defined(CFG_ARM_GICV3)
	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
#else
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
#endif

	/* Call the handler */
	bl	tee_svc_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This matters when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
	mov	sp, x0
	b_if_spsr_is_el0 w1, 1f
	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	eret

1:	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC el0_svc

LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el1_sync_abort

	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in EL1_REC */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x2 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3

	b_if_spsr_is_el0 w0, 1f

	/* Restore x0 to x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	eret
1:	b	eret_to_el0
END_FUNC el0_sync_abort

/*
 * The handler of foreign interrupts, i.e. interrupts that are to be
 * handled by the normal world.
 */
.macro foreign_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which entries have been used
	 * in the cache. We're relying on the dispatcher in TF-A to take
	 * care of the BTB.
	 */
	mov	x0, #DCACHE_OP_CLEAN_INV
	bl	dcache_op_louis
	ic	iallu
#endif
	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * function expects to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	/* Passing thread index in w0 */
	b	thread_foreign_intr_exit
.endm

/*
 * This struct is never used from C; it's only here to visualize the
 * layout.
 *
 * struct elx_nintr_rec {
 * 	uint64_t x[19 - 4]; x4..x18
 * 	uint64_t lr;
 * 	uint64_t sp_el0;
 * };
 */
#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)
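
/*
 * Layout check of the defines above: x4..x18 occupy offsets 0..119,
 * 8 bytes of padding follow, lr is at offset 128 and sp_el0 at 136,
 * giving ELX_NINTR_REC_SIZE = 144 which keeps sp 16-byte aligned.
 */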

/*
 * The handler of native interrupts, i.e. interrupts handled by OP-TEE
 * itself.
 */
.macro native_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save registers on stack that can be corrupted by a call to
	 * a C function
	 */
	/* Make room for struct elx_nintr_rec */
	sub	sp, sp, #ELX_NINTR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]

	bl	thread_check_canaries
	bl	itr_core_handler

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	mrs	x0, spsr_el1
	/* Restore x2..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
	b_if_spsr_is_el0 w0, 1f

	/* Restore x0..x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	eret
1:	b	eret_to_el0
.endm

LOCAL_FUNC elx_irq , :
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
END_FUNC elx_fiq