/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2017, Linaro Limited
 */

#include <arm.h>
#include <arm64_macros.S>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread_defs.h>
#include <mm/core_mmu.h>
#include <smccc.h>

#include "thread_private.h"

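	/*
	 * Compute the address of the current thread's struct thread_ctx:
	 * x\res = threads + curr_thread * THREAD_CTX_SIZE, where the
	 * current thread index is read from the struct thread_core_local
	 * pointed to by \core_local. \tmp0 and \tmp1 are clobbered.
	 */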
	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldr	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		ldr	x\res, =threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm

	.macro return_from_exception
		eret
		/* Guard against speculation past ERET */
		dsb nsh
		isb
	.endm

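	/*
	 * Branch to \label if the SPSR value in \reg describes a return to
	 * EL0, that is, either AArch32 state or AArch64 with the EL field
	 * set to 0. The condition flags are clobbered.
	 */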
	.macro b_if_spsr_is_el0 reg, label
		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
		b.eq	\label
	.endm

/* void thread_resume(struct thread_ctx_regs *regs) */
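/*
 * Restores the state saved in *regs and resumes execution there: SP, ELR
 * and SPSR are programmed first, then the general purpose registers are
 * reloaded. A return to EL0 goes via eret_to_el0, which switches to the
 * user mapping/ASID before the eret.
 */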
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3

	b_if_spsr_is_el0 w3, 1f

	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]
	return_from_exception

1:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC thread_resume

FUNC thread_smc , :
	smc	#0
	ret
END_FUNC thread_smc

FUNC thread_init_vbar , :
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 *
 * This function depends on being called with exceptions masked.
 */
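/*
 * The entry to user mode is done in four steps: a struct
 * thread_user_mode_rec holding the callee-saved registers and the
 * exit_status pointers is pushed on the kernel stack, the resulting
 * kernel stack pointer is saved in the thread context so that el0_svc
 * can switch back to it, SPSR/ELR/SP_EL0 are programmed from *regs, and
 * finally the general purpose registers are loaded and the eret is done
 * via eret_to_el0.
 */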
FUNC __thread_enter_user_mode , :
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 0, 2
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Save kern sp in x19
	 * Switch to SP_EL1
	 */
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save the kernel stack pointer to ensure that el0_svc() uses the
	 * correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	msr	sp_el0, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3

	/*
	 * Save the values for x0 and x1 in struct thread_core_local to be
	 * restored later just before the eret.
	 */
	load_xregs x0, THREAD_CTX_REGS_X0, 1, 2
	store_xregs sp, THREAD_CORE_LOCAL_X0, 1, 2

	/* Load the rest of the general purpose registers */
	load_xregs x0, THREAD_CTX_REGS_X2, 2, 30

	/* Jump into user mode */
	b eret_to_el0
END_FUNC __thread_enter_user_mode
KEEP_PAGER __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 * 		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 3, 5
	str	w1, [x4]
	str	w2, [x5]
	/* Save x19..x30 */
	store_xregs x3, THREAD_CTX_REGS_X19, 19, 30
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. This is meant to be placed
	 * immediately after the last instruction in the vector. It takes the
	 * vector entry as the parameter
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

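	/*
	 * Restore the kernel mapping on entry from EL0. With
	 * CFG_CORE_UNMAP_CORE_AT_EL0 the full kernel mapping is selected by
	 * stripping CORE_MMU_L1_TBL_OFFSET from TTBR0_EL1, the kernel ASID
	 * is selected by clearing the ASID bit and VBAR_EL1 is moved back
	 * to the vector in the full mapping. Without that option only the
	 * ASID is switched. In both cases x0..x3 end up saved in struct
	 * thread_core_local.
	 */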
	.macro restore_mapping
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/* Temporarily save x0, x1 */
		msr	tpidr_el1, x0
		msr	tpidrro_el0, x1

		/* Update the mapping to use the full kernel mapping */
		mrs	x0, ttbr0_el1
		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb

		/* Jump into the full mapping and continue execution */
		ldr	x0, =1f
		br	x0
	1:

		/* Point to the vector into the full mapping */
		adr_l	x0, thread_user_kcode_offset
		ldr	x0, [x0]
		mrs	x1, vbar_el1
		add	x1, x1, x0
		msr	vbar_el1, x1
		isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
		/*
		 * Update the SP with thread_user_kdata_sp_offset as
		 * described in init_user_kcode().
		 */
		adr_l	x0, thread_user_kdata_sp_offset
		ldr	x0, [x0]
		add	sp, sp, x0
#endif

		/* Restore x0, x1 */
		mrs	x0, tpidr_el1
		mrs	x1, tpidrro_el0
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
#else
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mrs	x0, ttbr0_el1
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	.endm

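/*
 * The alignment padding in the vector table is filled with zero words.
 * 0x00000000 decodes as a permanently undefined instruction (udf #0) in
 * AArch64, so execution that strays into the padding traps instead of
 * running something by accident.
 */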
#define INV_INSN	0
	.align	11, INV_INSN
FUNC thread_excp_vect , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el1_sync_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size el1_sync_sp0

	.align	7, INV_INSN
el1_irq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el1_irq_sp0

	.align	7, INV_INSN
el1_fiq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el1_fiq_sp0

	.align	7, INV_INSN
el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el1_sync_sp1:
	b	el1_sync_sp1
	check_vector_size el1_sync_sp1

	.align	7, INV_INSN
el1_irq_sp1:
	b	el1_irq_sp1
	check_vector_size el1_irq_sp1

	.align	7, INV_INSN
el1_fiq_sp1:
	b	el1_fiq_sp1
	check_vector_size el1_fiq_sp1

	.align	7, INV_INSN
el1_serror_sp1:
	b	el1_serror_sp1
	check_vector_size el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el0_sync_a64:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a64

	.align	7, INV_INSN
el0_irq_a64:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a64

	.align	7, INV_INSN
el0_fiq_a64:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a64

	.align	7, INV_INSN
el0_serror_a64:
	b	el0_serror_a64
	check_vector_size el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el0_sync_a32:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7, INV_INSN
el0_irq_a32:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7, INV_INSN
el0_fiq_a32:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7, INV_INSN
el0_serror_a32:
	b	el0_serror_a32
	check_vector_size el0_serror_a32

#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
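	/*
	 * Alternative vector table used when
	 * CFG_CORE_WORKAROUND_SPECTRE_BP_SEC is enabled. Exceptions taken
	 * from a lower EL invalidate the branch predictor with an
	 * SMCCC_ARCH_WORKAROUND_1 SMC before continuing at the ordinary
	 * handlers above.
	 */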
	.macro invalidate_branch_predictor
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mov_imm	x0, SMCCC_ARCH_WORKAROUND_1
		smc	#0
		load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	.endm

	.align	11, INV_INSN
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el1_sync_sp0:
	b	el1_sync_sp0
	check_vector_size workaround_el1_sync_sp0

	.align	7, INV_INSN
workaround_el1_irq_sp0:
	b	el1_irq_sp0
	check_vector_size workaround_el1_irq_sp0

	.align	7, INV_INSN
workaround_el1_fiq_sp0:
	b	el1_fiq_sp0
	check_vector_size workaround_el1_fiq_sp0

	.align	7, INV_INSN
workaround_el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size workaround_el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el1_sync_sp1:
	b	workaround_el1_sync_sp1
	check_vector_size workaround_el1_sync_sp1

	.align	7, INV_INSN
workaround_el1_irq_sp1:
	b	workaround_el1_irq_sp1
	check_vector_size workaround_el1_irq_sp1

	.align	7, INV_INSN
workaround_el1_fiq_sp1:
	b	workaround_el1_fiq_sp1
	check_vector_size workaround_el1_fiq_sp1

	.align	7, INV_INSN
workaround_el1_serror_sp1:
	b	workaround_el1_serror_sp1
	check_vector_size workaround_el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el0_sync_a64:
	invalidate_branch_predictor
	b	el0_sync_a64
	check_vector_size workaround_el0_sync_a64

	.align	7, INV_INSN
workaround_el0_irq_a64:
	invalidate_branch_predictor
	b	el0_irq_a64
	check_vector_size workaround_el0_irq_a64

	.align	7, INV_INSN
workaround_el0_fiq_a64:
	invalidate_branch_predictor
	b	el0_fiq_a64
	check_vector_size workaround_el0_fiq_a64

	.align	7, INV_INSN
workaround_el0_serror_a64:
	b	workaround_el0_serror_a64
	check_vector_size workaround_el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el0_sync_a32:
	invalidate_branch_predictor
	b	el0_sync_a32
	check_vector_size workaround_el0_sync_a32

	.align	7, INV_INSN
workaround_el0_irq_a32:
	invalidate_branch_predictor
	b	el0_irq_a32
	check_vector_size workaround_el0_irq_a32

	.align	7, INV_INSN
workaround_el0_fiq_a32:
	invalidate_branch_predictor
	b	el0_fiq_a32
	check_vector_size workaround_el0_fiq_a32

	.align	7, INV_INSN
workaround_el0_serror_a32:
	b	workaround_el0_serror_a32
	check_vector_size workaround_el0_serror_a32
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

/*
 * We're keeping this code in the same section as the vector to make sure
 * that it's always available.
 */
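/*
 * eret_to_el0 expects x0 and x1 to be saved in struct thread_core_local
 * and the remaining general purpose registers to already hold their user
 * mode values. With CFG_CORE_UNMAP_CORE_AT_EL0 it moves VBAR_EL1 to the
 * reduced mapping copy of the vector, jumps into the reduced mapping and
 * selects the user mapping by adding CORE_MMU_L1_TBL_OFFSET to TTBR0_EL1
 * and setting the user ASID bit. Without that option only the ASID is
 * switched. Finally x0 and x1 are restored and the eret is issued.
 */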
eret_to_el0:

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point to the vector into the reduced mapping */
	adr_l	x0, thread_user_kcode_offset
	ldr	x0, [x0]
	mrs	x1, vbar_el1
	sub	x1, x1, x0
	msr	vbar_el1, x1
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/* Store the SP offset in tpidr_el1 to be used below to update SP */
	adr_l	x1, thread_user_kdata_sp_offset
	ldr	x1, [x1]
	msr	tpidr_el1, x1
#endif

	/* Jump into the reduced mapping and continue execution */
	ldr	x1, =1f
	sub	x1, x1, x0
	br	x1
1:

	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	msr	tpidrro_el0, x0

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x0, ttbr0_el1
	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/*
	 * Update the SP with thread_user_kdata_sp_offset as described in
	 * init_user_kcode().
	 */
	mrs	x0, tpidr_el1
	sub	sp, sp, x0
#endif

	mrs	x0, tpidrro_el0
#else
	mrs	x0, ttbr0_el1
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	return_from_exception

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active,
	 * which means executing with the reduced mapping, so the code needs
	 * to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	/* Mask all exceptions */
	mrs	x6, daif	/* this register must be preserved */
	msr	daifset, #DAIFBIT_ALL

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point to the vector into the reduced mapping */
	adr_l	x2, thread_user_kcode_offset
	ldr	x2, [x2]
	mrs	x4, vbar_el1	/* this register must be preserved */
	sub	x3, x4, x2
	msr	vbar_el1, x3
	isb

	/* Jump into the reduced mapping and continue execution */
	ldr	x3, =1f
	sub	x3, x3, x2
	br	x3
1:

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x5, ttbr0_el1	/* this register must be preserved */
	add	x2, x5, #CORE_MMU_L1_TBL_OFFSET
	orr	x2, x2, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x2
	isb

#else
	mrs	x5, ttbr0_el1	/* this register must be preserved */
	orr	x2, x5, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in x2 */
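	/*
	 * CTR_EL0.IminLine is the log2 of the number of words in the
	 * smallest instruction cache line, so the line size in bytes is
	 * CTR_WORD_SIZE shifted left by that value.
	 */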
	mrs	x3, ctr_el0
	and	x3, x3, #CTR_IMINLINE_MASK
	mov	x2, #CTR_WORD_SIZE
	lsl	x2, x2, x3

	add	x1, x0, x1
	sub	x3, x2, #1
	bic	x0, x0, x3
1:
	ic	ivau, x0
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	ish

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Update the mapping to use the full kernel mapping and ASID */
	msr	ttbr0_el1, x5
	isb

	/* Jump into the full mapping and continue execution */
	ldr	x0, =1f
	br	x0
1:

	/* Point to the vector into the full mapping */
	msr	vbar_el1, x4
	isb
#else
	/* switch to kernel mode ASID */
	msr	ttbr0_el1, x5
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	msr	daif, x6	/* restore exceptions */
	ret	/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect

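/*
 * el0_svc is entered from the lower EL synchronous exception vectors with
 * the original x0..x3 saved in struct thread_core_local and SP_EL1 still
 * selected. It switches to the kernel stack saved in THREAD_CTX_KERN_SP,
 * builds a struct thread_svc_regs on it, unmasks native interrupts and
 * calls thread_svc_handler(). On return the register state is reloaded
 * from the record and the eret is done directly, unless SPSR indicates a
 * return to EL0 in which case it goes via eret_to_el0.
 */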
LOCAL_FUNC el0_svc , :
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	mov	x0, sp

	/*
	 * Unmask native interrupts, SError, and debug exceptions since we
	 * have nothing left in sp_el1. Note that the SVC handler is expected
	 * to re-enable foreign interrupts by itself.
	 */
#if defined(CFG_ARM_GICV3)
	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
#else
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
#endif

	/* Call the handler */
	bl	thread_svc_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This is needed when this TA has called another TA because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
	mov	sp, x0
	b_if_spsr_is_el0 w1, 1f
	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	return_from_exception

1:	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC el0_svc

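/*
 * The abort handlers keep a small history of the core state in the
 * THREAD_CORE_LOCAL_FLAGS field: the current flags are shifted up by
 * THREAD_CLF_SAVED_SHIFT, the new state is ORed into the low bits and the
 * shift is undone when the handler returns. For an abort at EL1 this also
 * tells whether an abort is already being handled, in which case the tmp
 * stack is selected instead of the abort stack.
 */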
LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	return_from_exception
END_FUNC el1_sync_abort

	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x2 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3

	b_if_spsr_is_el0 w0, 1f

	/* Restore x0 to x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	return_from_exception
1:	b	eret_to_el0
END_FUNC el0_sync_abort

/* The handler of foreign interrupts. */
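/*
 * A foreign interrupt (an interrupt owned by the normal world) can't be
 * handled while the thread executes in the secure world. The thread state
 * is saved in the thread context, the thread is marked suspended with
 * thread_state_suspend() and the core exits to the normal world through
 * thread_foreign_intr_exit() running on the tmp stack. The thread is
 * resumed later once the normal world has serviced the interrupt.
 */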
.macro foreign_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which entries have been used in
	 * the cache. We're relying on the dispatcher in TF-A to take care of
	 * the BTB.
	 */
	mov	x0, #DCACHE_OP_CLEAN_INV
	bl	dcache_op_louis
	ic	iallu
#endif
	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * functions expect to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	/* Passing thread index in w0 */
	b	thread_foreign_intr_exit
.endm

/*
 * This struct is never used from C; it's only here to visualize the
 * layout.
 *
 * struct elx_nintr_rec {
 * 	uint64_t x[19 - 4]; x4..x18
 * 	uint64_t lr;
 * 	uint64_t sp_el0;
 * };
 */
#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)

/* The handler of native interrupts. */
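/*
 * A native interrupt (an interrupt handled by OP-TEE itself) is serviced
 * in place: the registers that a call to a C function may clobber are
 * saved in a struct elx_nintr_rec on the tmp stack, itr_core_handler() is
 * called and the saved state is restored before returning to the
 * interrupted context.
 */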
.macro native_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save the registers that can be clobbered by a call to a
	 * C function
	 */
	/* Make room for struct elx_nintr_rec */
	sub	sp, sp, #ELX_NINTR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]

	bl	thread_check_canaries
	bl	itr_core_handler

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	mrs	x0, spsr_el1
	/* Restore x2..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
	b_if_spsr_is_el0 w0, 1f

	/* Restore x0..x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	return_from_exception
1:	b	eret_to_el0
.endm

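/*
 * With CFG_ARM_GICV3 the native (secure) interrupts are delivered as IRQ
 * and the foreign ones as FIQ, without it the roles are swapped. elx_irq
 * and elx_fiq pick the matching handler accordingly.
 */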
LOCAL_FUNC elx_irq , :
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
END_FUNC elx_fiq
