xref: /optee_os/core/arch/arm/kernel/thread_a64.S (revision df24e6517b6454cf906c16979ea0e7546c5c99d5)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2015-2017, Linaro Limited
4 */
5
6#include <arm.h>
7#include <arm64_macros.S>
8#include <asm.S>
9#include <generated/asm-defines.h>
10#include <keep.h>
11#include <kernel/cache_helpers.h>
12#include <kernel/thread_defs.h>
13#include <mm/core_mmu.h>
14#include <smccc.h>
15
16#include "thread_private.h"
17
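	/*
	 * Computes a pointer to the struct thread_ctx of the thread
	 * currently running on this core, i.e. &threads[curr_thread], using
	 * the thread index stored in the core local storage referenced by
	 * \core_local. The result is placed in x\res; x\tmp0 and x\tmp1 are
	 * clobbered.
	 */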
18	.macro get_thread_ctx core_local, res, tmp0, tmp1
19		ldr	w\tmp0, [\core_local, \
20				#THREAD_CORE_LOCAL_CURR_THREAD]
21		ldr	x\res, =threads
22		mov	x\tmp1, #THREAD_CTX_SIZE
23		madd	x\res, x\tmp0, x\tmp1, x\res
24	.endm
25
26	.macro return_from_exception
27		eret
28		/* Guard against speculation past ERET */
29		dsb nsh
30		isb
31	.endm
32
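	/*
	 * Branches to \label if the SPSR value in \reg describes a return
	 * to EL0, that is, either AArch32 mode or AArch64 with the EL field
	 * set to 0. The condition flags are clobbered.
	 */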
33	.macro b_if_spsr_is_el0 reg, label
34		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
35		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
36		b.eq	\label
37	.endm
38
39/* void thread_resume(struct thread_ctx_regs *regs) */
40FUNC thread_resume , :
41	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
42	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
43	mov	sp, x1
44	msr	elr_el1, x2
45	msr	spsr_el1, x3
46
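	/*
	 * If we're resuming into EL0 we must leave through eret_to_el0 so
	 * that the user mode ASID (and, with CFG_CORE_UNMAP_CORE_AT_EL0,
	 * the reduced mapping) is activated before the eret.
	 */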
47	b_if_spsr_is_el0 w3, 1f
48
49	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
50	ldr	x0, [x0, THREAD_CTX_REGS_X0]
51	return_from_exception
52
531:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
54	ldr	x0, [x0, THREAD_CTX_REGS_X0]
55
56	msr	spsel, #1
57	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
58	b	eret_to_el0
59END_FUNC thread_resume
60
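/*
 * thread_smc() is a thin wrapper around the SMC instruction: arguments
 * and results are passed in registers as defined by the SMC Calling
 * Convention, the function itself only traps to the monitor and returns.
 */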
61FUNC thread_smc , :
62	smc	#0
63	ret
64END_FUNC thread_smc
65
66FUNC thread_init_vbar , :
67	msr	vbar_el1, x0
68	ret
69END_FUNC thread_init_vbar
70KEEP_PAGER thread_init_vbar
71
72/*
73 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
74 *				     uint32_t *exit_status0,
75 *				     uint32_t *exit_status1);
76 *
77 * This function depends on being called with exceptions masked.
78 */
79FUNC __thread_enter_user_mode , :
80	/*
81	 * Create and fill in the struct thread_user_mode_rec
82	 */
83	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
84	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 0, 2
85	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
86
87	/*
88	 * Save kern sp in x19
89	 * Switch to SP_EL1
90	 */
91	mov	x19, sp
92	msr	spsel, #1
93
94	/*
95	 * Save the kernel stack pointer in the thread context
96	 */
97	/* get pointer to current thread context */
98	get_thread_ctx sp, 21, 20, 22
99	/*
100	 * Save kernel stack pointer to ensure that el0_svc() uses
101	 * the correct stack pointer
102	 */
103	str	x19, [x21, #THREAD_CTX_KERN_SP]
104
105	/*
106	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
107	 */
108	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
109	msr	sp_el0, x1
110	msr	elr_el1, x2
111	msr	spsr_el1, x3
112
113	/*
114	 * Save the values for x0 and x1 in struct thread_core_local to be
115	 * restored later just before the eret.
116	 */
117	load_xregs x0, THREAD_CTX_REGS_X0, 1, 2
118	store_xregs sp, THREAD_CORE_LOCAL_X0, 1, 2
119
120	/* Load the rest of the general purpose registers */
121	load_xregs x0, THREAD_CTX_REGS_X2, 2, 30
122
123	/* Jump into user mode */
124	b eret_to_el0
125END_FUNC __thread_enter_user_mode
126KEEP_PAGER __thread_enter_user_mode
127
128/*
129 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
130 * 		uint32_t exit_status1);
131 * See description in thread.h
132 */
133FUNC thread_unwind_user_mode , :
134	/* Store the exit status */
135	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 3, 5
136	str	w1, [x4]
137	str	w2, [x5]
138	/* Save x19..x30 */
139	store_xregs x3, THREAD_CTX_REGS_X19, 19, 30
140	/* Restore x19..x30 */
141	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
142	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
143	/* Return from the call of thread_enter_user_mode() */
144	ret
145END_FUNC thread_unwind_user_mode
146
147	/*
148	 * This macro verifies that a given vector doesn't exceed the
149	 * architectural limit of 32 instructions. This is meant to be placed
150	 * immediately after the last instruction in the vector. It takes the
151	 * vector entry as the parameter.
152	 */
153	.macro check_vector_size since
154	  .if (. - \since) > (32 * 4)
155	    .error "Vector exceeds 32 instructions"
156	  .endif
157	.endm
158
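	/*
	 * Used at the start of the exception vectors taken from a lower EL:
	 * switches back to the kernel ASID (and, with
	 * CFG_CORE_UNMAP_CORE_AT_EL0, to the full kernel mapping and the
	 * vector inside that mapping) and saves x0..x3 in the core local
	 * storage pointed to by SP_EL1.
	 */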
159	.macro restore_mapping
160#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
161		/* Temporarily save x0, x1 */
162		msr	tpidr_el1, x0
163		msr	tpidrro_el0, x1
164
165		/* Update the mapping to use the full kernel mapping */
166		mrs	x0, ttbr0_el1
167		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
168		/* switch to kernel mode ASID */
169		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
170		msr	ttbr0_el1, x0
171		isb
172
173		/* Jump into the full mapping and continue execution */
174		ldr	x0, =1f
175		br	x0
176	1:
177
178		/* Point to the vector into the full mapping */
179		adr	x0, thread_user_kcode_offset
180		ldr	x0, [x0]
181		mrs	x1, vbar_el1
182		add	x1, x1, x0
183		msr	vbar_el1, x1
184		isb
185
186#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
187		/*
188		 * Update the SP with thread_user_kdata_sp_offset as
189		 * described in init_user_kcode().
190		 */
191		adr	x0, thread_user_kdata_sp_offset
192		ldr	x0, [x0]
193		add	sp, sp, x0
194#endif
195
196		/* Restore x0, x1 */
197		mrs	x0, tpidr_el1
198		mrs	x1, tpidrro_el0
199		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
200#else
201		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
202		mrs	x0, ttbr0_el1
203		/* switch to kernel mode ASID */
204		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
205		msr	ttbr0_el1, x0
206		isb
207#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
208	.endm
209
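/*
 * The vector table must be 2 KiB aligned (required by VBAR_EL1) and each
 * entry spans 128 bytes (32 instructions). Padding is filled with 0x0,
 * which is a permanently undefined instruction encoding.
 */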
210#define INV_INSN	0
211	.section .text.thread_excp_vect
212	.align	11, INV_INSN
213FUNC thread_excp_vect , :
214	/* -----------------------------------------------------
215	 * EL1 with SP0 : 0x0 - 0x180
216	 * -----------------------------------------------------
217	 */
218	.align	7, INV_INSN
219el1_sync_sp0:
220	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
221	b	el1_sync_abort
222	check_vector_size el1_sync_sp0
223
224	.align	7, INV_INSN
225el1_irq_sp0:
226	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
227	b	elx_irq
228	check_vector_size el1_irq_sp0
229
230	.align	7, INV_INSN
231el1_fiq_sp0:
232	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
233	b	elx_fiq
234	check_vector_size el1_fiq_sp0
235
236	.align	7, INV_INSN
237el1_serror_sp0:
238	b	el1_serror_sp0
239	check_vector_size el1_serror_sp0
240
241	/* -----------------------------------------------------
242	 * Current EL with SP1: 0x200 - 0x380
243	 * -----------------------------------------------------
244	 */
245	.align	7, INV_INSN
246el1_sync_sp1:
247	b	el1_sync_sp1
248	check_vector_size el1_sync_sp1
249
250	.align	7, INV_INSN
251el1_irq_sp1:
252	b	el1_irq_sp1
253	check_vector_size el1_irq_sp1
254
255	.align	7, INV_INSN
256el1_fiq_sp1:
257	b	el1_fiq_sp1
258	check_vector_size el1_fiq_sp1
259
260	.align	7, INV_INSN
261el1_serror_sp1:
262	b	el1_serror_sp1
263	check_vector_size el1_serror_sp1
264
265	/* -----------------------------------------------------
266	 * Lower EL using AArch64 : 0x400 - 0x580
267	 * -----------------------------------------------------
268	 */
269	.align	7, INV_INSN
270el0_sync_a64:
271	restore_mapping
272
273	mrs	x2, esr_el1
274	mrs	x3, sp_el0
275	lsr	x2, x2, #ESR_EC_SHIFT
276	cmp	x2, #ESR_EC_AARCH64_SVC
277	b.eq	el0_svc
278	b	el0_sync_abort
279	check_vector_size el0_sync_a64
280
281	.align	7, INV_INSN
282el0_irq_a64:
283	restore_mapping
284
285	b	elx_irq
286	check_vector_size el0_irq_a64
287
288	.align	7, INV_INSN
289el0_fiq_a64:
290	restore_mapping
291
292	b	elx_fiq
293	check_vector_size el0_fiq_a64
294
295	.align	7, INV_INSN
296el0_serror_a64:
297	b	el0_serror_a64
298	check_vector_size el0_serror_a64
299
300	/* -----------------------------------------------------
301	 * Lower EL using AArch32 : 0x600 - 0x780
302	 * -----------------------------------------------------
303	 */
304	.align	7, INV_INSN
305el0_sync_a32:
306	restore_mapping
307
308	mrs	x2, esr_el1
309	mrs	x3, sp_el0
310	lsr	x2, x2, #ESR_EC_SHIFT
311	cmp	x2, #ESR_EC_AARCH32_SVC
312	b.eq	el0_svc
313	b	el0_sync_abort
314	check_vector_size el0_sync_a32
315
316	.align	7, INV_INSN
317el0_irq_a32:
318	restore_mapping
319
320	b	elx_irq
321	check_vector_size el0_irq_a32
322
323	.align	7, INV_INSN
324el0_fiq_a32:
325	restore_mapping
326
327	b	elx_fiq
328	check_vector_size el0_fiq_a32
329
330	.align	7, INV_INSN
331el0_serror_a32:
332	b	el0_serror_a32
333	check_vector_size el0_serror_a32
334
335#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
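	/*
	 * Alternative vector table for the CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	 * mitigation: entries taken from a lower EL first invalidate the
	 * branch predictor through the SMCCC_ARCH_WORKAROUND_1 SMC and then
	 * branch to the corresponding entry in the regular vector above.
	 */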
336	.macro invalidate_branch_predictor
337		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
338		mov_imm	x0, SMCCC_ARCH_WORKAROUND_1
339		smc	#0
340		load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
341	.endm
342
343	.align	11, INV_INSN
344	.global thread_excp_vect_workaround
345thread_excp_vect_workaround:
346	/* -----------------------------------------------------
347	 * EL1 with SP0 : 0x0 - 0x180
348	 * -----------------------------------------------------
349	 */
350	.align	7, INV_INSN
351workaround_el1_sync_sp0:
352	b	el1_sync_sp0
353	check_vector_size workaround_el1_sync_sp0
354
355	.align	7, INV_INSN
356workaround_el1_irq_sp0:
357	b	el1_irq_sp0
358	check_vector_size workaround_el1_irq_sp0
359
360	.align	7, INV_INSN
361workaround_el1_fiq_sp0:
362	b	el1_fiq_sp0
363	check_vector_size workaround_el1_fiq_sp0
364
365	.align	7, INV_INSN
366workaround_el1_serror_sp0:
367	b	el1_serror_sp0
368	check_vector_size workaround_el1_serror_sp0
369
370	/* -----------------------------------------------------
371	 * Current EL with SP1: 0x200 - 0x380
372	 * -----------------------------------------------------
373	 */
374	.align	7, INV_INSN
375workaround_el1_sync_sp1:
376	b	workaround_el1_sync_sp1
377	check_vector_size workaround_el1_sync_sp1
378
379	.align	7, INV_INSN
380workaround_el1_irq_sp1:
381	b	workaround_el1_irq_sp1
382	check_vector_size workaround_el1_irq_sp1
383
384	.align	7, INV_INSN
385workaround_el1_fiq_sp1:
386	b	workaround_el1_fiq_sp1
387	check_vector_size workaround_el1_fiq_sp1
388
389	.align	7, INV_INSN
390workaround_el1_serror_sp1:
391	b	workaround_el1_serror_sp1
392	check_vector_size workaround_el1_serror_sp1
393
394	/* -----------------------------------------------------
395	 * Lower EL using AArch64 : 0x400 - 0x580
396	 * -----------------------------------------------------
397	 */
398	.align	7, INV_INSN
399workaround_el0_sync_a64:
400	invalidate_branch_predictor
401	b	el0_sync_a64
402	check_vector_size workaround_el0_sync_a64
403
404	.align	7, INV_INSN
405workaround_el0_irq_a64:
406	invalidate_branch_predictor
407	b	el0_irq_a64
408	check_vector_size workaround_el0_irq_a64
409
410	.align	7, INV_INSN
411workaround_el0_fiq_a64:
412	invalidate_branch_predictor
413	b	el0_fiq_a64
414	check_vector_size workaround_el0_fiq_a64
415
416	.align	7, INV_INSN
417workaround_el0_serror_a64:
418	b	workaround_el0_serror_a64
419	check_vector_size workaround_el0_serror_a64
420
421	/* -----------------------------------------------------
422	 * Lower EL using AArch32 : 0x600 - 0x780
423	 * -----------------------------------------------------
424	 */
425	.align	7, INV_INSN
426workaround_el0_sync_a32:
427	invalidate_branch_predictor
428	b	el0_sync_a32
429	check_vector_size workaround_el0_sync_a32
430
431	.align	7, INV_INSN
432workaround_el0_irq_a32:
433	invalidate_branch_predictor
434	b	el0_irq_a32
435	check_vector_size workaround_el0_irq_a32
436
437	.align	7, INV_INSN
438workaround_el0_fiq_a32:
439	invalidate_branch_predictor
440	b	el0_fiq_a32
441	check_vector_size workaround_el0_fiq_a32
442
443	.align	7, INV_INSN
444workaround_el0_serror_a32:
445	b	workaround_el0_serror_a32
446	check_vector_size workaround_el0_serror_a32
447#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
448
449/*
450 * We're keeping this code in the same section as the vector to make sure
451 * that it's always available.
452 */
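/*
 * eret_to_el0 expects to be entered with SP_EL1 selected, with the x0 and
 * x1 values to hand back to user space stashed in THREAD_CORE_LOCAL_X0/X1,
 * and with ELR_EL1, SPSR_EL1 and SP_EL0 already set up by the caller.
 */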
453eret_to_el0:
454
455#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
456	/* Point to the vector into the reduced mapping */
457	adr	x0, thread_user_kcode_offset
458	ldr	x0, [x0]
459	mrs	x1, vbar_el1
460	sub	x1, x1, x0
461	msr	vbar_el1, x1
462	isb
463
464#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
465	/* Store the SP offset in tpidr_el1 to be used below to update SP */
466	adr	x1, thread_user_kdata_sp_offset
467	ldr	x1, [x1]
468	msr	tpidr_el1, x1
469#endif
470
471	/* Jump into the reduced mapping and continue execution */
472	ldr	x1, =1f
473	sub	x1, x1, x0
474	br	x1
4751:
476
477	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
478	msr	tpidrro_el0, x0
479
480	/* Update the mapping to exclude the full kernel mapping */
481	mrs	x0, ttbr0_el1
482	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
483	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
484	msr	ttbr0_el1, x0
485	isb
486
487#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
488	/*
489	 * Update the SP with thread_user_kdata_sp_offset as described in
490	 * init_user_kcode().
491	 */
492	mrs	x0, tpidr_el1
493	sub	sp, sp, x0
494#endif
495
496	mrs	x0, tpidrro_el0
497#else
498	mrs	x0, ttbr0_el1
499	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
500	msr	ttbr0_el1, x0
501	isb
502	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
503#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
504
505	return_from_exception
506
507	/*
508	 * void icache_inv_user_range(void *addr, size_t size);
509	 *
510	 * This function has to execute with the user space ASID active,
511	 * which means executing with the reduced mapping, so the code needs
512	 * to be located here together with the vector.
513	 */
514	.global icache_inv_user_range
515	.type icache_inv_user_range , %function
516icache_inv_user_range:
517	/* Mask all exceptions */
518	mrs	x6, daif	/* this register must be preserved */
519	msr	daifset, #DAIFBIT_ALL
520
521#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
522	/* Point to the vector into the reduced mapping */
523	adr	x2, thread_user_kcode_offset
524	ldr	x2, [x2]
525	mrs	x4, vbar_el1	/* this register must be preserved */
526	sub	x3, x4, x2
527	msr	vbar_el1, x3
528	isb
529
530	/* Jump into the reduced mapping and continue execution */
531	ldr	x3, =1f
532	sub	x3, x3, x2
533	br	x3
5341:
535
536	/* Update the mapping to exclude the full kernel mapping */
537	mrs	x5, ttbr0_el1	/* this register must be preserved */
538	add	x2, x5, #CORE_MMU_L1_TBL_OFFSET
539	orr	x2, x2, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
540	msr	ttbr0_el1, x2
541	isb
542
543#else
544	mrs	x5, ttbr0_el1	/* this register must be preserved */
545	orr	x2, x5, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
546	msr	ttbr0_el1, x2
547	isb
548#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
549
550	/*
551	 * Do the actual icache invalidation
552	 */
553
554	/* Calculate minimum icache line size, result in x2 */
555	mrs	x3, ctr_el0
556	and	x3, x3, #CTR_IMINLINE_MASK
557	mov	x2, #CTR_WORD_SIZE
558	lsl	x2, x2, x3
559
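	/* x1 = end address, then align x0 down to a cache line boundary */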
560	add	x1, x0, x1
561	sub	x3, x2, #1
562	bic	x0, x0, x3
5631:
564	ic	ivau, x0
565	add	x0, x0, x2
566	cmp	x0, x1
567	b.lo    1b
568	dsb	ish
569
570#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
571	/* Update the mapping to use the full kernel mapping and ASID */
572	msr	ttbr0_el1, x5
573	isb
574
575	/* Jump into the full mapping and continue execution */
576	ldr	x0, =1f
577	br	x0
5781:
579
580	/* Point to the vector into the full mapping */
581	msr	vbar_el1, x4
582	isb
583#else
584	/* switch to kernel mode ASID */
585	msr	ttbr0_el1, x5
586	isb
587#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
588
589	msr	daif, x6	/* restore exceptions */
590	ret	/* End of icache_inv_user_range() */
591
592	/*
593	 * Make sure that literals are placed before the
594	 * thread_excp_vect_end label.
595	 */
596	.pool
597	.global thread_excp_vect_end
598thread_excp_vect_end:
599END_FUNC thread_excp_vect
600
601LOCAL_FUNC el0_svc , :
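	/*
	 * Entered from the el0 sync vectors with SP_EL1 selected and the
	 * user's x0..x3 saved in THREAD_CORE_LOCAL_X0..X3.
	 */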
602	/* get pointer to current thread context in x0 */
603	get_thread_ctx sp, 0, 1, 2
604	/* load saved kernel sp */
605	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
606	/* Keep pointer to initial record in x1 */
607	mov	x1, sp
608	/* Switch to SP_EL0 and restore kernel sp */
609	msr	spsel, #0
610	mov	x2, sp	/* Save SP_EL0 */
611	mov	sp, x0
612
613	/* Make room for struct thread_svc_regs */
614	sub	sp, sp, #THREAD_SVC_REG_SIZE
615	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]
616
617	/* Restore x0-x3 */
618	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
619	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]
620
621	/* Prepare the argument for the handler */
622	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
623	mrs	x0, elr_el1
624	mrs	x1, spsr_el1
625	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
626	mov	x0, sp
627
628	/*
629	 * Unmask native interrupts, SError, and debug exceptions since we have
630	 * nothing left in sp_el1. Note that the SVC handler is expected to
631	 * re-enable foreign interrupts by itself.
632	 */
633#if defined(CFG_ARM_GICV3)
634	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
635#else
636	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
637#endif
638
639	/* Call the handler */
640	bl	thread_svc_handler
641
642	/* Mask all maskable exceptions since we're switching back to sp_el1 */
643	msr	daifset, #DAIFBIT_ALL
644
645	/*
646	 * Save the kernel sp we had at the beginning of this function.
647	 * This matters when this TA has called another TA, because
648	 * __thread_enter_user_mode() also saves the stack pointer in this
649	 * field.
650	 */
651	msr	spsel, #1
652	get_thread_ctx sp, 0, 1, 2
653	msr	spsel, #0
654	add	x1, sp, #THREAD_SVC_REG_SIZE
655	str	x1, [x0, #THREAD_CTX_KERN_SP]
656
657	/* Restore registers to the required state and return */
658	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
659	msr	elr_el1, x0
660	msr	spsr_el1, x1
661	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
662	mov	x30, sp
663	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
664	mov	sp, x0
665	b_if_spsr_is_el0 w1, 1f
666	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
667	ldr	x30, [x30, #THREAD_SVC_REG_X30]
668
669	return_from_exception
670
6711:	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
672	ldr	x30, [x30, #THREAD_SVC_REG_X30]
673
674	msr	spsel, #1
675	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
676	b	eret_to_el0
677END_FUNC el0_svc
678
679LOCAL_FUNC el1_sync_abort , :
680	mov	x0, sp
681	msr	spsel, #0
682	mov	x3, sp		/* Save original sp */
683
684	/*
685	 * Update core local flags.
686	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
687	 */
688	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
689	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
690	orr	w1, w1, #THREAD_CLF_ABORT
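	/*
	 * If an abort was already being handled (the saved flags have
	 * THREAD_CLF_ABORT set) this is a nested abort, so use the tmp
	 * stack instead of the abort stack.
	 */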
691	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
692			.Lsel_tmp_sp
693
694	/* Select abort stack */
695	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
696	b	.Lset_sp
697
698.Lsel_tmp_sp:
699	/* Select tmp stack */
700	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
701	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */
702
703.Lset_sp:
704	mov	sp, x2
705	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
706
707	/*
708	 * Save state on stack
709	 */
710	sub	sp, sp, #THREAD_ABT_REGS_SIZE
711	mrs	x2, spsr_el1
712	/* Store spsr, sp_el0 */
713	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
714	/* Store original x0, x1 */
715	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
716	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
717	/* Store original x2, x3 and x4 to x29 */
718	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
719	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
720	/* Store x30, elr_el1 */
721	mrs	x0, elr_el1
722	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
723
724	/*
725	 * Call handler
726	 */
727	mov	x0, #0
728	mov	x1, sp
729	bl	abort_handler
730
731	/*
732	 * Restore state from stack
733	 */
734	/* Load x30, elr_el1 */
735	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
736	msr	elr_el1, x0
737	/* Load x0 to x29 */
738	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
739	/* Switch to SP_EL1 */
740	msr	spsel, #1
741	/* Save x0 to x3 in CORE_LOCAL */
742	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
743	/* Restore spsr_el1 and sp_el0 */
744	mrs	x3, sp_el0
745	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
746	msr	spsr_el1, x0
747	msr	sp_el0, x1
748
749	/* Update core local flags */
750	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
751	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
752	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
753
754	/* Restore x0 to x3 */
755	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
756
757	/* Return from exception */
758	return_from_exception
759END_FUNC el1_sync_abort
760
761	/* sp_el0 in x3 */
762LOCAL_FUNC el0_sync_abort , :
763	/*
764	 * Update core local flags
765	 */
766	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
767	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
768	orr	w1, w1, #THREAD_CLF_ABORT
769	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
770
771	/*
772	 * Save state on stack
773	 */
774
775	/* load abt_stack_va_end */
776	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
777	/* Keep pointer to initial record in x0 */
778	mov	x0, sp
779	/* Switch to SP_EL0 */
780	msr	spsel, #0
781	mov	sp, x1
782	sub	sp, sp, #THREAD_ABT_REGS_SIZE
783	mrs	x2, spsr_el1
784	/* Store spsr, sp_el0 */
785	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
786	/* Store original x0, x1 */
787	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
788	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
789	/* Store original x2, x3 and x4 to x29 */
790	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
791	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
792	/* Store x30, elr_el1 */
793	mrs	x0, elr_el1
794	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
795
796	/*
797	 * Call handler
798	 */
799	mov	x0, #0
800	mov	x1, sp
801	bl	abort_handler
802
803	/*
804	 * Restore state from stack
805	 */
806
807	/* Load x30, elr_el1 */
808	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
809	msr	elr_el1, x0
810	/* Load x0 to x29 */
811	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
812	/* Switch to SP_EL1 */
813	msr	spsel, #1
814	/* Save x0 to x3 in EL1_REC */
815	/* Save x0 to x3 in CORE_LOCAL */
816	/* Restore spsr_el1 and sp_el0 */
817	mrs	x3, sp_el0
818	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
819	msr	spsr_el1, x0
820	msr	sp_el0, x1
821
822	/* Update core local flags */
823	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
824	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
825	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
826
827	/* Restore x2 to x3 */
828	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
829
830	b_if_spsr_is_el0 w0, 1f
831
832	/* Restore x0 to x1 */
833	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
834
835	/* Return from exception */
836	return_from_exception
8371:	b	eret_to_el0
838END_FUNC el0_sync_abort
839
840/* The handler of foreign interrupts. */
841.macro foreign_intr_handler mode:req
842	/*
843	 * Update core local flags
844	 */
845	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
846	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
847	orr	w1, w1, #THREAD_CLF_TMP
848	.ifc	\mode\(),fiq
849	orr	w1, w1, #THREAD_CLF_FIQ
850	.else
851	orr	w1, w1, #THREAD_CLF_IRQ
852	.endif
853	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
854
855	/* get pointer to current thread context in x0 */
856	get_thread_ctx sp, 0, 1, 2
857	/* Keep original SP_EL0 */
858	mrs	x2, sp_el0
859
860	/* Store original sp_el0 */
861	str	x2, [x0, #THREAD_CTX_REGS_SP]
862	/* store x4..x30 */
863	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
864	/* Load original x0..x3 into x10..x13 */
865	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
866	/* Save original x0..x3 */
867	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13
868
869	/* load tmp_stack_va_end */
870	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
871	/* Switch to SP_EL0 */
872	msr	spsel, #0
873	mov	sp, x1
874
875#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
876	/*
877	 * Prevent leaking information about which cache entries have been
878	 * used. We're relying on the dispatcher in TF-A to take care of
879	 * the BTB.
880	 */
881	mov	x0, #DCACHE_OP_CLEAN_INV
882	bl	dcache_op_louis
883	ic	iallu
884#endif
885	/*
886	 * Mark current thread as suspended
887	 */
888	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
889	mrs	x1, spsr_el1
890	mrs	x2, elr_el1
891	bl	thread_state_suspend
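	/* thread_state_suspend() returns the suspended thread's index in w0 */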
892
893	/* Update core local flags */
894	/* Switch to SP_EL1 */
895	msr	spsel, #1
896	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
897	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
898	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
899	msr	spsel, #0
900
901	/*
902	 * Note that we're exiting with SP_EL0 selected since the entry
903	 * functions expect to have SP_EL0 selected with the tmp stack
904	 * set.
905	 */
906
907	/* Passing thread index in w0 */
908	b	thread_foreign_intr_exit
909.endm
910
911/*
912 * This struct is never used from C; it's only here to visualize the
913 * layout.
914 *
915 * struct elx_nintr_rec {
916 * 	uint64_t x[19 - 4]; x4..x18
917 * 	uint64_t lr;
918 * 	uint64_t sp_el0;
919 * };
920 */
921#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
922#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
923#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
924#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)
925
926/* The handler of native interrupts. */
927.macro native_intr_handler mode:req
928	/*
929	 * Update core local flags
930	 */
931	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
932	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
933	.ifc	\mode\(),fiq
934	orr	w1, w1, #THREAD_CLF_FIQ
935	.else
936	orr	w1, w1, #THREAD_CLF_IRQ
937	.endif
938	orr	w1, w1, #THREAD_CLF_TMP
939	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
940
941	/* load tmp_stack_va_end */
942	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
943	/* Keep original SP_EL0 */
944	mrs	x2, sp_el0
945	/* Switch to SP_EL0 */
946	msr	spsel, #0
947	mov	sp, x1
948
949	/*
950	 * Save registers on stack that can be corrupted by a call to
951	 * a C function
952	 */
953	/* Make room for struct elx_nintr_rec */
954	sub	sp, sp, #ELX_NINTR_REC_SIZE
955	/* Store x4..x18 */
956	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
957	/* Store lr and original sp_el0 */
958	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]
959
960	bl	thread_check_canaries
961	bl	itr_core_handler
962
963	/*
964	 * Restore registers
965	 */
966	/* Restore x4..x18 */
967	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
968	/* Load lr and original sp_el0 */
969	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
970	/* Restore SP_EL0 */
971	mov	sp, x2
972	/* Switch back to SP_EL1 */
973	msr	spsel, #1
974
975	/* Update core local flags */
976	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
977	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
978	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
979
980	mrs	x0, spsr_el1
981	/* Restore x2..x3 */
982	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
983	b_if_spsr_is_el0 w0, 1f
984
985	/* Restore x0..x1 */
986	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
987
988	/* Return from exception */
989	return_from_exception
9901:	b	eret_to_el0
991.endm
992
993LOCAL_FUNC elx_irq , :
994#if defined(CFG_ARM_GICV3)
995	native_intr_handler	irq
996#else
997	foreign_intr_handler	irq
998#endif
999END_FUNC elx_irq
1000
1001LOCAL_FUNC elx_fiq , :
1002#if defined(CFG_ARM_GICV3)
1003	foreign_intr_handler	fiq
1004#else
1005	native_intr_handler	fiq
1006#endif
1007END_FUNC elx_fiq
1008