/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2017, Linaro Limited
 */

#include <arm.h>
#include <arm64_macros.S>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread_defs.h>
#include <mm/core_mmu.h>
#include <smccc.h>

#include "thread_private.h"

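/*
 * Compute a pointer to the current thread's struct thread_ctx, roughly
 * the C expression &threads[core_local->curr_thread] (a sketch assuming
 * THREAD_CTX_SIZE == sizeof(struct thread_ctx)). \core_local holds the
 * struct thread_core_local pointer, x\res receives the result and
 * x\tmp0/x\tmp1 are used as scratch registers.
 */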
	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldr	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		ldr	x\res, =threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm

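/*
 * Branch to \label if the SPSR value in \reg describes an exception
 * return to user space, that is, AArch32 (the RW bit is set) or AArch64
 * with the EL field equal to EL0.
 */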
	.macro b_if_spsr_is_el0 reg, label
		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
		b.eq	\label
	.endm

/* void thread_resume(struct thread_ctx_regs *regs) */
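/*
 * Restores the state saved in *regs and erets into it, so this call
 * never returns to its caller.
 */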
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3

	b_if_spsr_is_el0 w3, 1f

	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]
	eret

1:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC thread_resume

FUNC thread_smc , :
	smc	#0
	ret
END_FUNC thread_smc

FUNC thread_init_vbar , :
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 *
 * This function depends on being called with exceptions masked.
 */
FUNC __thread_enter_user_mode , :
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 0, 2
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Save kern sp in x19
	 * Switch to SP_EL1
	 */
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save kernel stack pointer to ensure that el0_svc() uses
	 * correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	msr	sp_el0, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3

	/*
	 * Save the values for x0 and x1 in struct thread_core_local to be
	 * restored later just before the eret.
	 */
	load_xregs x0, THREAD_CTX_REGS_X0, 1, 2
	store_xregs sp, THREAD_CORE_LOCAL_X0, 1, 2

	/* Load the rest of the general purpose registers */
	load_xregs x0, THREAD_CTX_REGS_X2, 2, 30

	/* Jump into user mode */
	b eret_to_el0
END_FUNC __thread_enter_user_mode
KEEP_PAGER __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 * 		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 3, 5
	str	w1, [x4]
	str	w2, [x5]
	/* Save x19..x30 */
	store_xregs x3, THREAD_CTX_REGS_X19, 19, 30
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. This is meant to be placed
	 * immediately after the last instruction in the vector. It takes the
	 * vector entry as the parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

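	/*
	 * restore_mapping is used when an exception is taken from user
	 * space: it switches TTBR0_EL1 back to the kernel mode ASID (and,
	 * with CFG_CORE_UNMAP_CORE_AT_EL0, back to the full translation
	 * table and the vector in the full mapping) and saves x0..x3 in
	 * struct thread_core_local.
	 */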
	.macro restore_mapping
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/* Temporarily save x0, x1 */
		msr	tpidr_el1, x0
		msr	tpidrro_el0, x1

		/* Update the mapping to use the full kernel mapping */
		mrs	x0, ttbr0_el1
		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb

		/* Jump into the full mapping and continue execution */
		ldr	x0, =1f
		br	x0
	1:

		/* Point to the vector into the full mapping */
		adr	x0, thread_user_kcode_offset
		ldr	x0, [x0]
		mrs	x1, vbar_el1
		add	x1, x1, x0
		msr	vbar_el1, x1
		isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
		/*
		 * Update the SP with thread_user_kdata_sp_offset as
		 * described in init_user_kcode().
		 */
		adr	x0, thread_user_kdata_sp_offset
		ldr	x0, [x0]
		add	sp, sp, x0
#endif

		/* Restore x0, x1 */
		mrs	x0, tpidr_el1
		mrs	x1, tpidrro_el0
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
#else
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mrs	x0, ttbr0_el1
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	.endm

#define INV_INSN	0
	.section .text.thread_excp_vect
	.align	11, INV_INSN
FUNC thread_excp_vect , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el1_sync_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size el1_sync_sp0

	.align	7, INV_INSN
el1_irq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el1_irq_sp0

	.align	7, INV_INSN
el1_fiq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el1_fiq_sp0

	.align	7, INV_INSN
el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el1_sync_sp1:
	b	el1_sync_sp1
	check_vector_size el1_sync_sp1

	.align	7, INV_INSN
el1_irq_sp1:
	b	el1_irq_sp1
	check_vector_size el1_irq_sp1

	.align	7, INV_INSN
el1_fiq_sp1:
	b	el1_fiq_sp1
	check_vector_size el1_fiq_sp1

	.align	7, INV_INSN
el1_serror_sp1:
	b	el1_serror_sp1
	check_vector_size el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el0_sync_a64:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a64

	.align	7, INV_INSN
el0_irq_a64:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a64

	.align	7, INV_INSN
el0_fiq_a64:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a64

	.align	7, INV_INSN
el0_serror_a64:
	b	el0_serror_a64
	check_vector_size el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el0_sync_a32:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7, INV_INSN
el0_irq_a32:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7, INV_INSN
el0_fiq_a32:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7, INV_INSN
el0_serror_a32:
	b	el0_serror_a32
	check_vector_size el0_serror_a32

#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
	.macro invalidate_branch_predictor
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mov_imm	x0, SMCCC_ARCH_WORKAROUND_1
		smc	#0
		load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	.endm

	.align	11, INV_INSN
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el1_sync_sp0:
	b	el1_sync_sp0
	check_vector_size workaround_el1_sync_sp0

	.align	7, INV_INSN
workaround_el1_irq_sp0:
	b	el1_irq_sp0
	check_vector_size workaround_el1_irq_sp0

	.align	7, INV_INSN
workaround_el1_fiq_sp0:
	b	el1_fiq_sp0
	check_vector_size workaround_el1_fiq_sp0

	.align	7, INV_INSN
workaround_el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size workaround_el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el1_sync_sp1:
	b	workaround_el1_sync_sp1
	check_vector_size workaround_el1_sync_sp1

	.align	7, INV_INSN
workaround_el1_irq_sp1:
	b	workaround_el1_irq_sp1
	check_vector_size workaround_el1_irq_sp1

	.align	7, INV_INSN
workaround_el1_fiq_sp1:
	b	workaround_el1_fiq_sp1
	check_vector_size workaround_el1_fiq_sp1

	.align	7, INV_INSN
workaround_el1_serror_sp1:
	b	workaround_el1_serror_sp1
	check_vector_size workaround_el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el0_sync_a64:
	invalidate_branch_predictor
	b	el0_sync_a64
	check_vector_size workaround_el0_sync_a64

	.align	7, INV_INSN
workaround_el0_irq_a64:
	invalidate_branch_predictor
	b	el0_irq_a64
	check_vector_size workaround_el0_irq_a64

	.align	7, INV_INSN
workaround_el0_fiq_a64:
	invalidate_branch_predictor
	b	el0_fiq_a64
	check_vector_size workaround_el0_fiq_a64

	.align	7, INV_INSN
workaround_el0_serror_a64:
	b	workaround_el0_serror_a64
	check_vector_size workaround_el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el0_sync_a32:
	invalidate_branch_predictor
	b	el0_sync_a32
	check_vector_size workaround_el0_sync_a32

	.align	7, INV_INSN
workaround_el0_irq_a32:
	invalidate_branch_predictor
	b	el0_irq_a32
	check_vector_size workaround_el0_irq_a32

	.align	7, INV_INSN
workaround_el0_fiq_a32:
	invalidate_branch_predictor
	b	el0_fiq_a32
	check_vector_size workaround_el0_fiq_a32

	.align	7, INV_INSN
workaround_el0_serror_a32:
	b	workaround_el0_serror_a32
	check_vector_size workaround_el0_serror_a32
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

/*
 * We're keeping this code in the same section as the vector to make sure
 * that it's always available.
 */
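/*
 * eret_to_el0 performs the actual transition back to user space: it
 * switches TTBR0_EL1 to the user mode ASID (and, with
 * CFG_CORE_UNMAP_CORE_AT_EL0, to the reduced mapping and its vector),
 * restores x0/x1 from struct thread_core_local and erets. It expects to
 * be entered with SP_EL1 selected and x0/x1 saved in
 * THREAD_CORE_LOCAL_X0.
 */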
eret_to_el0:

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point to the vector into the reduced mapping */
	adr	x0, thread_user_kcode_offset
	ldr	x0, [x0]
	mrs	x1, vbar_el1
	sub	x1, x1, x0
	msr	vbar_el1, x1
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/* Store the SP offset in tpidr_el1 to be used below to update SP */
	adr	x1, thread_user_kdata_sp_offset
	ldr	x1, [x1]
	msr	tpidr_el1, x1
#endif

	/* Jump into the reduced mapping and continue execution */
	ldr	x1, =1f
	sub	x1, x1, x0
	br	x1
1:

	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	msr	tpidrro_el0, x0

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x0, ttbr0_el1
	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/*
	 * Update the SP with thread_user_kdata_sp_offset as described in
	 * init_user_kcode().
	 */
	mrs	x0, tpidr_el1
	sub	sp, sp, x0
#endif

	mrs	x0, tpidrro_el0
#else
	mrs	x0, ttbr0_el1
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	eret

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active,
	 * which means executing with the reduced mapping, so the code
	 * needs to be located here together with the vector.
	 */
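	/*
	 * The invalidation itself rounds the start address down to the
	 * minimum I-cache line size reported by CTR_EL0 and then issues
	 * "ic ivau" for every line in the range, followed by "dsb ish".
	 */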
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	/* Mask all exceptions */
	mrs	x6, daif	/* this register must be preserved */
	msr	daifset, #DAIFBIT_ALL

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point to the vector into the reduced mapping */
	adr	x2, thread_user_kcode_offset
	ldr	x2, [x2]
	mrs	x4, vbar_el1	/* this register must be preserved */
	sub	x3, x4, x2
	msr	vbar_el1, x3
	isb

	/* Jump into the reduced mapping and continue execution */
	ldr	x3, =1f
	sub	x3, x3, x2
	br	x3
1:

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x5, ttbr0_el1	/* this register must be preserved */
	add	x2, x5, #CORE_MMU_L1_TBL_OFFSET
	orr	x2, x2, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x2
	isb

#else
	mrs	x5, ttbr0_el1	/* this register must be preserved */
	orr	x2, x5, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in x2 */
	mrs	x3, ctr_el0
	and	x3, x3, #CTR_IMINLINE_MASK
	mov	x2, #CTR_WORD_SIZE
	lsl	x2, x2, x3

	add	x1, x0, x1
	sub	x3, x2, #1
	bic	x0, x0, x3
1:
	ic	ivau, x0
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	ish

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Update the mapping to use the full kernel mapping and ASID */
	msr	ttbr0_el1, x5
	isb

	/* Jump into the full mapping and continue execution */
	ldr	x0, =1f
	br	x0
1:

	/* Point to the vector into the full mapping */
	msr	vbar_el1, x4
	isb
#else
	/* switch to kernel mode ASID */
	msr	ttbr0_el1, x5
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	msr	daif, x6	/* restore exceptions */
	ret	/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect

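/*
 * SVC from user space. Entered from the exception vector with SP_EL1
 * selected and the original x0..x3 saved in struct thread_core_local.
 * Builds a struct thread_svc_regs on the kernel stack, calls
 * thread_svc_handler() and returns from the exception.
 */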
LOCAL_FUNC el0_svc , :
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	mov	x0, sp

	/*
	 * Unmask native interrupts, Serror, and debug exceptions since we have
	 * nothing left in sp_el1. Note that the SVC handler is expected to
	 * re-enable foreign interrupts by itself.
	 */
#if defined(CFG_ARM_GICV3)
	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
#else
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
#endif

	/* Call the handler */
	bl	thread_svc_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This is needed when this TA has called another TA because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
	mov	sp, x0
	b_if_spsr_is_el0 w1, 1f
	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	eret

1:	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC el0_svc

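/*
 * Synchronous exception taken from EL1. Selects the abort stack (or the
 * tmp stack if this is a nested abort), saves the state in a struct
 * thread_abt_regs, calls abort_handler() and returns from the exception.
 */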
LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	eret
END_FUNC el1_sync_abort

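/*
 * Synchronous exception (other than SVC) taken from user space. Saves
 * the state in a struct thread_abt_regs on the abort stack, calls
 * abort_handler() and returns from the exception.
 */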
	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x2 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3

	b_if_spsr_is_el0 w0, 1f

	/* Restore x0 to x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	eret
1:	b	eret_to_el0
END_FUNC el0_sync_abort

/* Handler for foreign interrupts. */
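/*
 * A foreign interrupt is meant to be handled by the normal world: the
 * interrupted state is saved in the current struct thread_ctx, the
 * thread is marked as suspended and the handler exits to the normal
 * world through thread_foreign_intr_exit() with the thread index in w0,
 * using the tmp stack and SP_EL0.
 */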
.macro foreign_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which entries have been used in
	 * the cache. We're relying on the dispatcher in TF-A to take care of
	 * the BTB.
	 */
	mov	x0, #DCACHE_OP_CLEAN_INV
	bl	dcache_op_louis
	ic	iallu
#endif
	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * functions expect to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	/* Passing thread index in w0 */
	b	thread_foreign_intr_exit
.endm

/*
 * This struct is never used from C; it's only here to visualize the
 * layout.
 *
 * struct elx_nintr_rec {
 * 	uint64_t x[19 - 4]; x4..x18
 * 	uint64_t lr;
 * 	uint64_t sp_el0;
 * };
 */
#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)

/* Handler for native interrupts. */
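/*
 * A native interrupt is handled here in OP-TEE: the handler switches to
 * the tmp stack, saves the registers that a C call may clobber in a
 * struct elx_nintr_rec, calls itr_core_handler() and then returns from
 * the exception.
 */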
.macro native_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save registers on stack that can be corrupted by a call to
	 * a C function
	 */
	/* Make room for struct elx_nintr_rec */
	sub	sp, sp, #ELX_NINTR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]

	bl	thread_check_canaries
	bl	itr_core_handler

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	mrs	x0, spsr_el1
	/* Restore x2..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
	b_if_spsr_is_el0 w0, 1f

	/* Restore x0..x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	eret
1:	b	eret_to_el0
.endm

LOCAL_FUNC elx_irq , :
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
END_FUNC elx_fiq
