xref: /optee_os/core/arch/arm/kernel/thread_a64.S (revision 4af447d4084e293800d4e463d65003c016b91f29)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2015-2020, Linaro Limited
4 */
5
6#include <arm.h>
7#include <arm64_macros.S>
8#include <asm.S>
9#include <generated/asm-defines.h>
10#include <keep.h>
11#include <kernel/cache_helpers.h>
12#include <kernel/thread_defs.h>
13#include <mm/core_mmu.h>
14#include <smccc.h>
15
16#include "thread_private.h"
17
18	.macro get_thread_ctx core_local, res, tmp0, tmp1
19		ldrh	w\tmp0, [\core_local, \
20				#THREAD_CORE_LOCAL_CURR_THREAD]
21		ldr	x\res, =threads
22		mov	x\tmp1, #THREAD_CTX_SIZE
23		madd	x\res, x\tmp0, x\tmp1, x\res
24	.endm
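
	/*
	 * get_thread_ctx computes the address of the current thread's
	 * context, i.e. &threads[curr_thread], from the per-core data in
	 * \core_local. A rough C equivalent (illustrative sketch only; the
	 * real definitions live in the thread headers and thread.c):
	 *
	 *	struct thread_ctx *get_thread_ctx(struct thread_core_local *l)
	 *	{
	 *		return &threads[l->curr_thread];
	 *	}
	 *
	 * The madd instruction does the indexing:
	 * threads + curr_thread * sizeof(struct thread_ctx).
	 */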
25
26	.macro return_from_exception
27		eret
28		/* Guard against speculation past ERET */
29		dsb nsh
30		isb
31	.endm
32
33	.macro b_if_spsr_is_el0 reg, label
34		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
35		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
36		b.eq	\label
37	.endm
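
	/*
	 * b_if_spsr_is_el0 branches to \label when the SPSR value in \reg
	 * describes a return to user space: either AArch32 (the mode RW bit
	 * is set) or AArch64 with the EL field equal to 0. A rough C model,
	 * for illustration only, mirroring the constants used above:
	 *
	 *	static bool spsr_is_el0(uint64_t spsr)
	 *	{
	 *		if (spsr & BIT64(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
	 *			return true;
	 *		return !(spsr & (SPSR_64_MODE_EL_MASK <<
	 *				 SPSR_64_MODE_EL_SHIFT));
	 *	}
	 */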
38
39/* void thread_resume(struct thread_ctx_regs *regs) */
40FUNC thread_resume , :
41	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
42	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
43	mov	sp, x1
44	msr	elr_el1, x2
45	msr	spsr_el1, x3
46	ldr	x1, [x0, THREAD_CTX_REGS_TPIDR_EL0]
47	msr	tpidr_el0, x1
48
49	b_if_spsr_is_el0 w3, 1f
50
51	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
52	ldr	x0, [x0, THREAD_CTX_REGS_X0]
53	return_from_exception
54
551:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
56	ldr	x0, [x0, THREAD_CTX_REGS_X0]
57
58	msr	spsel, #1
59	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
60	b	eret_to_el0
61END_FUNC thread_resume
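
/*
 * Illustrative use of thread_resume(): the caller fills in a complete
 * struct thread_ctx_regs and the function never returns, it erets straight
 * to the state described by the struct. Field names below are assumptions;
 * the offsets used above are generated from the real struct by asm-defines:
 *
 *	regs->sp = ...;
 *	regs->pc = ...;		restored into ELR_EL1
 *	regs->cpsr = ...;	restored into SPSR_EL1
 *	thread_resume(regs);	does not return
 */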
62
63FUNC thread_smc , :
64	smc	#0
65	ret
66END_FUNC thread_smc
67
68/* void thread_smccc(struct thread_smc_args *arg_res) */
69FUNC thread_smccc , :
70	push	x0, xzr
71	mov	x8, x0
72	load_xregs x8, 0, 0, 7
73#ifdef CFG_CORE_SEL2_SPMC
74	hvc	#0
75#else
76	smc	#0
77#endif
78	pop	x8, xzr
79	store_xregs x8, 0, 0, 7
80	ret
81END_FUNC thread_smccc
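
/*
 * Illustrative caller sketch for thread_smccc() (the a0..a7 field names
 * are assumptions based on the SMCCC register convention):
 *
 *	struct thread_smc_args args = { .a0 = smc_func_id, .a1 = ... };
 *
 *	thread_smccc(&args);
 *	result = args.a0;	x0..x7 are written back on return
 *
 * The push/pop of x0 around the SMC/HVC is needed because the call
 * clobbers x0-x7, including the pointer to the argument struct.
 */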
82
83FUNC thread_init_vbar , :
84	msr	vbar_el1, x0
85	ret
86END_FUNC thread_init_vbar
87DECLARE_KEEP_PAGER thread_init_vbar
88
89/*
90 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
91 *				     uint32_t *exit_status0,
92 *				     uint32_t *exit_status1);
93 *
94 * This function depends on being called with exceptions masked.
95 */
96FUNC __thread_enter_user_mode , :
97	/*
98	 * Create and fill in the struct thread_user_mode_rec
99	 */
100	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
101	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 0, 2
102	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
103
104	/*
105	 * Save kern sp in x19
106	 * Switch to SP_EL1
107	 */
108	mov	x19, sp
109	msr	spsel, #1
110
111	/*
112	 * Save the kernel stack pointer in the thread context
113	 */
114	/* get pointer to current thread context */
115	get_thread_ctx sp, 21, 20, 22
116	/*
117	 * Save kernel stack pointer to ensure that el0_svc() uses
118	 * correct stack pointer
119	 */
120	str	x19, [x21, #THREAD_CTX_KERN_SP]
121
122	/*
123	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
124	 */
125	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
126	msr	sp_el0, x1
127	msr	elr_el1, x2
128	msr	spsr_el1, x3
129
130	/*
131	 * Save the values for x0 and x1 in struct thread_core_local to be
132	 * restored later just before the eret.
133	 */
134	load_xregs x0, THREAD_CTX_REGS_X0, 1, 2
135	store_xregs sp, THREAD_CORE_LOCAL_X0, 1, 2
136
137	/* Load the rest of the general purpose registers */
138	load_xregs x0, THREAD_CTX_REGS_X2, 2, 30
139
140	/* Jump into user mode */
141	b eret_to_el0
142END_FUNC __thread_enter_user_mode
143DECLARE_KEEP_PAGER __thread_enter_user_mode
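
/*
 * The C side drives this roughly as follows (simplified sketch; the real
 * caller is thread_enter_user_mode() in thread.c):
 *
 *	thread_mask_exceptions(THREAD_EXCP_ALL);
 *	rc = __thread_enter_user_mode(regs, &exit_status0, &exit_status1);
 *
 * The call only "returns" once user space is left again through
 * thread_unwind_user_mode(), which restores the callee-saved registers
 * saved in struct thread_user_mode_rec above and pops the record off the
 * kernel stack.
 */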
144
145/*
146 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
147 * 		uint32_t exit_status1);
148 * See description in thread.h
149 */
150FUNC thread_unwind_user_mode , :
151	/* Store the exit status */
152	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 3, 5
153	str	w1, [x4]
154	str	w2, [x5]
155	/* Save x19..x30 */
156	store_xregs x3, THREAD_CTX_REGS_X19, 19, 30
157	/* Restore x19..x30 */
158	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
159	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
160	/* Return from the call of thread_enter_user_mode() */
161	ret
162END_FUNC thread_unwind_user_mode
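
/*
 * Seen from C this behaves as if __thread_enter_user_mode() returned:
 * the two status words are written through the pointers saved in struct
 * thread_user_mode_rec, x19..x30 are restored and execution resumes at the
 * __thread_enter_user_mode() call site with "ret" as the return value.
 * A typical caller does something like (illustrative only):
 *
 *	thread_unwind_user_mode(ret, 0, 0);	does not return to its caller
 */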
163
164	/*
165	 * This macro verifies that a given vector doesn't exceed the
166	 * architectural limit of 32 instructions. It is meant to be placed
167	 * immediately after the last instruction in the vector and takes the
168	 * vector entry as its parameter.
169	 */
170	.macro check_vector_size since
171	  .if (. - \since) > (32 * 4)
172	    .error "Vector exceeds 32 instructions"
173	  .endif
174	.endm
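
	/*
	 * Each vector entry is at most 32 instructions * 4 bytes = 128
	 * bytes, which is why the entries below are aligned with
	 * ".balign 128" and why the macro compares against (32 * 4).
	 */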
175
176	.macro restore_mapping
177#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
178		/* Temporarily save x0, x1 */
179		msr	tpidr_el1, x0
180		msr	tpidrro_el0, x1
181
182		/* Update the mapping to use the full kernel mapping */
183		mrs	x0, ttbr0_el1
184		sub	x0, x0, #CORE_MMU_BASE_TABLE_OFFSET
185		/* switch to kernel mode ASID */
186		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
187		msr	ttbr0_el1, x0
188		isb
189
190		/* Jump into the full mapping and continue execution */
191		ldr	x0, =1f
192		br	x0
193	1:
194
195		/* Point to the vector in the full mapping */
196		adr_l	x0, thread_user_kcode_offset
197		ldr	x0, [x0]
198		mrs	x1, vbar_el1
199		add	x1, x1, x0
200		msr	vbar_el1, x1
201		isb
202
203#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
204		/*
205		 * Update the SP with thread_user_kdata_sp_offset as
206		 * described in init_user_kcode().
207		 */
208		adr_l	x0, thread_user_kdata_sp_offset
209		ldr	x0, [x0]
210		add	sp, sp, x0
211#endif
212
213		/* Restore x0, x1 */
214		mrs	x0, tpidr_el1
215		mrs	x1, tpidrro_el0
216		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
217#else
218		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
219		mrs	x0, ttbr0_el1
220		/* switch to kernel mode ASID */
221		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
222		msr	ttbr0_el1, x0
223		isb
224#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
225	.endm
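
	/*
	 * The ASID handling above relies on each context owning a pair of
	 * ASIDs in TTBR0_EL1: the kernel mapping uses the value with the
	 * lowest ASID bit cleared and the user mapping the value with it
	 * set, so switching between them is a single bit flip. Rough C
	 * model (illustrative only):
	 *
	 *	static void switch_to_kernel_asid(void)
	 *	{
	 *		uint64_t ttbr = read_ttbr0_el1();
	 *
	 *		write_ttbr0_el1(ttbr & ~BIT64(TTBR_ASID_SHIFT));
	 *		isb();
	 *	}
	 *
	 * and the user direction sets the bit instead. With
	 * CFG_CORE_UNMAP_CORE_AT_EL0 the base table address is also adjusted
	 * by CORE_MMU_BASE_TABLE_OFFSET to move between the reduced mapping
	 * used while in user space and the full kernel mapping.
	 */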
226
227#define INV_INSN	0
228FUNC thread_excp_vect , : align=2048
229	/* -----------------------------------------------------
230	 * EL1 with SP0 : 0x0 - 0x180
231	 * -----------------------------------------------------
232	 */
233	.balign	128, INV_INSN
234el1_sync_sp0:
235	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
236	b	el1_sync_abort
237	check_vector_size el1_sync_sp0
238
239	.balign	128, INV_INSN
240el1_irq_sp0:
241	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
242	b	elx_irq
243	check_vector_size el1_irq_sp0
244
245	.balign	128, INV_INSN
246el1_fiq_sp0:
247	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
248	b	elx_fiq
249	check_vector_size el1_fiq_sp0
250
251	.balign	128, INV_INSN
252el1_serror_sp0:
253	b	el1_serror_sp0
254	check_vector_size el1_serror_sp0
255
256	/* -----------------------------------------------------
257	 * Current EL with SP1: 0x200 - 0x380
258	 * -----------------------------------------------------
259	 */
260	.balign	128, INV_INSN
261el1_sync_sp1:
262	b	el1_sync_sp1
263	check_vector_size el1_sync_sp1
264
265	.balign	128, INV_INSN
266el1_irq_sp1:
267	b	el1_irq_sp1
268	check_vector_size el1_irq_sp1
269
270	.balign	128, INV_INSN
271el1_fiq_sp1:
272	b	el1_fiq_sp1
273	check_vector_size el1_fiq_sp1
274
275	.balign	128, INV_INSN
276el1_serror_sp1:
277	b	el1_serror_sp1
278	check_vector_size el1_serror_sp1
279
280	/* -----------------------------------------------------
281	 * Lower EL using AArch64 : 0x400 - 0x580
282	 * -----------------------------------------------------
283	 */
284	.balign	128, INV_INSN
285el0_sync_a64:
286	restore_mapping
287
288	mrs	x2, esr_el1
289	mrs	x3, sp_el0
290	lsr	x2, x2, #ESR_EC_SHIFT
291	cmp	x2, #ESR_EC_AARCH64_SVC
292	b.eq	el0_svc
293	b	el0_sync_abort
294	check_vector_size el0_sync_a64
295
296	.balign	128, INV_INSN
297el0_irq_a64:
298	restore_mapping
299
300	b	elx_irq
301	check_vector_size el0_irq_a64
302
303	.balign	128, INV_INSN
304el0_fiq_a64:
305	restore_mapping
306
307	b	elx_fiq
308	check_vector_size el0_fiq_a64
309
310	.balign	128, INV_INSN
311el0_serror_a64:
312	b   	el0_serror_a64
313	check_vector_size el0_serror_a64
314
315	/* -----------------------------------------------------
316	 * Lower EL using AArch32 : 0x600 - 0x780
317	 * -----------------------------------------------------
318	 */
319	.balign	128, INV_INSN
320el0_sync_a32:
321	restore_mapping
322
323	mrs	x2, esr_el1
324	mrs	x3, sp_el0
325	lsr	x2, x2, #ESR_EC_SHIFT
326	cmp	x2, #ESR_EC_AARCH32_SVC
327	b.eq	el0_svc
328	b	el0_sync_abort
329	check_vector_size el0_sync_a32
330
331	.balign	128, INV_INSN
332el0_irq_a32:
333	restore_mapping
334
335	b	elx_irq
336	check_vector_size el0_irq_a32
337
338	.balign	128, INV_INSN
339el0_fiq_a32:
340	restore_mapping
341
342	b	elx_fiq
343	check_vector_size el0_fiq_a32
344
345	.balign	128, INV_INSN
346el0_serror_a32:
347	b	el0_serror_a32
348	check_vector_size el0_serror_a32
349
350#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
351	.macro invalidate_branch_predictor
352		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
353		mov_imm	x0, SMCCC_ARCH_WORKAROUND_1
354		smc	#0
355		load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
356	.endm
357
358	.balign	2048, INV_INSN
359	.global thread_excp_vect_workaround
360thread_excp_vect_workaround:
361	/* -----------------------------------------------------
362	 * EL1 with SP0 : 0x0 - 0x180
363	 * -----------------------------------------------------
364	 */
365	.balign	128, INV_INSN
366workaround_el1_sync_sp0:
367	b	el1_sync_sp0
368	check_vector_size workaround_el1_sync_sp0
369
370	.balign	128, INV_INSN
371workaround_el1_irq_sp0:
372	b	el1_irq_sp0
373	check_vector_size workaround_el1_irq_sp0
374
375	.balign	128, INV_INSN
376workaround_el1_fiq_sp0:
377	b	el1_fiq_sp0
378	check_vector_size workaround_el1_fiq_sp0
379
380	.balign	128, INV_INSN
381workaround_el1_serror_sp0:
382	b	el1_serror_sp0
383	check_vector_size workaround_el1_serror_sp0
384
385	/* -----------------------------------------------------
386	 * Current EL with SP1: 0x200 - 0x380
387	 * -----------------------------------------------------
388	 */
389	.balign	128, INV_INSN
390workaround_el1_sync_sp1:
391	b	workaround_el1_sync_sp1
392	check_vector_size workaround_el1_sync_sp1
393
394	.balign	128, INV_INSN
395workaround_el1_irq_sp1:
396	b	workaround_el1_irq_sp1
397	check_vector_size workaround_el1_irq_sp1
398
399	.balign	128, INV_INSN
400workaround_el1_fiq_sp1:
401	b	workaround_el1_fiq_sp1
402	check_vector_size workaround_el1_fiq_sp1
403
404	.balign	128, INV_INSN
405workaround_el1_serror_sp1:
406	b	workaround_el1_serror_sp1
407	check_vector_size workaround_el1_serror_sp1
408
409	/* -----------------------------------------------------
410	 * Lower EL using AArch64 : 0x400 - 0x580
411	 * -----------------------------------------------------
412	 */
413	.balign	128, INV_INSN
414workaround_el0_sync_a64:
415	invalidate_branch_predictor
416	b	el0_sync_a64
417	check_vector_size workaround_el0_sync_a64
418
419	.balign	128, INV_INSN
420workaround_el0_irq_a64:
421	invalidate_branch_predictor
422	b	el0_irq_a64
423	check_vector_size workaround_el0_irq_a64
424
425	.balign	128, INV_INSN
426workaround_el0_fiq_a64:
427	invalidate_branch_predictor
428	b	el0_fiq_a64
429	check_vector_size workaround_el0_fiq_a64
430
431	.balign	128, INV_INSN
432workaround_el0_serror_a64:
433	b   	workaround_el0_serror_a64
434	check_vector_size workaround_el0_serror_a64
435
436	/* -----------------------------------------------------
437	 * Lower EL using AArch32 : 0x600 - 0x780
438	 * -----------------------------------------------------
439	 */
440	.balign	128, INV_INSN
441workaround_el0_sync_a32:
442	invalidate_branch_predictor
443	b	el0_sync_a32
444	check_vector_size workaround_el0_sync_a32
445
446	.balign	128, INV_INSN
447workaround_el0_irq_a32:
448	invalidate_branch_predictor
449	b	el0_irq_a32
450	check_vector_size workaround_el0_irq_a32
451
452	.balign	128, INV_INSN
453workaround_el0_fiq_a32:
454	invalidate_branch_predictor
455	b	el0_fiq_a32
456	check_vector_size workaround_el0_fiq_a32
457
458	.balign	128, INV_INSN
459workaround_el0_serror_a32:
460	b	workaround_el0_serror_a32
461	check_vector_size workaround_el0_serror_a32
462#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
463
464/*
465 * We're keeping this code in the same section as the vector to make sure
466 * that it's always available.
467 */
468eret_to_el0:
469
470#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
471	/* Point to the vector in the reduced mapping */
472	adr_l	x0, thread_user_kcode_offset
473	ldr	x0, [x0]
474	mrs	x1, vbar_el1
475	sub	x1, x1, x0
476	msr	vbar_el1, x1
477	isb
478
479#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
480	/* Store the SP offset in tpidr_el1 to be used below to update SP */
481	adr_l	x1, thread_user_kdata_sp_offset
482	ldr	x1, [x1]
483	msr	tpidr_el1, x1
484#endif
485
486	/* Jump into the reduced mapping and continue execution */
487	ldr	x1, =1f
488	sub	x1, x1, x0
489	br	x1
4901:
491
492	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
493	msr	tpidrro_el0, x0
494
495	/* Update the mapping to exclude the full kernel mapping */
496	mrs	x0, ttbr0_el1
497	add	x0, x0, #CORE_MMU_BASE_TABLE_OFFSET
498	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
499	msr	ttbr0_el1, x0
500	isb
501
502#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
503	/*
504	 * Update the SP with thread_user_kdata_sp_offset as described in
505	 * init_user_kcode().
506	 */
507	mrs	x0, tpidr_el1
508	sub	sp, sp, x0
509#endif
510
511	mrs	x0, tpidrro_el0
512#else
513	mrs	x0, ttbr0_el1
514	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
515	msr	ttbr0_el1, x0
516	isb
517	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
518#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
519
520	return_from_exception
521
522	/*
523	 * void icache_inv_user_range(void *addr, size_t size);
524	 *
525	 * This function has to execute with the user space ASID active,
526	 * this means executing with reduced mapping and the code needs
527	 * to be located here together with the vector.
528	 */
529	.global icache_inv_user_range
530	.type icache_inv_user_range , %function
531icache_inv_user_range:
532	/* Mask all exceptions */
533	mrs	x6, daif	/* this register must be preserved */
534	msr	daifset, #DAIFBIT_ALL
535
536#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
537	/* Point to the vector in the reduced mapping */
538	adr_l	x2, thread_user_kcode_offset
539	ldr	x2, [x2]
540	mrs	x4, vbar_el1	/* this register must be preserved */
541	sub	x3, x4, x2
542	msr	vbar_el1, x3
543	isb
544
545	/* Jump into the reduced mapping and continue execution */
546	ldr	x3, =1f
547	sub	x3, x3, x2
548	br	x3
5491:
550
551	/* Update the mapping to exclude the full kernel mapping */
552	mrs	x5, ttbr0_el1	/* this register must be preserved */
553	add	x2, x5, #CORE_MMU_BASE_TABLE_OFFSET
554	orr	x2, x2, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
555	msr	ttbr0_el1, x2
556	isb
557
558#else
559	mrs	x5, ttbr0_el1	/* this register must be preserved */
560	orr	x2, x5, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
561	msr	ttbr0_el1, x2
562	isb
563#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
564
565	/*
566	 * Do the actual icache invalidation
567	 */
568
569	/* Calculate minimum icache line size, result in x2 */
570	mrs	x3, ctr_el0
571	and	x3, x3, #CTR_IMINLINE_MASK
572	mov	x2, #CTR_WORD_SIZE
573	lsl	x2, x2, x3
574
575	add	x1, x0, x1
576	sub	x3, x2, #1
577	bic	x0, x0, x3
5781:
579	ic	ivau, x0
580	add	x0, x0, x2
581	cmp	x0, x1
582	b.lo    1b
583	dsb	ish
584
585#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
586	/* Update the mapping to use the full kernel mapping and ASID */
587	msr	ttbr0_el1, x5
588	isb
589
590	/* Jump into the full mapping and continue execution */
591	ldr	x0, =1f
592	br	x0
5931:
594
595	/* Point to the vector in the full mapping */
596	msr	vbar_el1, x4
597	isb
598#else
599	/* switch to kernel mode ASID */
600	msr	ttbr0_el1, x5
601	isb
602#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
603
604	msr	daif, x6	/* restore exceptions */
605	ret	/* End of icache_inv_user_range() */
606
607	/*
608	 * Make sure that literals are placed before the
609	 * thread_excp_vect_end label.
610	 */
611	.pool
612	.global thread_excp_vect_end
613thread_excp_vect_end:
614END_FUNC thread_excp_vect
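
/*
 * The invalidation loop in icache_inv_user_range() above is the usual
 * "round the address down to a cache line and step through the range"
 * pattern. Rough C equivalent of the loop (illustrative sketch only,
 * helper names are made up):
 *
 *	void icache_inv_range_sketch(vaddr_t va, size_t size)
 *	{
 *		size_t line = CTR_WORD_SIZE <<
 *			      (read_ctr_el0() & CTR_IMINLINE_MASK);
 *		vaddr_t end = va + size;
 *
 *		for (va &= ~(line - 1); va < end; va += line)
 *			ic_ivau(va);		ic ivau, x0
 *		dsb_ish();			dsb ish
 *	}
 *
 * The exception masking and the temporary switch to the user ASID around
 * the loop exist because "ic ivau" operates on the currently active
 * mapping, so the user addresses must be mapped while it runs.
 */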
615
616LOCAL_FUNC el0_svc , :
617	/* get pointer to current thread context in x0 */
618	get_thread_ctx sp, 0, 1, 2
619	mrs	x1, tpidr_el0
620	str	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
621	/* load saved kernel sp */
622	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
623	/* Keep pointer to initial record in x1 */
624	mov	x1, sp
625	/* Switch to SP_EL0 and restore kernel sp */
626	msr	spsel, #0
627	mov	x2, sp	/* Save SP_EL0 */
628	mov	sp, x0
629
630	/* Make room for struct thread_svc_regs */
631	sub	sp, sp, #THREAD_SVC_REG_SIZE
632	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]
633
634	/* Restore x0-x3 */
635	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
636	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]
637
638	/* Prepare the argument for the handler */
639	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
640	mrs	x0, elr_el1
641	mrs	x1, spsr_el1
642	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
643	mov	x0, sp
644
645	/*
646	 * Unmask native interrupts, Serror, and debug exceptions since we have
647	 * nothing left in sp_el1. Note that the SVC handler is expected to
648	 * re-enable foreign interrupts by itself.
649	 */
650#if defined(CFG_ARM_GICV3)
651	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
652#else
653	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
654#endif
655
656	/* Call the handler */
657	bl	thread_svc_handler
658
659	/* Mask all maskable exceptions since we're switching back to sp_el1 */
660	msr	daifset, #DAIFBIT_ALL
661
662	/*
663	 * Save the kernel sp we had at the beginning of this function.
664	 * This matters when this TA has called another TA, because
665	 * __thread_enter_user_mode() also saves the stack pointer in this
666	 * field.
667	 */
668	msr	spsel, #1
669	get_thread_ctx sp, 0, 1, 2
670	msr	spsel, #0
671	add	x1, sp, #THREAD_SVC_REG_SIZE
672	str	x1, [x0, #THREAD_CTX_KERN_SP]
673
674	/* Restore registers to the required state and return */
675	ldr	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
676	msr	tpidr_el0, x1
677	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
678	msr	elr_el1, x0
679	msr	spsr_el1, x1
680	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
681	mov	x30, sp
682	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
683	mov	sp, x0
684	b_if_spsr_is_el0 w1, 1f
685	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
686	ldr	x30, [x30, #THREAD_SVC_REG_X30]
687
688	return_from_exception
689
6901:	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
691	ldr	x30, [x30, #THREAD_SVC_REG_X30]
692
693	msr	spsel, #1
694	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
695	b	eret_to_el0
696END_FUNC el0_svc
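
/*
 * el0_svc hands over to the C syscall dispatcher with a pointer to the
 * register snapshot it just built on the kernel stack. Conceptually
 * (illustrative; the real struct lives in the thread headers and the
 * THREAD_SVC_REG_* offsets are generated from it, so the field order here
 * is not authoritative):
 *
 *	struct thread_svc_regs {
 *		uint64_t elr;
 *		uint64_t spsr;
 *		uint64_t x0;	syscall arguments and return values
 *		...
 *		uint64_t x14;
 *		uint64_t x30;
 *		uint64_t sp_el0;
 *	};
 *
 *	void thread_svc_handler(struct thread_svc_regs *regs);
 *
 * The handler updates the saved registers in place, which is how syscall
 * return values reach user space on the return path above.
 */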
697
698LOCAL_FUNC el1_sync_abort , :
699	mov	x0, sp
700	msr	spsel, #0
701	mov	x3, sp		/* Save original sp */
702
703	/*
704	 * Update core local flags.
705	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
706	 */
707	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
708	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
709	orr	w1, w1, #THREAD_CLF_ABORT
710	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
711			.Lsel_tmp_sp
712
713	/* Select abort stack */
714	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
715	b	.Lset_sp
716
717.Lsel_tmp_sp:
718	/* Select tmp stack */
719	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
720	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */
721
722.Lset_sp:
723	mov	sp, x2
724	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
725
726	/*
727	 * Save state on stack
728	 */
729	sub	sp, sp, #THREAD_ABT_REGS_SIZE
730	mrs	x2, spsr_el1
731	/* Store spsr, sp_el0 */
732	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
733	/* Store original x0, x1 */
734	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
735	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
736	/* Store original x2, x3 and x4 to x29 */
737	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
738	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
739	/* Store x30, elr_el1 */
740	mrs	x0, elr_el1
741	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
742
743	/*
744	 * Call handler
745	 */
746	mov	x0, #0
747	mov	x1, sp
748	bl	abort_handler
749
750	/*
751	 * Restore state from stack
752	 */
753	/* Load x30, elr_el1 */
754	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
755	msr	elr_el1, x0
756	/* Load x0 to x29 */
757	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
758	/* Switch to SP_EL1 */
759	msr	spsel, #1
760	/* Save x0 to x3 in CORE_LOCAL */
761	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
762	/* Restore spsr_el1 and sp_el0 */
763	mrs	x3, sp_el0
764	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
765	msr	spsr_el1, x0
766	msr	sp_el0, x1
767
768	/* Update core local flags */
769	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
770	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
771	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
772
773	/* Restore x0 to x3 */
774	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
775
776	/* Return from exception */
777	return_from_exception
778END_FUNC el1_sync_abort
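
/*
 * The THREAD_CORE_LOCAL_FLAGS updates above and below implement a small
 * stack of per-core states packed into one word: entering an exception
 * pushes by shifting left, leaving pops by shifting right. Rough C model
 * (illustrative only, the flags field lives in struct thread_core_local):
 *
 *	static void clf_push(struct thread_core_local *l, uint32_t state)
 *	{
 *		l->flags = (l->flags << THREAD_CLF_SAVED_SHIFT) | state;
 *	}
 *
 *	static void clf_pop(struct thread_core_local *l)
 *	{
 *		l->flags >>= THREAD_CLF_SAVED_SHIFT;
 *	}
 *
 * The tbnz at the top of el1_sync_abort() tests the THREAD_CLF_ABORT bit
 * of the state that was just pushed: if it is set this is a nested abort
 * and the tmp stack is used instead of the abort stack.
 */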
779
780	/* sp_el0 in x3 */
781LOCAL_FUNC el0_sync_abort , :
782	/*
783	 * Update core local flags
784	 */
785	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
786	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
787	orr	w1, w1, #THREAD_CLF_ABORT
788	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
789
790	/*
791	 * Save state on stack
792	 */
793
794	/* load abt_stack_va_end */
795	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
796	/* Keep pointer to initial record in x0 */
797	mov	x0, sp
798	/* Switch to SP_EL0 */
799	msr	spsel, #0
800	mov	sp, x1
801	sub	sp, sp, #THREAD_ABT_REGS_SIZE
802	mrs	x2, spsr_el1
803	/* Store spsr, sp_el0 */
804	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
805	/* Store original x0, x1 */
806	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
807	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
808	/* Store original x2, x3 and x4 to x29 */
809	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
810	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
811	/* Store x30, elr_el1 */
812	mrs	x0, elr_el1
813	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
814
815	/*
816	 * Call handler
817	 */
818	mov	x0, #0
819	mov	x1, sp
820	bl	abort_handler
821
822	/*
823	 * Restore state from stack
824	 */
825
826	/* Load x30, elr_el1 */
827	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
828	msr	elr_el1, x0
829	/* Load x0 to x29 */
830	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
831	/* Switch to SP_EL1 */
832	msr	spsel, #1
833	/* Save x0 to x3 in EL1_REC */
834	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
835	/* Restore spsr_el1 and sp_el0 */
836	mrs	x3, sp_el0
837	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
838	msr	spsr_el1, x0
839	msr	sp_el0, x1
840
841	/* Update core local flags */
842	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
843	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
844	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
845
846	/* Restore x2 to x3 */
847	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
848
849	b_if_spsr_is_el0 w0, 1f
850
851	/* Restore x0 to x1 */
852	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
853
854	/* Return from exception */
855	return_from_exception
8561:	b	eret_to_el0
857END_FUNC el0_sync_abort
858
859/* The handler of foreign interrupts. */
860.macro foreign_intr_handler mode:req
861	/*
862	 * Update core local flags
863	 */
864	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
865	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
866	orr	w1, w1, #THREAD_CLF_TMP
867	.ifc	\mode\(),fiq
868	orr	w1, w1, #THREAD_CLF_FIQ
869	.else
870	orr	w1, w1, #THREAD_CLF_IRQ
871	.endif
872	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
873
874	/* get pointer to current thread context in x0 */
875	get_thread_ctx sp, 0, 1, 2
876	/* Keep original SP_EL0 */
877	mrs	x2, sp_el0
878
879	/* Store original sp_el0 */
880	str	x2, [x0, #THREAD_CTX_REGS_SP]
881	/* Store tpidr_el0 */
882	mrs	x2, tpidr_el0
883	str	x2, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
884	/* Store x4..x30 */
885	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
886	/* Load original x0..x3 into x10..x13 */
887	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
888	/* Save original x0..x3 */
889	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13
890
891	/* load tmp_stack_va_end */
892	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
893	/* Switch to SP_EL0 */
894	msr	spsel, #0
895	mov	sp, x1
896
897#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
898	/*
899	 * Prevent leaking information about which cache entries have been
900	 * used. We're relying on the dispatcher in TF-A to take care of
901	 * the BTB.
902	 */
903	mov	x0, #DCACHE_OP_CLEAN_INV
904	bl	dcache_op_louis
905	ic	iallu
906#endif
907	/*
908	 * Mark current thread as suspended
909	 */
910	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
911	mrs	x1, spsr_el1
912	mrs	x2, elr_el1
913	bl	thread_state_suspend
914
915	/* Update core local flags */
916	/* Switch to SP_EL1 */
917	msr	spsel, #1
918	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
919	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
920	orr	w1, w1, #THREAD_CLF_TMP
921	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
922	msr	spsel, #0
923
924	/*
925	 * Note that we're exiting with SP_EL0 selected since the entry
926	 * functions expect to have SP_EL0 selected with the tmp stack
927	 * set.
928	 */
929
930	/* Passing thread index in w0 */
931	b	thread_foreign_intr_exit
932.endm
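
/*
 * thread_state_suspend() is the C helper called above; its prototype is
 * roughly (simplified from the thread headers):
 *
 *	int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc);
 *
 * It records the remaining thread state, marks the thread as suspended and
 * returns the thread index, which is then passed in w0 to
 * thread_foreign_intr_exit() so the normal world can handle the foreign
 * interrupt and later resume the suspended thread.
 */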
933
934/*
935 * This struct is never used from C; it's only here to visualize the
936 * layout.
937 *
938 * struct elx_nintr_rec {
939 * 	uint64_t x[19 - 4]; x4..x18
940 * 	uint64_t lr;
941 * 	uint64_t sp_el0;
942 * };
943 */
944#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
945#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
946#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
947#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)
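
/*
 * With these definitions ELX_NINTR_REC_X(19) is 8 * 15 = 120, so lr is
 * stored at offset 128 (leaving 8 bytes unused after x18), sp_el0 at 136
 * and the whole record is 144 bytes, which keeps SP 16-byte aligned.
 */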
948
949/* The handler of native interrupts. */
950.macro native_intr_handler mode:req
951	/*
952	 * Update core local flags
953	 */
954	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
955	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
956	.ifc	\mode\(),fiq
957	orr	w1, w1, #THREAD_CLF_FIQ
958	.else
959	orr	w1, w1, #THREAD_CLF_IRQ
960	.endif
961	orr	w1, w1, #THREAD_CLF_TMP
962	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
963
964	/* load tmp_stack_va_end */
965	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
966	/* Keep original SP_EL0 */
967	mrs	x2, sp_el0
968	/* Switch to SP_EL0 */
969	msr	spsel, #0
970	mov	sp, x1
971
972	/*
973	 * Save registers on stack that can be corrupted by a call to
974	 * a C function
975	 */
976	/* Make room for struct elx_nintr_rec */
977	sub	sp, sp, #ELX_NINTR_REC_SIZE
978	/* Store x4..x18 */
979	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
980	/* Store lr and original sp_el0 */
981	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]
982
983	bl	thread_check_canaries
984	bl	itr_core_handler
985
986	/*
987	 * Restore registers
988	 */
989	/* Restore x4..x18 */
990	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
991	/* Load lr and original sp_el0 */
992	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
993	/* Restore SP_EL0 */
994	mov	sp, x2
995	/* Switch back to SP_EL1 */
996	msr	spsel, #1
997
998	/* Update core local flags */
999	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
1000	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
1001	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
1002
1003	mrs	x0, spsr_el1
1004	/* Restore x2..x3 */
1005	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
1006	b_if_spsr_is_el0 w0, 1f
1007
1008	/* Restore x0..x1 */
1009	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
1010
1011	/* Return from exception */
1012	return_from_exception
10131:	b	eret_to_el0
1014.endm
1015
1016LOCAL_FUNC elx_irq , :
1017#if defined(CFG_ARM_GICV3)
1018	native_intr_handler	irq
1019#else
1020	foreign_intr_handler	irq
1021#endif
1022END_FUNC elx_irq
1023
1024LOCAL_FUNC elx_fiq , :
1025#if defined(CFG_ARM_GICV3)
1026	foreign_intr_handler	fiq
1027#else
1028	native_intr_handler	fiq
1029#endif
1030END_FUNC elx_fiq
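
/*
 * Summary of the two configurations above, as a sketch (IS_ENABLED() is
 * the usual compile-time helper, used here for illustration only):
 *
 *	static bool intr_is_native(bool is_fiq)
 *	{
 *		if (IS_ENABLED(CFG_ARM_GICV3))
 *			return !is_fiq;
 *		return is_fiq;
 *	}
 *
 * With GICv3 the secure (native) interrupts arrive as IRQ and the foreign
 * (normal world) ones as FIQ; without it the roles are swapped. This
 * matches the DAIF bits unmasked in el0_svc().
 */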
1031