xref: /optee_os/core/arch/arm/kernel/thread_a64.S (revision c84eee6397bb8ae0745d9aa24b5228a58793378b)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2015-2020, Linaro Limited
4 */
5
6#include <arm.h>
7#include <arm64_macros.S>
8#include <asm.S>
9#include <generated/asm-defines.h>
10#include <keep.h>
11#include <kernel/cache_helpers.h>
12#include <kernel/thread_defs.h>
13#include <mm/core_mmu.h>
14#include <smccc.h>
15
16#include "thread_private.h"
17
18	.macro get_thread_ctx core_local, res, tmp0, tmp1
19		ldrh	w\tmp0, [\core_local, \
20				#THREAD_CORE_LOCAL_CURR_THREAD]
21		ldr	x\res, =threads
22		mov	x\tmp1, #THREAD_CTX_SIZE
23		madd	x\res, x\tmp0, x\tmp1, x\res
24	.endm
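	/*
	 * Rough C equivalent of get_thread_ctx above (illustrative only):
	 * it computes res = &threads[core_local->curr_thread], where
	 * "threads" is the global array of struct thread_ctx and
	 * curr_thread is read from the struct thread_core_local that
	 * \core_local points to.
	 */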
25
26	.macro return_from_exception
27		eret
28		/* Guard against speculation past ERET */
29		dsb nsh
30		isb
31	.endm
32
33	.macro b_if_spsr_is_el0 reg, label
34		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
35		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
36		b.eq	\label
37	.endm
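	/*
	 * b_if_spsr_is_el0 branches to \label when the SPSR value in \reg
	 * describes a return to AArch32 state or to AArch64 EL0, i.e. to
	 * user mode (the core itself runs in AArch64 EL1). Rough sketch,
	 * with illustrative helper names:
	 *
	 *	if (spsr_is_aarch32(spsr) || spsr_aarch64_el(spsr) == 0)
	 *		goto label;
	 */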
38
39/* void thread_resume(struct thread_ctx_regs *regs) */
40FUNC thread_resume , :
41	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
42	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
43	mov	sp, x1
44	msr	elr_el1, x2
45	msr	spsr_el1, x3
46	ldr	x1, [x0, THREAD_CTX_REGS_TPIDR_EL0]
47	msr	tpidr_el0, x1
48
49	b_if_spsr_is_el0 w3, 1f
50
51	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
52	ldr	x0, [x0, THREAD_CTX_REGS_X0]
53	return_from_exception
54
551:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
56	ldr	x0, [x0, THREAD_CTX_REGS_X0]
57
58	msr	spsel, #1
59	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
60	b	eret_to_el0
61END_FUNC thread_resume
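/*
 * Note on thread_resume() above: when the saved SPSR describes a return
 * to EL1 the function simply erets with the restored registers, but when
 * it describes a return to user mode x0/x1 are stashed in struct
 * thread_core_local and the exit goes through eret_to_el0 so that the
 * EL0-specific ASID and mapping handling is applied.
 */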
62
63FUNC thread_smc , :
64	smc	#0
65	ret
66END_FUNC thread_smc
67
68FUNC thread_init_vbar , :
69	msr	vbar_el1, x0
70	ret
71END_FUNC thread_init_vbar
72DECLARE_KEEP_PAGER thread_init_vbar
73
74/*
75 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
76 *				     uint32_t *exit_status0,
77 *				     uint32_t *exit_status1);
78 *
79 * This function depends on being called with exceptions masked.
80 */
81FUNC __thread_enter_user_mode , :
82	/*
83	 * Create and fill in the struct thread_user_mode_rec
84	 */
85	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
86	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 0, 2
87	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
88
89	/*
90	 * Save kern sp in x19
91	 * Switch to SP_EL1
92	 */
93	mov	x19, sp
94	msr	spsel, #1
95
96	/*
97	 * Save the kernel stack pointer in the thread context
98	 */
99	/* get pointer to current thread context */
100	get_thread_ctx sp, 21, 20, 22
101	/*
102	 * Save the kernel stack pointer to ensure that el0_svc() uses
103	 * the correct stack pointer
104	 */
105	str	x19, [x21, #THREAD_CTX_KERN_SP]
106
107	/*
108	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
109	 */
110	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
111	msr	sp_el0, x1
112	msr	elr_el1, x2
113	msr	spsr_el1, x3
114
115	/*
116	 * Save the values for x0 and x1 in struct thread_core_local to be
117	 * restored later just before the eret.
118	 */
119	load_xregs x0, THREAD_CTX_REGS_X0, 1, 2
120	store_xregs sp, THREAD_CORE_LOCAL_X0, 1, 2
121
122	/* Load the rest of the general purpose registers */
123	load_xregs x0, THREAD_CTX_REGS_X2, 2, 30
124
125	/* Jump into user mode */
126	b eret_to_el0
127END_FUNC __thread_enter_user_mode
128DECLARE_KEEP_PAGER __thread_enter_user_mode
129
130/*
131 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
132 * 		uint32_t exit_status1);
133 * See description in thread.h
134 */
135FUNC thread_unwind_user_mode , :
136	/* Store the exit status */
137	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 3, 5
138	str	w1, [x4]
139	str	w2, [x5]
140	/* Save x19..x30 */
141	store_xregs x3, THREAD_CTX_REGS_X19, 19, 30
142	/* Restore x19..x30 */
143	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
144	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
145	/* Return from the call of thread_enter_user_mode() */
146	ret
147END_FUNC thread_unwind_user_mode
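/*
 * How the two functions above pair up: __thread_enter_user_mode() pushes
 * a struct thread_user_mode_rec (its three arguments plus x19..x30) on
 * the kernel stack, records the kernel sp in the thread context and
 * erets to user mode. Control only gets back to the caller when
 * thread_unwind_user_mode() is invoked later: it writes the exit status
 * words through the pointers saved in the record, restores x19..x30,
 * pops the record and returns with its "ret" argument in w0, as if
 * thread_enter_user_mode() had returned normally.
 */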
148
149	/*
150	 * This macro verifies that a given vector doesn't exceed the
151	 * architectural limit of 32 instructions (32 x 4 bytes = 128 bytes).
152	 * It is meant to be placed immediately after the last instruction in
153	 * the vector and takes the vector entry label as its parameter.
154	 */
155	.macro check_vector_size since
156	  .if (. - \since) > (32 * 4)
157	    .error "Vector exceeds 32 instructions"
158	  .endif
159	.endm
160
161	.macro restore_mapping
162#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
163		/* Temporarily save x0, x1 */
164		msr	tpidr_el1, x0
165		msr	tpidrro_el0, x1
166
167		/* Update the mapping to use the full kernel mapping */
168		mrs	x0, ttbr0_el1
169		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
170		/* switch to kernel mode ASID */
171		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
172		msr	ttbr0_el1, x0
173		isb
174
175		/* Jump into the full mapping and continue execution */
176		ldr	x0, =1f
177		br	x0
178	1:
179
180		/* Point VBAR_EL1 at the vector in the full mapping */
181		adr_l	x0, thread_user_kcode_offset
182		ldr	x0, [x0]
183		mrs	x1, vbar_el1
184		add	x1, x1, x0
185		msr	vbar_el1, x1
186		isb
187
188#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
189		/*
190		 * Update the SP with thread_user_kdata_sp_offset as
191		 * described in init_user_kcode().
192		 */
193		adr_l	x0, thread_user_kdata_sp_offset
194		ldr	x0, [x0]
195		add	sp, sp, x0
196#endif
197
198		/* Restore x0, x1 */
199		mrs	x0, tpidr_el1
200		mrs	x1, tpidrro_el0
201		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
202#else
203		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
204		mrs	x0, ttbr0_el1
205		/* switch to kernel mode ASID */
206		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
207		msr	ttbr0_el1, x0
208		isb
209#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
210	.endm
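	/*
	 * Summary of restore_mapping: on entry from EL0 the core may be
	 * running on the reduced (user) mapping with the user ASID. The
	 * macro switches TTBR0_EL1 back to the full kernel mapping and
	 * kernel ASID and, with CFG_CORE_UNMAP_CORE_AT_EL0, also jumps to
	 * the full-mapping alias of the code, points VBAR_EL1 at the
	 * full-mapping vector and, with the Spectre workaround, adds
	 * thread_user_kdata_sp_offset back to SP. In all cases it ends
	 * with x0..x3 saved in struct thread_core_local.
	 */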
211
212#define INV_INSN	0
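/*
 * The vector entries below are padded with INV_INSN: an all-zero opcode
 * is a permanently undefined instruction in AArch64, so straying into
 * the padding traps instead of executing by accident.
 */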
213FUNC thread_excp_vect , : align=2048
214	/* -----------------------------------------------------
215	 * EL1 with SP0 : 0x0 - 0x180
216	 * -----------------------------------------------------
217	 */
218	.balign	128, INV_INSN
219el1_sync_sp0:
220	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
221	b	el1_sync_abort
222	check_vector_size el1_sync_sp0
223
224	.balign	128, INV_INSN
225el1_irq_sp0:
226	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
227	b	elx_irq
228	check_vector_size el1_irq_sp0
229
230	.balign	128, INV_INSN
231el1_fiq_sp0:
232	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
233	b	elx_fiq
234	check_vector_size el1_fiq_sp0
235
236	.balign	128, INV_INSN
237el1_serror_sp0:
238	b	el1_serror_sp0
239	check_vector_size el1_serror_sp0
240
241	/* -----------------------------------------------------
242	 * Current EL with SP1: 0x200 - 0x380
243	 * -----------------------------------------------------
244	 */
245	.balign	128, INV_INSN
246el1_sync_sp1:
247	b	el1_sync_sp1
248	check_vector_size el1_sync_sp1
249
250	.balign	128, INV_INSN
251el1_irq_sp1:
252	b	el1_irq_sp1
253	check_vector_size el1_irq_sp1
254
255	.balign	128, INV_INSN
256el1_fiq_sp1:
257	b	el1_fiq_sp1
258	check_vector_size el1_fiq_sp1
259
260	.balign	128, INV_INSN
261el1_serror_sp1:
262	b	el1_serror_sp1
263	check_vector_size el1_serror_sp1
264
265	/* -----------------------------------------------------
266	 * Lower EL using AArch64 : 0x400 - 0x580
267	 * -----------------------------------------------------
268	 */
269	.balign	128, INV_INSN
270el0_sync_a64:
271	restore_mapping
272
273	mrs	x2, esr_el1
274	mrs	x3, sp_el0
275	lsr	x2, x2, #ESR_EC_SHIFT
276	cmp	x2, #ESR_EC_AARCH64_SVC
277	b.eq	el0_svc
278	b	el0_sync_abort
279	check_vector_size el0_sync_a64
280
281	.balign	128, INV_INSN
282el0_irq_a64:
283	restore_mapping
284
285	b	elx_irq
286	check_vector_size el0_irq_a64
287
288	.balign	128, INV_INSN
289el0_fiq_a64:
290	restore_mapping
291
292	b	elx_fiq
293	check_vector_size el0_fiq_a64
294
295	.balign	128, INV_INSN
296el0_serror_a64:
297	b	el0_serror_a64
298	check_vector_size el0_serror_a64
299
300	/* -----------------------------------------------------
301	 * Lower EL using AArch32 : 0x600 - 0x780
302	 * -----------------------------------------------------
303	 */
304	.balign	128, INV_INSN
305el0_sync_a32:
306	restore_mapping
307
308	mrs	x2, esr_el1
309	mrs	x3, sp_el0
310	lsr	x2, x2, #ESR_EC_SHIFT
311	cmp	x2, #ESR_EC_AARCH32_SVC
312	b.eq	el0_svc
313	b	el0_sync_abort
314	check_vector_size el0_sync_a32
315
316	.balign	128, INV_INSN
317el0_irq_a32:
318	restore_mapping
319
320	b	elx_irq
321	check_vector_size el0_irq_a32
322
323	.balign	128, INV_INSN
324el0_fiq_a32:
325	restore_mapping
326
327	b	elx_fiq
328	check_vector_size el0_fiq_a32
329
330	.balign	128, INV_INSN
331el0_serror_a32:
332	b	el0_serror_a32
333	check_vector_size el0_serror_a32
334
335#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
336	.macro invalidate_branch_predictor
337		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
338		mov_imm	x0, SMCCC_ARCH_WORKAROUND_1
339		smc	#0
340		load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
341	.endm
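	/*
	 * The SMCCC_ARCH_WORKAROUND_1 call above asks the firmware to
	 * invalidate the branch predictor (the SMCCC 1.1 mitigation for
	 * Spectre variant 2). x0..x3 are spilled to struct
	 * thread_core_local around the SMC since the call may clobber
	 * them.
	 */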
342
343	.balign	2048, INV_INSN
344	.global thread_excp_vect_workaround
345thread_excp_vect_workaround:
346	/* -----------------------------------------------------
347	 * EL1 with SP0 : 0x0 - 0x180
348	 * -----------------------------------------------------
349	 */
350	.balign	128, INV_INSN
351workaround_el1_sync_sp0:
352	b	el1_sync_sp0
353	check_vector_size workaround_el1_sync_sp0
354
355	.balign	128, INV_INSN
356workaround_el1_irq_sp0:
357	b	el1_irq_sp0
358	check_vector_size workaround_el1_irq_sp0
359
360	.balign	128, INV_INSN
361workaround_el1_fiq_sp0:
362	b	el1_fiq_sp0
363	check_vector_size workaround_el1_fiq_sp0
364
365	.balign	128, INV_INSN
366workaround_el1_serror_sp0:
367	b	el1_serror_sp0
368	check_vector_size workaround_el1_serror_sp0
369
370	/* -----------------------------------------------------
371	 * Current EL with SP1: 0x200 - 0x380
372	 * -----------------------------------------------------
373	 */
374	.balign	128, INV_INSN
375workaround_el1_sync_sp1:
376	b	workaround_el1_sync_sp1
377	check_vector_size workaround_el1_sync_sp1
378
379	.balign	128, INV_INSN
380workaround_el1_irq_sp1:
381	b	workaround_el1_irq_sp1
382	check_vector_size workaround_el1_irq_sp1
383
384	.balign	128, INV_INSN
385workaround_el1_fiq_sp1:
386	b	workaround_el1_fiq_sp1
387	check_vector_size workaround_el1_fiq_sp1
388
389	.balign	128, INV_INSN
390workaround_el1_serror_sp1:
391	b	workaround_el1_serror_sp1
392	check_vector_size workaround_el1_serror_sp1
393
394	/* -----------------------------------------------------
395	 * Lower EL using AArch64 : 0x400 - 0x580
396	 * -----------------------------------------------------
397	 */
398	.balign	128, INV_INSN
399workaround_el0_sync_a64:
400	invalidate_branch_predictor
401	b	el0_sync_a64
402	check_vector_size workaround_el0_sync_a64
403
404	.balign	128, INV_INSN
405workaround_el0_irq_a64:
406	invalidate_branch_predictor
407	b	el0_irq_a64
408	check_vector_size workaround_el0_irq_a64
409
410	.balign	128, INV_INSN
411workaround_el0_fiq_a64:
412	invalidate_branch_predictor
413	b	el0_fiq_a64
414	check_vector_size workaround_el0_fiq_a64
415
416	.balign	128, INV_INSN
417workaround_el0_serror_a64:
418	b	workaround_el0_serror_a64
419	check_vector_size workaround_el0_serror_a64
420
421	/* -----------------------------------------------------
422	 * Lower EL using AArch32 : 0x600 - 0x780
423	 * -----------------------------------------------------
424	 */
425	.balign	128, INV_INSN
426workaround_el0_sync_a32:
427	invalidate_branch_predictor
428	b	el0_sync_a32
429	check_vector_size workaround_el0_sync_a32
430
431	.balign	128, INV_INSN
432workaround_el0_irq_a32:
433	invalidate_branch_predictor
434	b	el0_irq_a32
435	check_vector_size workaround_el0_irq_a32
436
437	.balign	128, INV_INSN
438workaround_el0_fiq_a32:
439	invalidate_branch_predictor
440	b	el0_fiq_a32
441	check_vector_size workaround_el0_fiq_a32
442
443	.balign	128, INV_INSN
444workaround_el0_serror_a32:
445	b	workaround_el0_serror_a32
446	check_vector_size workaround_el0_serror_a32
447#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
448
449/*
450 * We're keeping this code in the same section as the vector to make sure
451 * that it's always available.
452 */
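/*
 * eret_to_el0 is the common exit path to user space. With
 * CFG_CORE_UNMAP_CORE_AT_EL0 it is the mirror image of restore_mapping:
 * it points VBAR_EL1 at the reduced-mapping alias of the vector, jumps
 * into the reduced mapping, reloads x0/x1 from struct thread_core_local,
 * switches TTBR0_EL1 to the user ASID and the table without the full
 * kernel mapping, applies the Spectre SP offset when enabled and erets.
 */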
453eret_to_el0:
454
455#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
456	/* Point VBAR_EL1 at the vector in the reduced mapping */
457	adr_l	x0, thread_user_kcode_offset
458	ldr	x0, [x0]
459	mrs	x1, vbar_el1
460	sub	x1, x1, x0
461	msr	vbar_el1, x1
462	isb
463
464#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
465	/* Store the SP offset in tpidr_el1 to be used below to update SP */
466	adr_l	x1, thread_user_kdata_sp_offset
467	ldr	x1, [x1]
468	msr	tpidr_el1, x1
469#endif
470
471	/* Jump into the reduced mapping and continue execution */
472	ldr	x1, =1f
473	sub	x1, x1, x0
474	br	x1
4751:
476
477	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
478	msr	tpidrro_el0, x0
479
480	/* Update the mapping to exclude the full kernel mapping */
481	mrs	x0, ttbr0_el1
482	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
483	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
484	msr	ttbr0_el1, x0
485	isb
486
487#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
488	/*
489	 * Update the SP with thread_user_kdata_sp_offset as described in
490	 * init_user_kcode().
491	 */
492	mrs	x0, tpidr_el1
493	sub	sp, sp, x0
494#endif
495
496	mrs	x0, tpidrro_el0
497#else
498	mrs	x0, ttbr0_el1
499	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
500	msr	ttbr0_el1, x0
501	isb
502	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
503#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
504
505	return_from_exception
506
507	/*
508	 * void icache_inv_user_range(void *addr, size_t size);
509	 *
510	 * This function has to execute with the user space ASID active,
511	 * which means executing with the reduced mapping, so the code needs
512	 * to be located here together with the vector.
513	 */
514	.global icache_inv_user_range
515	.type icache_inv_user_range , %function
516icache_inv_user_range:
517	/* Mask all exceptions */
518	mrs	x6, daif	/* this register must be preserved */
519	msr	daifset, #DAIFBIT_ALL
520
521#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
522	/* Point VBAR_EL1 at the vector in the reduced mapping */
523	adr_l	x2, thread_user_kcode_offset
524	ldr	x2, [x2]
525	mrs	x4, vbar_el1	/* this register must be preserved */
526	sub	x3, x4, x2
527	msr	vbar_el1, x3
528	isb
529
530	/* Jump into the reduced mapping and continue execution */
531	ldr	x3, =1f
532	sub	x3, x3, x2
533	br	x3
5341:
535
536	/* Update the mapping to exclude the full kernel mapping */
537	mrs	x5, ttbr0_el1	/* this register must be preserved */
538	add	x2, x5, #CORE_MMU_L1_TBL_OFFSET
539	orr	x2, x2, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
540	msr	ttbr0_el1, x2
541	isb
542
543#else
544	mrs	x5, ttbr0_el1	/* this register must be preserved */
545	orr	x2, x5, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
546	msr	ttbr0_el1, x2
547	isb
548#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
549
550	/*
551	 * Do the actual icache invalidation
552	 */
553
554	/* Calculate minimum icache line size, result in x2 */
555	mrs	x3, ctr_el0
556	and	x3, x3, #CTR_IMINLINE_MASK
557	mov	x2, #CTR_WORD_SIZE
558	lsl	x2, x2, x3
559
560	add	x1, x0, x1
561	sub	x3, x2, #1
562	bic	x0, x0, x3
5631:
564	ic	ivau, x0
565	add	x0, x0, x2
566	cmp	x0, x1
567	b.lo    1b
568	dsb	ish
569
570#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
571	/* Update the mapping to use the full kernel mapping and ASID */
572	msr	ttbr0_el1, x5
573	isb
574
575	/* Jump into the full mapping and continue execution */
576	ldr	x0, =1f
577	br	x0
5781:
579
580	/* Point VBAR_EL1 at the vector in the full mapping */
581	msr	vbar_el1, x4
582	isb
583#else
584	/* switch to kernel mode ASID */
585	msr	ttbr0_el1, x5
586	isb
587#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
588
589	msr	daif, x6	/* restore exceptions */
590	ret	/* End of icache_inv_user_range() */
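	/*
	 * The invalidation loop above in rough C (illustrative only,
	 * ic_ivau()/dsb_ish() are placeholder names):
	 *
	 *	line = CTR_WORD_SIZE << (ctr_el0 & CTR_IMINLINE_MASK);
	 *	end = addr + size;
	 *	for (va = addr & ~(line - 1); va < end; va += line)
	 *		ic_ivau(va);	// invalidate icache line by VA
	 *	dsb_ish();
	 */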
591
592	/*
593	 * Make sure that literals are placed before the
594	 * thread_excp_vect_end label.
595	 */
596	.pool
597	.global thread_excp_vect_end
598thread_excp_vect_end:
599END_FUNC thread_excp_vect
600
601LOCAL_FUNC el0_svc , :
602	/* get pointer to current thread context in x0 */
603	get_thread_ctx sp, 0, 1, 2
604	mrs	x1, tpidr_el0
605	str	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
606	/* load saved kernel sp */
607	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
608	/* Keep pointer to initial record in x1 */
609	mov	x1, sp
610	/* Switch to SP_EL0 and restore kernel sp */
611	msr	spsel, #0
612	mov	x2, sp	/* Save SP_EL0 */
613	mov	sp, x0
614
615	/* Make room for struct thread_svc_regs */
616	sub	sp, sp, #THREAD_SVC_REG_SIZE
617	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]
618
619	/* Restore x0-x3 */
620	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
621	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]
622
623	/* Prepare the argument for the handler */
624	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
625	mrs	x0, elr_el1
626	mrs	x1, spsr_el1
627	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
628	mov	x0, sp
629
630	/*
631	 * Unmask native interrupts, SError, and debug exceptions since we have
632	 * nothing left in sp_el1. Note that the SVC handler is expected to
633	 * re-enable foreign interrupts by itself.
634	 */
635#if defined(CFG_ARM_GICV3)
636	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
637#else
638	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
639#endif
640
641	/* Call the handler */
642	bl	thread_svc_handler
643
644	/* Mask all maskable exceptions since we're switching back to sp_el1 */
645	msr	daifset, #DAIFBIT_ALL
646
647	/*
648	 * Save the kernel sp we had at the beginning of this function.
649	 * This is needed when this TA has called another TA, because
650	 * __thread_enter_user_mode() also saves the stack pointer in this
651	 * field.
652	 */
653	msr	spsel, #1
654	get_thread_ctx sp, 0, 1, 2
655	msr	spsel, #0
656	add	x1, sp, #THREAD_SVC_REG_SIZE
657	str	x1, [x0, #THREAD_CTX_KERN_SP]
658
659	/* Restore registers to the required state and return */
660	ldr	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
661	msr	tpidr_el0, x1
662	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
663	msr	elr_el1, x0
664	msr	spsr_el1, x1
665	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
666	mov	x30, sp
667	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
668	mov	sp, x0
669	b_if_spsr_is_el0 w1, 1f
670	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
671	ldr	x30, [x30, #THREAD_SVC_REG_X30]
672
673	return_from_exception
674
6751:	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
676	ldr	x30, [x30, #THREAD_SVC_REG_X30]
677
678	msr	spsel, #1
679	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
680	b	eret_to_el0
681END_FUNC el0_svc
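/*
 * Flow of el0_svc above: the registers the SVC ABI needs (x0..x14, x30,
 * sp_el0, elr_el1 and spsr_el1) are saved in a struct thread_svc_regs on
 * the kernel stack of the current thread, native interrupts, SError and
 * debug exceptions are unmasked and thread_svc_handler() is called with
 * a pointer to that struct. On return the possibly updated values are
 * reloaded from the struct and, depending on the SPSR it carries,
 * execution continues at EL1 or goes back to user space via eret_to_el0.
 */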
682
683LOCAL_FUNC el1_sync_abort , :
684	mov	x0, sp
685	msr	spsel, #0
686	mov	x3, sp		/* Save original sp */
687
688	/*
689	 * Update core local flags.
690	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
691	 */
692	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
693	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
694	orr	w1, w1, #THREAD_CLF_ABORT
695	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
696			.Lsel_tmp_sp
697
698	/* Select abort stack */
699	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
700	b	.Lset_sp
701
702.Lsel_tmp_sp:
703	/* Select tmp stack */
704	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
705	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */
706
707.Lset_sp:
708	mov	sp, x2
709	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
710
711	/*
712	 * Save state on stack
713	 */
714	sub	sp, sp, #THREAD_ABT_REGS_SIZE
715	mrs	x2, spsr_el1
716	/* Store spsr, sp_el0 */
717	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
718	/* Store original x0, x1 */
719	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
720	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
721	/* Store original x2, x3 and x4 to x29 */
722	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
723	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
724	/* Store x30, elr_el1 */
725	mrs	x0, elr_el1
726	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
727
728	/*
729	 * Call handler
730	 */
731	mov	x0, #0
732	mov	x1, sp
733	bl	abort_handler
734
735	/*
736	 * Restore state from stack
737	 */
738	/* Load x30, elr_el1 */
739	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
740	msr	elr_el1, x0
741	/* Load x0 to x29 */
742	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
743	/* Switch to SP_EL1 */
744	msr	spsel, #1
745	/* Save x0 to x3 in CORE_LOCAL */
746	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
747	/* Restore spsr_el1 and sp_el0 */
748	mrs	x3, sp_el0
749	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
750	msr	spsr_el1, x0
751	msr	sp_el0, x1
752
753	/* Update core local flags */
754	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
755	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
756	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
757
758	/* Restore x0 to x3 */
759	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
760
761	/* Return from exception */
762	return_from_exception
763END_FUNC el1_sync_abort
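/*
 * Note on the THREAD_CORE_LOCAL_FLAGS handling above: the flags word is
 * used as a small stack of states, each entry THREAD_CLF_SAVED_SHIFT
 * bits wide, pushed with a left shift on exception entry and popped with
 * a right shift on exit. If the previous state already had
 * THREAD_CLF_ABORT set we're handling an abort taken from the abort
 * handler itself, so the tmp stack is selected instead of the abort
 * stack.
 */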
764
765	/* sp_el0 in x3 */
766LOCAL_FUNC el0_sync_abort , :
767	/*
768	 * Update core local flags
769	 */
770	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
771	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
772	orr	w1, w1, #THREAD_CLF_ABORT
773	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
774
775	/*
776	 * Save state on stack
777	 */
778
779	/* load abt_stack_va_end */
780	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
781	/* Keep pointer to initial record in x0 */
782	mov	x0, sp
783	/* Switch to SP_EL0 */
784	msr	spsel, #0
785	mov	sp, x1
786	sub	sp, sp, #THREAD_ABT_REGS_SIZE
787	mrs	x2, spsr_el1
788	/* Store spsr, sp_el0 */
789	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
790	/* Store original x0, x1 */
791	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
792	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
793	/* Store original x2, x3 and x4 to x29 */
794	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
795	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
796	/* Store x30, elr_el1 */
797	mrs	x0, elr_el1
798	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
799
800	/*
801	 * Call handler
802	 */
803	mov	x0, #0
804	mov	x1, sp
805	bl	abort_handler
806
807	/*
808	 * Restore state from stack
809	 */
810
811	/* Load x30, elr_el1 */
812	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
813	msr	elr_el1, x0
814	/* Load x0 to x29 */
815	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
816	/* Switch to SP_EL1 */
817	msr	spsel, #1
818	/* Save x0 to x3 in CORE_LOCAL */
819	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
820	/* Restore spsr_el1 and sp_el0 */
821	mrs	x3, sp_el0
822	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
823	msr	spsr_el1, x0
824	msr	sp_el0, x1
825
826	/* Update core local flags */
827	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
828	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
829	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
830
831	/* Restore x2 to x3 */
832	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
833
834	b_if_spsr_is_el0 w0, 1f
835
836	/* Restore x0 to x1 */
837	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
838
839	/* Return from exception */
840	return_from_exception
8411:	b	eret_to_el0
842END_FUNC el0_sync_abort
843
844/* The handler of foreign interrupts. */
845.macro foreign_intr_handler mode:req
846	/*
847	 * Update core local flags
848	 */
849	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
850	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
851	orr	w1, w1, #THREAD_CLF_TMP
852	.ifc	\mode\(),fiq
853	orr	w1, w1, #THREAD_CLF_FIQ
854	.else
855	orr	w1, w1, #THREAD_CLF_IRQ
856	.endif
857	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
858
859	/* get pointer to current thread context in x0 */
860	get_thread_ctx sp, 0, 1, 2
861	/* Keep original SP_EL0 */
862	mrs	x2, sp_el0
863
864	/* Store original sp_el0 */
865	str	x2, [x0, #THREAD_CTX_REGS_SP]
866	/* Store tpidr_el0 */
867	mrs	x2, tpidr_el0
868	str	x2, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
869	/* Store x4..x30 */
870	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
871	/* Load original x0..x3 into x10..x13 */
872	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
873	/* Save original x0..x3 */
874	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13
875
876	/* load tmp_stack_va_end */
877	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
878	/* Switch to SP_EL0 */
879	msr	spsel, #0
880	mov	sp, x1
881
882#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
883	/*
884	 * Prevent leaking information about which entries have been used in
885	 * the cache. We're relying on the dispatcher in TF-A to take care of
886	 * the BTB.
887	 */
888	mov	x0, #DCACHE_OP_CLEAN_INV
889	bl	dcache_op_louis
890	ic	iallu
891#endif
892	/*
893	 * Mark current thread as suspended
894	 */
895	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
896	mrs	x1, spsr_el1
897	mrs	x2, elr_el1
898	bl	thread_state_suspend
899
900	/* Update core local flags */
901	/* Switch to SP_EL1 */
902	msr	spsel, #1
903	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
904	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
905	orr	w1, w1, #THREAD_CLF_TMP
906	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
907	msr	spsel, #0
908
909	/*
910	 * Note that we're exiting with SP_EL0 selected since the entry
911	 * functions expect to have SP_EL0 selected with the tmp stack
912	 * set.
913	 */
914
915	/* Passing thread index in w0 */
916	b	thread_foreign_intr_exit
917.endm
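/*
 * foreign_intr_handler saves the complete thread context (general
 * purpose registers, sp_el0 and tpidr_el0 in struct thread_ctx_regs,
 * spsr/elr via thread_state_suspend()), switches to the per-core tmp
 * stack and exits to the normal world through thread_foreign_intr_exit()
 * with the suspended thread index in w0, so the interrupt can be handled
 * there and the thread resumed later.
 */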
918
919/*
920 * This struct is never used from C; it's only here to visualize the
921 * layout.
922 *
923 * struct elx_nintr_rec {
924 * 	uint64_t x[19 - 4]; x4..x18
925 * 	uint64_t lr;
926 * 	uint64_t sp_el0;
927 * };
928 */
929#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
930#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
931#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
932#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)
933
934/* The handler of native interrupts. */
935.macro native_intr_handler mode:req
936	/*
937	 * Update core local flags
938	 */
939	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
940	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
941	.ifc	\mode\(),fiq
942	orr	w1, w1, #THREAD_CLF_FIQ
943	.else
944	orr	w1, w1, #THREAD_CLF_IRQ
945	.endif
946	orr	w1, w1, #THREAD_CLF_TMP
947	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
948
949	/* load tmp_stack_va_end */
950	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
951	/* Keep original SP_EL0 */
952	mrs	x2, sp_el0
953	/* Switch to SP_EL0 */
954	msr	spsel, #0
955	mov	sp, x1
956
957	/*
958	 * Save registers on the stack that can be corrupted by a call to
959	 * a C function
960	 */
961	/* Make room for struct elx_nintr_rec */
962	sub	sp, sp, #ELX_NINTR_REC_SIZE
963	/* Store x4..x18 */
964	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
965	/* Store lr and original sp_el0 */
966	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]
967
968	bl	thread_check_canaries
969	bl	itr_core_handler
970
971	/*
972	 * Restore registers
973	 */
974	/* Restore x4..x18 */
975	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
976	/* Load lr and original sp_el0 */
977	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
978	/* Restore SP_EL0 */
979	mov	sp, x2
980	/* Switch back to SP_EL1 */
981	msr	spsel, #1
982
983	/* Update core local flags */
984	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
985	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
986	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
987
988	mrs	x0, spsr_el1
989	/* Restore x2..x3 */
990	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
991	b_if_spsr_is_el0 w0, 1f
992
993	/* Restore x0..x1 */
994	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
995
996	/* Return from exception */
997	return_from_exception
9981:	b	eret_to_el0
999.endm
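/*
 * native_intr_handler, in contrast, keeps the thread context live: it
 * only spills x4..x18, lr and sp_el0 in a struct elx_nintr_rec on the
 * tmp stack, calls itr_core_handler() to service the interrupt in the
 * secure world and then erets straight back to the interrupted context
 * (via eret_to_el0 if it was user mode).
 */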
1000
1001LOCAL_FUNC elx_irq , :
1002#if defined(CFG_ARM_GICV3)
1003	native_intr_handler	irq
1004#else
1005	foreign_intr_handler	irq
1006#endif
1007END_FUNC elx_irq
1008
1009LOCAL_FUNC elx_fiq , :
1010#if defined(CFG_ARM_GICV3)
1011	foreign_intr_handler	fiq
1012#else
1013	native_intr_handler	fiq
1014#endif
1015END_FUNC elx_fiq
1016