/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2017, Linaro Limited
 */

#include <arm.h>
#include <arm64_macros.S>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread_defs.h>
#include <mm/core_mmu.h>
#include <smccc.h>

#include "thread_private.h"

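	/*
	 * Roughly, in C (illustrative only):
	 *
	 *	res = (vaddr_t)threads +
	 *	      core_local->curr_thread * THREAD_CTX_SIZE;
	 *
	 * that is, compute a pointer to the struct thread_ctx of the
	 * thread currently running on this core.
	 */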
	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldrh	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		ldr	x\res, =threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm

	.macro return_from_exception
		eret
		/* Guard against speculation past ERET */
		dsb nsh
		isb
	.endm

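	/*
	 * Branch to \label if \reg (an SPSR value) indicates an exception
	 * taken from EL0: either AArch32 state (secure EL1 here is AArch64
	 * only) or AArch64 with the EL field equal to EL0.
	 */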
	.macro b_if_spsr_is_el0 reg, label
		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
		b.eq	\label
	.endm

/* void thread_resume(struct thread_ctx_regs *regs) */
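/*
 * Restores the state saved in *regs and returns to it with ERET. If
 * SPSR in the context indicates a return to EL0, the final x0/x1
 * restore and the ERET are instead done through eret_to_el0 so that
 * the user mapping/ASID is selected first. (Summary of the code below.)
 */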
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3
	ldr	x1, [x0, THREAD_CTX_REGS_TPIDR_EL0]
	msr	tpidr_el0, x1

	b_if_spsr_is_el0 w3, 1f

	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]
	return_from_exception

1:	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, THREAD_CTX_REGS_X0]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC thread_resume

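/*
 * thread_smc() simply issues an SMC: arguments and results are passed
 * in registers exactly as set up by the caller, per the SMC Calling
 * Convention.
 */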
FUNC thread_smc , :
	smc	#0
	ret
END_FUNC thread_smc

FUNC thread_init_vbar , :
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar
DECLARE_KEEP_PAGER thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 *
 * This function depends on being called with exceptions masked.
 */
FUNC __thread_enter_user_mode , :
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 0, 2
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Save the kernel sp in x19 and switch to SP_EL1
	 */
	mov	x19, sp
	msr	spsel, #1

	/*
	 * Save the kernel stack pointer in the thread context
	 */
	/* get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save the kernel stack pointer to ensure that el0_svc() uses
	 * the correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	msr	sp_el0, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3

	/*
	 * Save the values for x0 and x1 in struct thread_core_local to be
	 * restored later just before the eret.
	 */
	load_xregs x0, THREAD_CTX_REGS_X0, 1, 2
	store_xregs sp, THREAD_CORE_LOCAL_X0, 1, 2

	/* Load the rest of the general purpose registers */
	load_xregs x0, THREAD_CTX_REGS_X2, 2, 30

	/* Jump into user mode */
	b eret_to_el0
END_FUNC __thread_enter_user_mode
DECLARE_KEEP_PAGER __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 * 		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 3, 5
	str	w1, [x4]
	str	w2, [x5]
	/* Save x19..x30 */
	store_xregs x3, THREAD_CTX_REGS_X19, 19, 30
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes
	 * the vector entry point as its parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

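	/*
	 * With CFG_CORE_UNMAP_CORE_AT_EL0 user mode executes with a
	 * reduced translation table (at TTBR0 + CORE_MMU_L1_TBL_OFFSET)
	 * and the user ASID (TTBR_ASID bit set). On entry from EL0 this
	 * macro switches back to the full kernel mapping and kernel ASID,
	 * moves VBAR_EL1 to the vector's address in the full mapping,
	 * optionally (CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) adjusts SP by
	 * thread_user_kdata_sp_offset, and finally saves x0..x3 in struct
	 * thread_core_local. Without that config only the ASID needs to
	 * be switched. (Summary of the code below.)
	 */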
	.macro restore_mapping
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/* Temporarily save x0, x1 */
		msr	tpidr_el1, x0
		msr	tpidrro_el0, x1

		/* Update the mapping to use the full kernel mapping */
		mrs	x0, ttbr0_el1
		sub	x0, x0, #CORE_MMU_L1_TBL_OFFSET
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb

		/* Jump into the full mapping and continue execution */
		ldr	x0, =1f
		br	x0
	1:

		/* Point VBAR_EL1 at the vector in the full mapping */
		adr_l	x0, thread_user_kcode_offset
		ldr	x0, [x0]
		mrs	x1, vbar_el1
		add	x1, x1, x0
		msr	vbar_el1, x1
		isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
		/*
		 * Update the SP with thread_user_kdata_sp_offset as
		 * described in init_user_kcode().
		 */
		adr_l	x0, thread_user_kdata_sp_offset
		ldr	x0, [x0]
		add	sp, sp, x0
#endif

		/* Restore x0, x1 */
		mrs	x0, tpidr_el1
		mrs	x1, tpidrro_el0
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
#else
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mrs	x0, ttbr0_el1
		/* switch to kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	.endm

#define INV_INSN	0
	.align	11, INV_INSN
FUNC thread_excp_vect , :
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el1_sync_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size el1_sync_sp0

	.align	7, INV_INSN
el1_irq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el1_irq_sp0

	.align	7, INV_INSN
el1_fiq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el1_fiq_sp0

	.align	7, INV_INSN
el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el1_sync_sp1:
	b	el1_sync_sp1
	check_vector_size el1_sync_sp1

	.align	7, INV_INSN
el1_irq_sp1:
	b	el1_irq_sp1
	check_vector_size el1_irq_sp1

	.align	7, INV_INSN
el1_fiq_sp1:
	b	el1_fiq_sp1
	check_vector_size el1_fiq_sp1

	.align	7, INV_INSN
el1_serror_sp1:
	b	el1_serror_sp1
	check_vector_size el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el0_sync_a64:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a64

	.align	7, INV_INSN
el0_irq_a64:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a64

	.align	7, INV_INSN
el0_fiq_a64:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a64

	.align	7, INV_INSN
el0_serror_a64:
	b	el0_serror_a64
	check_vector_size el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
el0_sync_a32:
	restore_mapping

	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort
	check_vector_size el0_sync_a32

	.align	7, INV_INSN
el0_irq_a32:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a32

	.align	7, INV_INSN
el0_fiq_a32:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a32

	.align	7, INV_INSN
el0_serror_a32:
	b	el0_serror_a32
	check_vector_size el0_serror_a32

#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
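	/*
	 * Alternative vector used when CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	 * is enabled: on entries from a lower EL it first issues the
	 * SMCCC_ARCH_WORKAROUND_1 SMC so that firmware can invalidate the
	 * branch predictor, then branches to the corresponding handler in
	 * the vector above. (Summary of the code below.)
	 */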
	.macro invalidate_branch_predictor
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mov_imm	x0, SMCCC_ARCH_WORKAROUND_1
		smc	#0
		load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	.endm

	.align	11, INV_INSN
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el1_sync_sp0:
	b	el1_sync_sp0
	check_vector_size workaround_el1_sync_sp0

	.align	7, INV_INSN
workaround_el1_irq_sp0:
	b	el1_irq_sp0
	check_vector_size workaround_el1_irq_sp0

	.align	7, INV_INSN
workaround_el1_fiq_sp0:
	b	el1_fiq_sp0
	check_vector_size workaround_el1_fiq_sp0

	.align	7, INV_INSN
workaround_el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size workaround_el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1: 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el1_sync_sp1:
	b	workaround_el1_sync_sp1
	check_vector_size workaround_el1_sync_sp1

	.align	7, INV_INSN
workaround_el1_irq_sp1:
	b	workaround_el1_irq_sp1
	check_vector_size workaround_el1_irq_sp1

	.align	7, INV_INSN
workaround_el1_fiq_sp1:
	b	workaround_el1_fiq_sp1
	check_vector_size workaround_el1_fiq_sp1

	.align	7, INV_INSN
workaround_el1_serror_sp1:
	b	workaround_el1_serror_sp1
	check_vector_size workaround_el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el0_sync_a64:
	invalidate_branch_predictor
	b	el0_sync_a64
	check_vector_size workaround_el0_sync_a64

	.align	7, INV_INSN
workaround_el0_irq_a64:
	invalidate_branch_predictor
	b	el0_irq_a64
	check_vector_size workaround_el0_irq_a64

	.align	7, INV_INSN
workaround_el0_fiq_a64:
	invalidate_branch_predictor
	b	el0_fiq_a64
	check_vector_size workaround_el0_fiq_a64

	.align	7, INV_INSN
workaround_el0_serror_a64:
	b	workaround_el0_serror_a64
	check_vector_size workaround_el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7, INV_INSN
workaround_el0_sync_a32:
	invalidate_branch_predictor
	b	el0_sync_a32
	check_vector_size workaround_el0_sync_a32

	.align	7, INV_INSN
workaround_el0_irq_a32:
	invalidate_branch_predictor
	b	el0_irq_a32
	check_vector_size workaround_el0_irq_a32

	.align	7, INV_INSN
workaround_el0_fiq_a32:
	invalidate_branch_predictor
	b	el0_fiq_a32
	check_vector_size workaround_el0_fiq_a32

	.align	7, INV_INSN
workaround_el0_serror_a32:
	b	workaround_el0_serror_a32
	check_vector_size workaround_el0_serror_a32
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

/*
 * We're keeping this code in the same section as the vector to make sure
 * that it's always available.
 */
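/*
 * eret_to_el0 is the common exit path to user mode: with
 * CFG_CORE_UNMAP_CORE_AT_EL0 it moves VBAR_EL1 to the vector's address
 * in the reduced mapping, jumps into that mapping, switches TTBR0_EL1
 * to the reduced translation table and the user ASID, and finally
 * restores x0/x1 from struct thread_core_local before the ERET.
 * Without that config only the user ASID is selected.
 * (Summary of the code below.)
 */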
eret_to_el0:

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point VBAR_EL1 at the vector in the reduced mapping */
	adr_l	x0, thread_user_kcode_offset
	ldr	x0, [x0]
	mrs	x1, vbar_el1
	sub	x1, x1, x0
	msr	vbar_el1, x1
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/* Store the SP offset in tpidr_el1 to be used below to update SP */
	adr_l	x1, thread_user_kdata_sp_offset
	ldr	x1, [x1]
	msr	tpidr_el1, x1
#endif

	/* Jump into the reduced mapping and continue execution */
	ldr	x1, =1f
	sub	x1, x1, x0
	br	x1
1:

	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	msr	tpidrro_el0, x0

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x0, ttbr0_el1
	add	x0, x0, #CORE_MMU_L1_TBL_OFFSET
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/*
	 * Update the SP with thread_user_kdata_sp_offset as described in
	 * init_user_kcode().
	 */
	mrs	x0, tpidr_el1
	sub	sp, sp, x0
#endif

	mrs	x0, tpidrro_el0
#else
	mrs	x0, ttbr0_el1
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	return_from_exception

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active,
	 * which means executing with the reduced mapping, so the code
	 * needs to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	/* Mask all exceptions */
	mrs	x6, daif	/* this register must be preserved */
	msr	daifset, #DAIFBIT_ALL

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point VBAR_EL1 at the vector in the reduced mapping */
	adr_l	x2, thread_user_kcode_offset
	ldr	x2, [x2]
	mrs	x4, vbar_el1	/* this register must be preserved */
	sub	x3, x4, x2
	msr	vbar_el1, x3
	isb

	/* Jump into the reduced mapping and continue execution */
	ldr	x3, =1f
	sub	x3, x3, x2
	br	x3
1:

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x5, ttbr0_el1	/* this register must be preserved */
	add	x2, x5, #CORE_MMU_L1_TBL_OFFSET
	orr	x2, x2, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x2
	isb

#else
	mrs	x5, ttbr0_el1	/* this register must be preserved */
	orr	x2, x5, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in x2 */
	mrs	x3, ctr_el0
	and	x3, x3, #CTR_IMINLINE_MASK
	mov	x2, #CTR_WORD_SIZE
	lsl	x2, x2, x3

	add	x1, x0, x1
	sub	x3, x2, #1
	bic	x0, x0, x3
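	/*
	 * Roughly equivalent C, for illustration (addr/size are the
	 * x0/x1 arguments):
	 *
	 *	line = CTR_WORD_SIZE << (ctr_el0 & CTR_IMINLINE_MASK);
	 *	for (va = addr & ~(line - 1); va < addr + size; va += line)
	 *		asm("ic ivau, %0" : : "r"(va));
	 *	dsb(ish);
	 */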
1:
	ic	ivau, x0
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	ish

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Update the mapping to use the full kernel mapping and ASID */
	msr	ttbr0_el1, x5
	isb

	/* Jump into the full mapping and continue execution */
	ldr	x0, =1f
	br	x0
1:

	/* Point VBAR_EL1 at the vector in the full mapping */
	msr	vbar_el1, x4
	isb
#else
	/* switch to kernel mode ASID */
	msr	ttbr0_el1, x5
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	msr	daif, x6	/* restore exceptions */
	ret	/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect

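/*
 * el0_svc is entered from the EL0 sync vectors when ESR_EL1.EC reports
 * an SVC. It switches to the thread's kernel stack, builds a struct
 * thread_svc_regs there with x0..x14, x30, the user sp_el0, elr_el1
 * and spsr_el1, saves tpidr_el0 in the thread context, unmasks native
 * interrupts and calls thread_svc_handler(). On return the possibly
 * modified registers are restored and execution continues either at
 * EL1 or, via eret_to_el0, back in user mode.
 * (Summary of the code below.)
 */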
LOCAL_FUNC el0_svc , :
	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	mrs	x1, tpidr_el0
	str	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
	/* load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	mov	x0, sp

	/*
	 * Unmask native interrupts, SError, and debug exceptions since we
	 * have nothing left in sp_el1. Note that the SVC handler is
	 * expected to re-enable foreign interrupts by itself.
	 */
#if defined(CFG_ARM_GICV3)
	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
#else
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
#endif

	/* Call the handler */
	bl	thread_svc_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This matters when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	ldr	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
	msr	tpidr_el0, x1
	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
	mov	sp, x0
	b_if_spsr_is_el0 w1, 1f
	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	return_from_exception

1:	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC el0_svc

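/*
 * Handles synchronous exceptions taken from EL1: the register state is
 * saved in an abort register frame (THREAD_ABT_REG_*) on the abort
 * stack, or on the tmp stack if this abort is nested inside another
 * abort (THREAD_CLF_ABORT already set), abort_handler() is called and
 * the state is restored before returning.
 * (Summary of the code below.)
 */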
LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	return_from_exception
END_FUNC el1_sync_abort

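/*
 * Handles synchronous exceptions other than SVC taken from EL0. The
 * vector has already saved x0..x3 in struct thread_core_local and
 * passes the original sp_el0 in x3. The full state is saved in an
 * abort register frame on the abort stack, abort_handler() is called,
 * and the state is restored, going through eret_to_el0 when returning
 * to user mode. (Summary of the code below.)
 */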
	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x2 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3

	b_if_spsr_is_el0 w0, 1f

	/* Restore x0 to x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	return_from_exception
1:	b	eret_to_el0
END_FUNC el0_sync_abort

/* Handler for foreign interrupts. */
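/*
 * A foreign interrupt (one to be handled by the normal world) suspends
 * the current thread: the thread context is saved in struct
 * thread_ctx_regs, thread_state_suspend() is called and the core exits
 * to the normal world through thread_foreign_intr_exit() with the
 * thread index in w0, running on the tmp stack with SP_EL0 selected.
 * (Summary of the macro below.)
 */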
.macro foreign_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* Store tpidr_el0 */
	mrs	x2, tpidr_el0
	str	x2, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
	/* Store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which cache entries have
	 * been used. We're relying on the dispatcher in TF-A to take
	 * care of the BTB.
	 */
	mov	x0, #DCACHE_OP_CLEAN_INV
	bl	dcache_op_louis
	ic	iallu
#endif
	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * function expects to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	/* Passing thread index in w0 */
	b	thread_foreign_intr_exit
.endm

/*
 * This struct is never used from C; it's only here to visualize the
 * layout.
 *
 * struct elx_nintr_rec {
 * 	uint64_t x[19 - 4]; x4..x18
 * 	uint64_t lr;
 * 	uint64_t sp_el0;
 * };
 */
#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)

/* Handler for native interrupts. */
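/*
 * A native interrupt (one handled by OP-TEE itself) is handled in
 * place: x4..x18, lr and the original sp_el0 are saved in a struct
 * elx_nintr_rec on the tmp stack, itr_core_handler() is called and
 * the interrupted context is then resumed, going through eret_to_el0
 * if the interrupt was taken from user mode.
 * (Summary of the macro below.)
 */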
.macro native_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save registers on the stack that can be corrupted by a call to
	 * a C function
	 */
	/* Make room for struct elx_nintr_rec */
	sub	sp, sp, #ELX_NINTR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]

	bl	thread_check_canaries
	bl	itr_core_handler

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	mrs	x0, spsr_el1
	/* Restore x2..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
	b_if_spsr_is_el0 w0, 1f

	/* Restore x0..x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	return_from_exception
1:	b	eret_to_el0
.endm

LOCAL_FUNC elx_irq , :
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
END_FUNC elx_fiq