xref: /optee_os/core/arch/arm/kernel/thread_a64.S (revision 997ff82731597ddcf8d6ad0fb3301adca8c0c6a8)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2015-2020, Linaro Limited
4 */
5
6#include <arm64_macros.S>
7#include <arm.h>
8#include <asm.S>
9#include <generated/asm-defines.h>
10#include <keep.h>
11#include <kernel/cache_helpers.h>
12#include <kernel/thread.h>
13#include <kernel/thread_private.h>
14#include <mm/core_mmu.h>
15#include <smccc.h>
16
17	.macro get_thread_ctx core_local, res, tmp0, tmp1
18		ldrh	w\tmp0, [\core_local, \
19				#THREAD_CORE_LOCAL_CURR_THREAD]
20		ldr	x\res, =threads
21		mov	x\tmp1, #THREAD_CTX_SIZE
22		madd	x\res, x\tmp0, x\tmp1, x\res
23	.endm
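
	/*
	 * Roughly equivalent C, for illustration only (it assumes the
	 * curr_thread member of struct thread_core_local and the threads[]
	 * array from thread_private.h / thread.c):
	 *
	 *	static struct thread_ctx *cur_thread_ctx(
	 *			struct thread_core_local *cl)
	 *	{
	 *		return &threads[cl->curr_thread];
	 *	}
	 *
	 * The macro form takes register numbers instead of C variables so
	 * that it can be used on exception entry paths where only registers
	 * are available.
	 */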
24
25	.macro return_from_exception
26		eret
27		/* Guard against speculation past ERET */
28		dsb nsh
29		isb
30	.endm
31
32	.macro b_if_spsr_is_el0 reg, label
33		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
34		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
35		b.eq	\label
36	.endm
37
38	.macro disable_pauth reg
39#ifdef	CFG_TA_PAUTH
40		mrs	\reg, sctlr_el1
41		bic     \reg, \reg, #SCTLR_ENIA
42#ifdef CFG_TA_BTI
43		orr     \reg, \reg, #(SCTLR_BT0 | SCTLR_BT1)
44#endif
45		msr	sctlr_el1, \reg
46#endif
47	.endm
48
49	.macro enable_pauth reg
50#ifdef	CFG_TA_PAUTH
51		mrs	\reg, sctlr_el1
52		orr     \reg, \reg, #SCTLR_ENIA
53#ifdef CFG_TA_BTI
54		bic     \reg, \reg, #(SCTLR_BT0 | SCTLR_BT1)
55#endif
56		msr	sctlr_el1, \reg
57#endif
58	.endm
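
	/*
	 * For illustration, the two macros above amount to roughly the
	 * following C (a sketch only, assuming read_sctlr_el1() and
	 * write_sctlr_el1() style helpers):
	 *
	 *	uint64_t sctlr = read_sctlr_el1();
	 *
	 *	if (enable) {
	 *		sctlr |= SCTLR_ENIA;
	 *		sctlr &= ~(SCTLR_BT0 | SCTLR_BT1);	// if CFG_TA_BTI
	 *	} else {
	 *		sctlr &= ~SCTLR_ENIA;
	 *		sctlr |= SCTLR_BT0 | SCTLR_BT1;		// if CFG_TA_BTI
	 *	}
	 *	write_sctlr_el1(sctlr);
	 *
	 * i.e. SCTLR_EL1.EnIA and (with CFG_TA_BTI) the BT0/BT1 bits are
	 * updated together on the way into and out of a TA.
	 */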
59
60/* void thread_resume(struct thread_ctx_regs *regs) */
61FUNC thread_resume , :
62	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
63	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
64	mov	sp, x1
65	msr	elr_el1, x2
66	msr	spsr_el1, x3
67	ldr	x1, [x0, THREAD_CTX_REGS_TPIDR_EL0]
68	msr	tpidr_el0, x1
69
70	b_if_spsr_is_el0 w3, 1f
71
72	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
73	ldr	x0, [x0, THREAD_CTX_REGS_X0]
74	return_from_exception
75
761:
77#ifdef	CFG_TA_PAUTH
78	/* Restore the PAC keys before returning to EL0 */
79	load_xregs x0, THREAD_CTX_REGS_APIAKEY_HI, 1, 2
80	write_apiakeyhi	x1
81	write_apiakeylo	x2
82#endif
83
84	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
85	ldr	x0, [x0, THREAD_CTX_REGS_X0]
86
87	msr	spsel, #1
88	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
89	b	eret_to_el0
90END_FUNC thread_resume
91
92FUNC thread_smc , :
93	smc	#0
94	ret
95END_FUNC thread_smc
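
/*
 * thread_smc() simply traps to the SMC handler: the C arguments are
 * already in x0..x7 as required by the SMC Calling Convention, SMC #0 is
 * issued, and whatever the handler left in x0 becomes the return value.
 * An illustrative call site (argument names here are made up):
 *
 *	ret = thread_smc(smc_func_id, arg1, arg2, arg3);
 */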
96
97/* void thread_smccc(struct thread_smc_args *arg_res) */
98FUNC thread_smccc , :
99	push	x0, xzr
100	mov	x8, x0
101	load_xregs x8, 0, 0, 7
102#ifdef CFG_CORE_SEL2_SPMC
103	hvc	#0
104#else
105	smc	#0
106#endif
107	pop	x8, xzr
108	store_xregs x8, 0, 0, 7
109	ret
110END_FUNC thread_smccc
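
/*
 * thread_smccc() performs one full SMCCC call cycle for the prototype
 * above: a0..a7 of *arg_res are loaded into x0..x7, SMC #0 (or HVC #0
 * when CFG_CORE_SEL2_SPMC selects an S-EL2 SPMC) is issued, and x0..x7
 * are stored back into the same structure.  Rough usage sketch, assuming
 * the a0..a7 members of struct thread_smc_args:
 *
 *	struct thread_smc_args args = { .a0 = func_id, .a1 = arg1 };
 *
 *	thread_smccc(&args);
 *	// args.a0..a7 now hold the values returned in x0..x7
 */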
111
112FUNC thread_init_vbar , :
113	msr	vbar_el1, x0
114	ret
115END_FUNC thread_init_vbar
116DECLARE_KEEP_PAGER thread_init_vbar
117
118/*
119 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
120 *				     uint32_t *exit_status0,
121 *				     uint32_t *exit_status1);
122 *
123 * This function depends on being called with exceptions masked.
124 */
125FUNC __thread_enter_user_mode , :
126	/*
127	 * Create and fill in the struct thread_user_mode_rec
128	 */
129	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
130	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 0, 2
131	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
132
133	/*
134	 * Save kern sp in x19
135	 * Switch to SP_EL1
136	 */
137	mov	x19, sp
138	msr	spsel, #1
139
140	/*
141	 * Save the kernel stack pointer in the thread context
142	 */
143	/* get pointer to current thread context */
144	get_thread_ctx sp, 21, 20, 22
145	/*
146	 * Save the kernel stack pointer to ensure that el0_svc() uses
147	 * the correct stack pointer
148	 */
149	str	x19, [x21, #THREAD_CTX_KERN_SP]
150
151	/*
152	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
153	 */
154	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
155	msr	sp_el0, x1
156	msr	elr_el1, x2
157	msr	spsr_el1, x3
158
159#ifdef	CFG_TA_PAUTH
160	/* Load APIAKEY */
161	load_xregs x0, THREAD_CTX_REGS_APIAKEY_HI, 1, 2
162	write_apiakeyhi	x1
163	write_apiakeylo	x2
164#endif
165
166	/*
167	 * Save the values for x0 and x1 in struct thread_core_local to be
168	 * restored later just before the eret.
169	 */
170	load_xregs x0, THREAD_CTX_REGS_X0, 1, 2
171	store_xregs sp, THREAD_CORE_LOCAL_X0, 1, 2
172
173	/* Load the rest of the general purpose registers */
174	load_xregs x0, THREAD_CTX_REGS_X2, 2, 30
175
176	/* Jump into user mode */
177	b eret_to_el0
178END_FUNC __thread_enter_user_mode
179DECLARE_KEEP_PAGER __thread_enter_user_mode
180
181/*
182 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
183 * 		uint32_t exit_status1);
184 * See description in thread.h
185 */
186FUNC thread_unwind_user_mode , :
187	/* Store the exit statuses */
188	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 3, 5
189	str	w1, [x4]
190	str	w2, [x5]
191	/* Save x19..x30 */
192	store_xregs x3, THREAD_CTX_REGS_X19, 19, 30
193	/* Restore x19..x30 */
194	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
195	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
196	/* Return from the call of thread_enter_user_mode() */
197	ret
198END_FUNC thread_unwind_user_mode
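
/*
 * How the two functions above pair up (an illustrative sketch; the
 * caller below is assumed to be thread_enter_user_mode() in thread.c):
 *
 *	uint32_t ret = __thread_enter_user_mode(regs, &exit_status0,
 *						&exit_status1);
 *
 * __thread_enter_user_mode() erets into user mode.  When user mode is
 * later left (syscall return or abort), thread_unwind_user_mode(ret, s0,
 * s1) writes s0/s1 through the exit status pointers saved in the
 * thread_user_mode_rec, restores x19..x30 and the stack pointer from
 * that record and returns, which makes the call above return "ret" to
 * the original caller.
 */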
199
200	/*
201	 * This macro verifies that a given vector doesn't exceed the
202	 * architectural limit of 32 instructions. It is meant to be placed
203	 * immediately after the last instruction in the vector and takes the
204	 * vector entry label as its parameter.
205	 */
206	.macro check_vector_size since
207	  .if (. - \since) > (32 * 4)
208	    .error "Vector exceeds 32 instructions"
209	  .endif
210	.endm
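
	/*
	 * 32 instructions * 4 bytes = 128 bytes, matching the
	 * ".balign 128, INV_INSN" placed before each vector entry below.
	 */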
211
212	.macro restore_mapping
213#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
214		/* Temporarily save x0, x1 */
215		msr	tpidr_el1, x0
216		msr	tpidrro_el0, x1
217
218		/* Update the mapping to use the full kernel mapping */
219		mrs	x0, ttbr0_el1
220		sub	x0, x0, #CORE_MMU_BASE_TABLE_OFFSET
221		/* switch to kernel mode ASID */
222		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
223		msr	ttbr0_el1, x0
224		isb
225
226		/* Jump into the full mapping and continue execution */
227		ldr	x0, =1f
228		br	x0
229	1:
230BTI(		bti	j)
231		/* Point to the vector into the full mapping */
232		adr_l	x0, thread_user_kcode_offset
233		ldr	x0, [x0]
234		mrs	x1, vbar_el1
235		add	x1, x1, x0
236		msr	vbar_el1, x1
237		isb
238
239#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
240		/*
241		 * Update the SP with thread_user_kdata_sp_offset as
242		 * described in init_user_kcode().
243		 */
244		adr_l	x0, thread_user_kdata_sp_offset
245		ldr	x0, [x0]
246		add	sp, sp, x0
247#endif
248
249		/* Restore x0, x1 */
250		mrs	x0, tpidr_el1
251		mrs	x1, tpidrro_el0
252		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
253#else
254		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
255		mrs	x0, ttbr0_el1
256		/* switch to kernel mode ASID */
257		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
258		msr	ttbr0_el1, x0
259		isb
260#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
261	.endm
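
	/*
	 * In rough C terms the CFG_CORE_UNMAP_CORE_AT_EL0 path above does
	 * (a sketch only, using read/write_ttbr0_el1() style helpers):
	 *
	 *	uint64_t ttbr = read_ttbr0_el1();
	 *
	 *	ttbr -= CORE_MMU_BASE_TABLE_OFFSET;	// back to the full table
	 *	ttbr &= ~BIT64(TTBR_ASID_SHIFT);	// kernel mode ASID
	 *	write_ttbr0_el1(ttbr);
	 *	isb();
	 *
	 * and then moves VBAR_EL1 up by thread_user_kcode_offset so that the
	 * rest of the exception handling runs in the full kernel mapping.
	 */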
262
263#define INV_INSN	0
264FUNC thread_excp_vect , : , default, 2048, nobti
265	/* -----------------------------------------------------
266	 * EL1 with SP0 : 0x0 - 0x180
267	 * -----------------------------------------------------
268	 */
269	.balign	128, INV_INSN
270el1_sync_sp0:
271	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
272	b	el1_sync_abort
273	check_vector_size el1_sync_sp0
274
275	.balign	128, INV_INSN
276el1_irq_sp0:
277	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
278	b	elx_irq
279	check_vector_size el1_irq_sp0
280
281	.balign	128, INV_INSN
282el1_fiq_sp0:
283	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
284	b	elx_fiq
285	check_vector_size el1_fiq_sp0
286
287	.balign	128, INV_INSN
288el1_serror_sp0:
289	b	el1_serror_sp0
290	check_vector_size el1_serror_sp0
291
292	/* -----------------------------------------------------
293	 * Current EL with SP1: 0x200 - 0x380
294	 * -----------------------------------------------------
295	 */
296	.balign	128, INV_INSN
297el1_sync_sp1:
298	b	el1_sync_sp1
299	check_vector_size el1_sync_sp1
300
301	.balign	128, INV_INSN
302el1_irq_sp1:
303	b	el1_irq_sp1
304	check_vector_size el1_irq_sp1
305
306	.balign	128, INV_INSN
307el1_fiq_sp1:
308	b	el1_fiq_sp1
309	check_vector_size el1_fiq_sp1
310
311	.balign	128, INV_INSN
312el1_serror_sp1:
313	b	el1_serror_sp1
314	check_vector_size el1_serror_sp1
315
316	/* -----------------------------------------------------
317	 * Lower EL using AArch64 : 0x400 - 0x580
318	 * -----------------------------------------------------
319	 */
320	.balign	128, INV_INSN
321el0_sync_a64:
322	restore_mapping
323	/* PAuth will be disabled later, else check_vector_size would fail */
324
325	mrs	x2, esr_el1
326	mrs	x3, sp_el0
327	lsr	x2, x2, #ESR_EC_SHIFT
328	cmp	x2, #ESR_EC_AARCH64_SVC
329	b.eq	el0_svc
330	b	el0_sync_abort
331	check_vector_size el0_sync_a64
332
333	.balign	128, INV_INSN
334el0_irq_a64:
335	restore_mapping
336	disable_pauth x1
337
338	b	elx_irq
339	check_vector_size el0_irq_a64
340
341	.balign	128, INV_INSN
342el0_fiq_a64:
343	restore_mapping
344	disable_pauth x1
345
346	b	elx_fiq
347	check_vector_size el0_fiq_a64
348
349	.balign	128, INV_INSN
350el0_serror_a64:
351	b   	el0_serror_a64
352	check_vector_size el0_serror_a64
353
354	/* -----------------------------------------------------
355	 * Lower EL using AArch32 : 0x600 - 0x780
356	 * -----------------------------------------------------
357	 */
358	.balign	128, INV_INSN
359el0_sync_a32:
360	restore_mapping
361
362	mrs	x2, esr_el1
363	mrs	x3, sp_el0
364	lsr	x2, x2, #ESR_EC_SHIFT
365	cmp	x2, #ESR_EC_AARCH32_SVC
366	b.eq	el0_svc
367	b	el0_sync_abort
368	check_vector_size el0_sync_a32
369
370	.balign	128, INV_INSN
371el0_irq_a32:
372	restore_mapping
373
374	b	elx_irq
375	check_vector_size el0_irq_a32
376
377	.balign	128, INV_INSN
378el0_fiq_a32:
379	restore_mapping
380
381	b	elx_fiq
382	check_vector_size el0_fiq_a32
383
384	.balign	128, INV_INSN
385el0_serror_a32:
386	b	el0_serror_a32
387	check_vector_size el0_serror_a32
388
389#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
390	.macro invalidate_branch_predictor
391		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
392		mov_imm	x0, SMCCC_ARCH_WORKAROUND_1
393		smc	#0
394		load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
395	.endm
396
397	.balign	2048, INV_INSN
398	.global thread_excp_vect_workaround
399thread_excp_vect_workaround:
400	/* -----------------------------------------------------
401	 * EL1 with SP0 : 0x0 - 0x180
402	 * -----------------------------------------------------
403	 */
404	.balign	128, INV_INSN
405workaround_el1_sync_sp0:
406	b	el1_sync_sp0
407	check_vector_size workaround_el1_sync_sp0
408
409	.balign	128, INV_INSN
410workaround_el1_irq_sp0:
411	b	el1_irq_sp0
412	check_vector_size workaround_el1_irq_sp0
413
414	.balign	128, INV_INSN
415workaround_el1_fiq_sp0:
416	b	el1_fiq_sp0
417	check_vector_size workaround_el1_fiq_sp0
418
419	.balign	128, INV_INSN
420workaround_el1_serror_sp0:
421	b	el1_serror_sp0
422	check_vector_size workaround_el1_serror_sp0
423
424	/* -----------------------------------------------------
425	 * Current EL with SP1: 0x200 - 0x380
426	 * -----------------------------------------------------
427	 */
428	.balign	128, INV_INSN
429workaround_el1_sync_sp1:
430	b	workaround_el1_sync_sp1
431	check_vector_size workaround_el1_sync_sp1
432
433	.balign	128, INV_INSN
434workaround_el1_irq_sp1:
435	b	workaround_el1_irq_sp1
436	check_vector_size workaround_el1_irq_sp1
437
438	.balign	128, INV_INSN
439workaround_el1_fiq_sp1:
440	b	workaround_el1_fiq_sp1
441	check_vector_size workaround_el1_fiq_sp1
442
443	.balign	128, INV_INSN
444workaround_el1_serror_sp1:
445	b	workaround_el1_serror_sp1
446	check_vector_size workaround_el1_serror_sp1
447
448	/* -----------------------------------------------------
449	 * Lower EL using AArch64 : 0x400 - 0x580
450	 * -----------------------------------------------------
451	 */
452	.balign	128, INV_INSN
453workaround_el0_sync_a64:
454	invalidate_branch_predictor
455	b	el0_sync_a64
456	check_vector_size workaround_el0_sync_a64
457
458	.balign	128, INV_INSN
459workaround_el0_irq_a64:
460	invalidate_branch_predictor
461	b	el0_irq_a64
462	check_vector_size workaround_el0_irq_a64
463
464	.balign	128, INV_INSN
465workaround_el0_fiq_a64:
466	invalidate_branch_predictor
467	b	el0_fiq_a64
468	check_vector_size workaround_el0_fiq_a64
469
470	.balign	128, INV_INSN
471workaround_el0_serror_a64:
472	b   	workaround_el0_serror_a64
473	check_vector_size workaround_el0_serror_a64
474
475	/* -----------------------------------------------------
476	 * Lower EL using AArch32 : 0x600 - 0x780
477	 * -----------------------------------------------------
478	 */
479	.balign	128, INV_INSN
480workaround_el0_sync_a32:
481	invalidate_branch_predictor
482	b	el0_sync_a32
483	check_vector_size workaround_el0_sync_a32
484
485	.balign	128, INV_INSN
486workaround_el0_irq_a32:
487	invalidate_branch_predictor
488	b	el0_irq_a32
489	check_vector_size workaround_el0_irq_a32
490
491	.balign	128, INV_INSN
492workaround_el0_fiq_a32:
493	invalidate_branch_predictor
494	b	el0_fiq_a32
495	check_vector_size workaround_el0_fiq_a32
496
497	.balign	128, INV_INSN
498workaround_el0_serror_a32:
499	b	workaround_el0_serror_a32
500	check_vector_size workaround_el0_serror_a32
501#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
502
503/*
504 * We're keeping this code in the same section as the vector to make sure
505 * that it's always available.
506 */
507eret_to_el0:
508	enable_pauth x1
509
510#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
511	/* Point to the vector into the reduced mapping */
512	adr_l	x0, thread_user_kcode_offset
513	ldr	x0, [x0]
514	mrs	x1, vbar_el1
515	sub	x1, x1, x0
516	msr	vbar_el1, x1
517	isb
518
519#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
520	/* Store the SP offset in tpidr_el1 to be used below to update SP */
521	adr_l	x1, thread_user_kdata_sp_offset
522	ldr	x1, [x1]
523	msr	tpidr_el1, x1
524#endif
525
526	/* Jump into the reduced mapping and continue execution */
527	ldr	x1, =1f
528	sub	x1, x1, x0
529	br	x1
5301:
531BTI(	bti	j)
532	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
533	msr	tpidrro_el0, x0
534
535	/* Update the mapping to exclude the full kernel mapping */
536	mrs	x0, ttbr0_el1
537	add	x0, x0, #CORE_MMU_BASE_TABLE_OFFSET
538	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
539	msr	ttbr0_el1, x0
540	isb
541
542#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
543	/*
544	 * Update the SP with thread_user_kdata_sp_offset as described in
545	 * init_user_kcode().
546	 */
547	mrs	x0, tpidr_el1
548	sub	sp, sp, x0
549#endif
550
551	mrs	x0, tpidrro_el0
552#else
553	mrs	x0, ttbr0_el1
554	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
555	msr	ttbr0_el1, x0
556	isb
557	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
558#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
559
560	return_from_exception
561
562	/*
563	 * void icache_inv_user_range(void *addr, size_t size);
564	 *
565	 * This function has to execute with the user space ASID active,
566	 * which means executing with the reduced mapping, so the code needs
567	 * to be located here together with the vector.
568	 */
569	.global icache_inv_user_range
570	.type icache_inv_user_range , %function
571icache_inv_user_range:
572	/* Mask all exceptions */
573	mrs	x6, daif	/* this register must be preserved */
574	msr	daifset, #DAIFBIT_ALL
575
576#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
577	/* Point to the vector into the reduced mapping */
578	adr_l	x2, thread_user_kcode_offset
579	ldr	x2, [x2]
580	mrs	x4, vbar_el1	/* this register must be preserved */
581	sub	x3, x4, x2
582	msr	vbar_el1, x3
583	isb
584
585	/* Jump into the reduced mapping and continue execution */
586	ldr	x3, =1f
587	sub	x3, x3, x2
588	br	x3
5891:
590BTI(	bti	j)
591	/* Update the mapping to exclude the full kernel mapping */
592	mrs	x5, ttbr0_el1	/* this register must be preserved */
593	add	x2, x5, #CORE_MMU_BASE_TABLE_OFFSET
594	orr	x2, x2, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
595	msr	ttbr0_el1, x2
596	isb
597
598#else
599	mrs	x5, ttbr0_el1	/* this register must be preserved */
600	orr	x2, x5, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
601	msr	ttbr0_el1, x2
602	isb
603#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
604
605	/*
606	 * Do the actual icache invalidation
607	 */
608
609	/* Calculate minimum icache line size, result in x2 */
610	mrs	x3, ctr_el0
611	and	x3, x3, #CTR_IMINLINE_MASK
612	mov	x2, #CTR_WORD_SIZE
613	lsl	x2, x2, x3
614
615	add	x1, x0, x1
616	sub	x3, x2, #1
617	bic	x0, x0, x3
6181:
619	ic	ivau, x0
620	add	x0, x0, x2
621	cmp	x0, x1
622	b.lo    1b
623	dsb	ish
624
625#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
626	/* Update the mapping to use the full kernel mapping and ASID */
627	msr	ttbr0_el1, x5
628	isb
629
630	/* Jump into the full mapping and continue execution */
631	ldr	x0, =1f
632	br	x0
6331:
634BTI(	bti	j)
635	/* Point to the vector into the full mapping */
636	msr	vbar_el1, x4
637	isb
638#else
639	/* switch to kernel mode ASID */
640	msr	ttbr0_el1, x5
641	isb
642#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
643
644	msr	daif, x6	/* restore exceptions */
645	ret	/* End of icache_inv_user_range() */
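
	/*
	 * The invalidation loop above follows the usual invalidate-by-line
	 * pattern; in rough C (helper names below are illustrative only):
	 *
	 *	size_t line = CTR_WORD_SIZE << (read_ctr_el0() &
	 *					CTR_IMINLINE_MASK);
	 *	vaddr_t va = (vaddr_t)addr & ~(line - 1);
	 *	vaddr_t end = (vaddr_t)addr + size;
	 *
	 *	while (va < end) {
	 *		ic_ivau(va);	// IC IVAU: invalidate by VA to PoU
	 *		va += line;
	 *	}
	 *	dsb_ish();
	 */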
646
647	/*
648	 * Make sure that literals are placed before the
649	 * thread_excp_vect_end label.
650	 */
651	.pool
652	.global thread_excp_vect_end
653thread_excp_vect_end:
654END_FUNC thread_excp_vect
655
656LOCAL_FUNC el0_svc , :
657	disable_pauth x1
658	/* get pointer to current thread context in x0 */
659	get_thread_ctx sp, 0, 1, 2
660	mrs	x1, tpidr_el0
661	str	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
662	/* load saved kernel sp */
663	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
664	/* Keep pointer to initial record in x1 */
665	mov	x1, sp
666	/* Switch to SP_EL0 and restore kernel sp */
667	msr	spsel, #0
668	mov	x2, sp	/* Save SP_EL0 */
669	mov	sp, x0
670
671	/* Make room for struct thread_svc_regs */
672	sub	sp, sp, #THREAD_SVC_REG_SIZE
673	stp	x30,x2, [sp, #THREAD_SVC_REG_X30]
674
675	/* Restore x0-x3 */
676	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
677	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]
678
679	/* Prepare the argument for the handler */
680	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
681	mrs	x0, elr_el1
682	mrs	x1, spsr_el1
683	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1
684
685#ifdef CFG_TA_PAUTH
686	/* Save APIAKEY */
687	read_apiakeyhi	x0
688	read_apiakeylo	x1
689	store_xregs sp, THREAD_SVC_REG_APIAKEY_HI, 0, 1
690#endif
691
692	mov	x0, sp
693
694	/*
695	 * Unmask native interrupts, Serror, and debug exceptions since we have
696	 * nothing left in sp_el1. Note that the SVC handler is expected to
697	 * re-enable foreign interrupts by itself.
698	 */
699#if defined(CFG_ARM_GICV3)
700	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
701#else
702	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
703#endif
704
705	/* Call the handler */
706	bl	thread_svc_handler
707
708	/* Mask all maskable exceptions since we're switching back to sp_el1 */
709	msr	daifset, #DAIFBIT_ALL
710
711	/*
712	 * Save the kernel sp we had at the beginning of this function.
713	 * This is needed when this TA has called another TA because
714	 * __thread_enter_user_mode() also saves the stack pointer in this
715	 * field.
716	 */
717	msr	spsel, #1
718	get_thread_ctx sp, 0, 1, 2
719	msr	spsel, #0
720	add	x1, sp, #THREAD_SVC_REG_SIZE
721	str	x1, [x0, #THREAD_CTX_KERN_SP]
722
723	/* Restore registers to the required state and return */
724	ldr	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
725	msr	tpidr_el0, x1
726	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
727	msr	elr_el1, x0
728	msr	spsr_el1, x1
729	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
730	mov	x30, sp
731	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
732	mov	sp, x0
733	b_if_spsr_is_el0 w1, 1f
734	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
735	ldr	x30, [x30, #THREAD_SVC_REG_X30]
736
737	return_from_exception
738
7391:
740#ifdef	CFG_TA_PAUTH
741	/* Restore APIAKEY */
742	load_xregs x30, THREAD_SVC_REG_APIAKEY_HI, 0, 1
743	write_apiakeyhi	x0
744	write_apiakeylo	x1
745#endif
746
747	ldp	x0, x1, [x30, THREAD_SVC_REG_X0]
748	ldr	x30, [x30, #THREAD_SVC_REG_X30]
749
750	msr	spsel, #1
751	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
752	b	eret_to_el0
753END_FUNC el0_svc
754
755LOCAL_FUNC el1_sync_abort , :
756	mov	x0, sp
757	msr	spsel, #0
758	mov	x3, sp		/* Save original sp */
759
760	/*
761	 * Update core local flags.
762	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
763	 */
764	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
765	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
766	orr	w1, w1, #THREAD_CLF_ABORT
767	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
768			.Lsel_tmp_sp
769
770	/* Select abort stack */
771	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
772	b	.Lset_sp
773
774.Lsel_tmp_sp:
775	/* Select tmp stack */
776	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
777	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */
778
779.Lset_sp:
780	mov	sp, x2
781	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
782
783	/*
784	 * Save state on stack
785	 */
786	sub	sp, sp, #THREAD_ABT_REGS_SIZE
787	mrs	x2, spsr_el1
788	/* Store spsr, sp_el0 */
789	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
790	/* Store original x0, x1 */
791	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
792	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
793	/* Store original x2, x3 and x4 to x29 */
794	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
795	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
796	/* Store x30, elr_el1 */
797	mrs	x0, elr_el1
798	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
799
800	/*
801	 * Call handler
802	 */
803	mov	x0, #0
804	mov	x1, sp
805	bl	abort_handler
806
807	/*
808	 * Restore state from stack
809	 */
810	/* Load x30, elr_el1 */
811	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
812	msr	elr_el1, x0
813	/* Load x0 to x29 */
814	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
815	/* Switch to SP_EL1 */
816	msr	spsel, #1
817	/* Save x0 to x3 in CORE_LOCAL */
818	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
819	/* Restore spsr_el1 and sp_el0 */
820	mrs	x3, sp_el0
821	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
822	msr	spsr_el1, x0
823	msr	sp_el0, x1
824
825	/* Update core local flags */
826	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
827	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
828	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
829
830	/* Restore x0 to x3 */
831	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
832
833	/* Return from exception */
834	return_from_exception
835END_FUNC el1_sync_abort
836
837	/* sp_el0 in x3 */
838LOCAL_FUNC el0_sync_abort , :
839	disable_pauth x1
840	/*
841	 * Update core local flags
842	 */
843	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
844	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
845	orr	w1, w1, #THREAD_CLF_ABORT
846	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
847
848	/*
849	 * Save state on stack
850	 */
851
852	/* load abt_stack_va_end */
853	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
854	/* Keep pointer to initial record in x0 */
855	mov	x0, sp
856	/* Switch to SP_EL0 */
857	msr	spsel, #0
858	mov	sp, x1
859	sub	sp, sp, #THREAD_ABT_REGS_SIZE
860	mrs	x2, spsr_el1
861	/* Store spsr, sp_el0 */
862	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
863	/* Store original x0, x1 */
864	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
865	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
866	/* Store original x2, x3 and x4 to x29 */
867	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
868	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
869	/* Store x30, elr_el1 */
870	mrs	x0, elr_el1
871	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]
872
873	/*
874	 * Call handler
875	 */
876	mov	x0, #0
877	mov	x1, sp
878	bl	abort_handler
879
880	/*
881	 * Restore state from stack
882	 */
883
884	/* Load x30, elr_el1 */
885	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
886	msr	elr_el1, x0
887	/* Load x0 to x29 */
888	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
889	/* Switch to SP_EL1 */
890	msr	spsel, #1
891	/* Save x0 to x3 in CORE_LOCAL */
892	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
893	/* Restore spsr_el1 and sp_el0 */
894	mrs	x3, sp_el0
895	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
896	msr	spsr_el1, x0
897	msr	sp_el0, x1
898
899	/* Update core local flags */
900	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
901	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
902	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
903
904	/* Restore x2 to x3 */
905	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
906
907	b_if_spsr_is_el0 w0, 1f
908
909	/* Restore x0 to x1 */
910	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
911
912	/* Return from exception */
913	return_from_exception
9141:	b	eret_to_el0
915END_FUNC el0_sync_abort
916
917/* The handler of foreign interrupts. */
918.macro foreign_intr_handler mode:req
919	/*
920	 * Update core local flags
921	 */
922	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
923	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
924	orr	w1, w1, #THREAD_CLF_TMP
925	.ifc	\mode\(),fiq
926	orr	w1, w1, #THREAD_CLF_FIQ
927	.else
928	orr	w1, w1, #THREAD_CLF_IRQ
929	.endif
930	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
931
932	/* get pointer to current thread context in x0 */
933	get_thread_ctx sp, 0, 1, 2
934	/* Keep original SP_EL0 */
935	mrs	x2, sp_el0
936
937	/* Store original sp_el0 */
938	str	x2, [x0, #THREAD_CTX_REGS_SP]
939	/* Store tpidr_el0 */
940	mrs	x2, tpidr_el0
941	str	x2, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
942	/* Store x4..x30 */
943	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
944	/* Load original x0..x3 into x10..x13 */
945	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
946	/* Save original x0..x3 */
947	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13
948
949	/* load tmp_stack_va_end */
950	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
951	/* Switch to SP_EL0 */
952	msr	spsel, #0
953	mov	sp, x1
954
955#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
956	/*
957	 * Prevent leaking information about which entries have been used in
958	 * the cache. We're relying on the dispatcher in TF-A to take care of
959	 * the BTB.
960	 */
961	mov	x0, #DCACHE_OP_CLEAN_INV
962	bl	dcache_op_louis
963	ic	iallu
964#endif
965	/*
966	 * Mark current thread as suspended
967	 */
968	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
969	mrs	x1, spsr_el1
970	mrs	x2, elr_el1
971	bl	thread_state_suspend
972
973	/* Update core local flags */
974	/* Switch to SP_EL1 */
975	msr	spsel, #1
976	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
977	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
978	orr	w1, w1, #THREAD_CLF_TMP
979	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
980	msr	spsel, #0
981
982	/*
983	 * Note that we're exiting with SP_EL0 selected since the entry
984	 * functions expect to have SP_EL0 selected with the tmp stack
985	 * set.
986	 */
987
988	/* Passing thread index in w0 */
989	b	thread_foreign_intr_exit
990.endm
991
992/*
993 * This struct is never used from C; it's only here to visualize the
994 * layout.
995 *
996 * struct elx_nintr_rec {
997 * 	uint64_t x[19 - 4]; x4..x18
998 * 	uint64_t lr;
999 * 	uint64_t sp_el0;
1000 * };
1001 */
1002#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
1003#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
1004#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
1005#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)
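
/*
 * With the defines above the offsets resolve to: x4 at 0, x18 at 112,
 * lr at 128 and sp_el0 at 136, for a total size of 144 bytes, which
 * keeps SP 16-byte aligned when the record is pushed.
 */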
1006
1007/* The handler of native interrupts. */
1008.macro native_intr_handler mode:req
1009	/*
1010	 * Update core local flags
1011	 */
1012	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1013	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
1014	.ifc	\mode\(),fiq
1015	orr	w1, w1, #THREAD_CLF_FIQ
1016	.else
1017	orr	w1, w1, #THREAD_CLF_IRQ
1018	.endif
1019	orr	w1, w1, #THREAD_CLF_TMP
1020	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
1021
1022	/* load tmp_stack_va_end */
1023	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
1024	/* Keep original SP_EL0 */
1025	mrs	x2, sp_el0
1026	/* Switch to SP_EL0 */
1027	msr	spsel, #0
1028	mov	sp, x1
1029
1030	/*
1031	 * Save on the stack the registers that can be corrupted by a call
1032	 * to a C function
1033	 */
1034	/* Make room for struct elx_nintr_rec */
1035	sub	sp, sp, #ELX_NINTR_REC_SIZE
1036	/* Store x4..x18 */
1037	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
1038	/* Store lr and original sp_el0 */
1039	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]
1040
1041	bl	thread_check_canaries
1042	bl	itr_core_handler
1043
1044	/*
1045	 * Restore registers
1046	 */
1047	/* Restore x4..x18 */
1048	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
1049	/* Load lr and original sp_el0 */
1050	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
1051	/* Restore SP_EL0 */
1052	mov	sp, x2
1053	/* Switch back to SP_EL1 */
1054	msr	spsel, #1
1055
1056	/* Update core local flags */
1057	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
1058	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
1059	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
1060
1061	mrs	x0, spsr_el1
1062	/* Restore x2..x3 */
1063	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
1064	b_if_spsr_is_el0 w0, 1f
1065
1066	/* Restore x0..x1 */
1067	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
1068
1069	/* Return from exception */
1070	return_from_exception
10711:	b	eret_to_el0
1072.endm
1073
1074LOCAL_FUNC elx_irq , :
1075#if defined(CFG_ARM_GICV3)
1076	native_intr_handler	irq
1077#else
1078	foreign_intr_handler	irq
1079#endif
1080END_FUNC elx_irq
1081
1082LOCAL_FUNC elx_fiq , :
1083#if defined(CFG_ARM_GICV3)
1084	foreign_intr_handler	fiq
1085#else
1086	native_intr_handler	fiq
1087#endif
1088END_FUNC elx_fiq
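
/*
 * Routing summary for the two functions above: with CFG_ARM_GICV3 the
 * native interrupt (handled here in OP-TEE) arrives as IRQ and the
 * foreign interrupt (handled by the normal world) as FIQ; without
 * CFG_ARM_GICV3 the roles of IRQ and FIQ are swapped.
 */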
1089
1090BTI(emit_aarch64_feature_1_and     GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
1091