/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2020, Linaro Limited
 */

#include <arm64_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>
#include <smccc.h>

	.macro get_thread_ctx core_local, res, tmp0, tmp1
		ldrh	w\tmp0, [\core_local, \
				#THREAD_CORE_LOCAL_CURR_THREAD]
		adr_l	x\res, threads
		mov	x\tmp1, #THREAD_CTX_SIZE
		madd	x\res, x\tmp0, x\tmp1, x\res
	.endm
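
	/*
	 * Roughly, in C (a sketch; threads[] and curr_thread are the
	 * bookkeeping from thread_private.h and struct thread_core_local):
	 *
	 *	struct thread_ctx *ctx = &threads[core_local->curr_thread];
	 *
	 * i.e. res = threads + curr_thread * THREAD_CTX_SIZE, which is
	 * what the madd above computes.
	 */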

	.macro return_from_exception
		eret
		/* Guard against speculation past ERET */
		dsb nsh
		isb
	.endm

	.macro b_if_spsr_is_el0 reg, label
		tbnz	\reg, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT), \label
		tst	\reg, #(SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)
		b.eq	\label
	.endm
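
	/*
	 * The macro above, roughly in C (a sketch using the SPSR_* field
	 * macros from arm.h): branch to \label when the saved PSTATE
	 * describes an EL0 exception origin.
	 *
	 *	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
	 *		goto label;	// lower EL using AArch32
	 *	if (!(spsr & (SPSR_64_MODE_EL_MASK << SPSR_64_MODE_EL_SHIFT)))
	 *		goto label;	// EL0 using AArch64 (EL0t)
	 */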

	.macro disable_pauth reg
#ifdef CFG_TA_PAUTH
		mrs	\reg, sctlr_el1
		bic	\reg, \reg, #SCTLR_ENIA
#ifdef CFG_TA_BTI
		orr	\reg, \reg, #(SCTLR_BT0 | SCTLR_BT1)
#endif
		msr	sctlr_el1, \reg
#endif
	.endm

	.macro enable_pauth reg
#ifdef CFG_TA_PAUTH
		mrs	\reg, sctlr_el1
		orr	\reg, \reg, #SCTLR_ENIA
#ifdef CFG_TA_BTI
		bic	\reg, \reg, #(SCTLR_BT0 | SCTLR_BT1)
#endif
		msr	sctlr_el1, \reg
#endif
	.endm
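
	/*
	 * C sketch of the two macros above (assuming the SCTLR_* bit
	 * macros from arm.h and the generated sysreg accessors):
	 * SCTLR_ENIA enables PAC for instruction addresses with the A
	 * key, and SCTLR_BT0/BT1 roughly restrict whether PACIASP/PACIBSP
	 * count as BTI landing pads at EL0/EL1.
	 *
	 *	uint64_t sctlr = read_sctlr_el1();
	 *
	 *	sctlr |= SCTLR_ENIA;			// enable_pauth
	 *	sctlr &= ~(SCTLR_BT0 | SCTLR_BT1);
	 *	// disable_pauth does the opposite:
	 *	// sctlr &= ~SCTLR_ENIA;
	 *	// sctlr |= SCTLR_BT0 | SCTLR_BT1;
	 *
	 *	write_sctlr_el1(sctlr);
	 */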

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	load_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	mov	sp, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3
	ldr	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
	msr	tpidr_el0, x1

	b_if_spsr_is_el0 w3, 1f

	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, #THREAD_CTX_REGS_X0]
	return_from_exception

1:
#ifdef CFG_TA_PAUTH
	/* Restore PAC keys before returning to EL0 */
	load_xregs x0, THREAD_CTX_REGS_APIAKEY_HI, 1, 2
	write_apiakeyhi	x1
	write_apiakeylo	x2
#endif

	load_xregs x0, THREAD_CTX_REGS_X1, 1, 3
	ldr	x0, [x0, #THREAD_CTX_REGS_X0]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC thread_resume

FUNC thread_smc , :
	smc	#0
	ret
END_FUNC thread_smc
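
/*
 * thread_smc() simply forwards its register arguments to the secure
 * monitor; a sketch of the C prototype it is assumed to implement
 * (see the thread headers for the authoritative declaration):
 *
 *	uint32_t thread_smc(uint32_t func_id, uint32_t a1,
 *			    uint32_t a2, uint32_t a3);
 *
 * Per the SMC calling convention the arguments go in x0..x3 and the
 * result comes back in x0, so no register shuffling is needed here.
 */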

/* void thread_smccc(struct thread_smc_args *arg_res) */
FUNC thread_smccc , :
	push	x0, xzr
	mov	x8, x0
	load_xregs x8, 0, 0, 7
#ifdef CFG_CORE_SEL2_SPMC
	hvc	#0
#else
	smc	#0
#endif
	pop	x8, xzr
	store_xregs x8, 0, 0, 7
	ret
END_FUNC thread_smccc
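
/*
 * Example use from C (a sketch; struct thread_smc_args is assumed to
 * carry fields a0..a7 that map to x0..x7 for both arguments and
 * results):
 *
 *	struct thread_smc_args args = { .a0 = SMCCC_VERSION };
 *
 *	thread_smccc(&args);
 *	// args.a0 now holds the result of the call
 */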

FUNC thread_init_vbar , :
	msr	vbar_el1, x0
	ret
END_FUNC thread_init_vbar
DECLARE_KEEP_PAGER thread_init_vbar

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 *
 * This function depends on being called with exceptions masked.
 */
FUNC __thread_enter_user_mode , :
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 0, 2
	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30

	/*
	 * Save the kernel sp in x19 and switch to SP_EL1
	 */
	mov	x19, sp
	msr	spsel, #1

	/* Get pointer to current thread context */
	get_thread_ctx sp, 21, 20, 22
	/*
	 * Save the kernel stack pointer in the thread context to ensure
	 * that el0_svc() uses the correct stack pointer
	 */
	str	x19, [x21, #THREAD_CTX_KERN_SP]

	/*
	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
	 */
	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
	msr	sp_el0, x1
	msr	elr_el1, x2
	msr	spsr_el1, x3

#ifdef CFG_TA_PAUTH
	/* Load APIAKEY */
	load_xregs x0, THREAD_CTX_REGS_APIAKEY_HI, 1, 2
	write_apiakeyhi	x1
	write_apiakeylo	x2
#endif

	/*
	 * Save the values for x0 and x1 in struct thread_core_local to be
	 * restored later just before the eret.
	 */
	load_xregs x0, THREAD_CTX_REGS_X0, 1, 2
	store_xregs sp, THREAD_CORE_LOCAL_X0, 1, 2

	/* Load the rest of the general purpose registers */
	load_xregs x0, THREAD_CTX_REGS_X2, 2, 30

	/* Jump into user mode */
	b eret_to_el0
END_FUNC __thread_enter_user_mode
DECLARE_KEEP_PAGER __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *		uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	/* Store the exit status */
	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 3, 5
	str	w1, [x4]
	str	w2, [x5]
	/* Save x19..x30 */
	store_xregs x3, THREAD_CTX_REGS_X19, 19, 30
	/* Restore x19..x30 */
	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode
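
/*
 * Rough lifetime of the record above, in C-like pseudocode (a sketch):
 * __thread_enter_user_mode() pushes a struct thread_user_mode_rec with
 * the callee-saved registers and erets to user space;
 * thread_unwind_user_mode() later pops it and returns as if
 * __thread_enter_user_mode() had returned normally.
 *
 *	uint32_t ret = __thread_enter_user_mode(regs, &exit_status0,
 *						&exit_status1);
 *	// ...execution resumes here when the EL1 exception handling
 *	// code calls thread_unwind_user_mode(ret, exit_status0,
 *	// exit_status1)
 */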

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes
	 * the vector entry point as its parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

	.macro restore_mapping
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/* Temporarily save x0, x1 */
		msr	tpidr_el1, x0
		msr	tpidrro_el0, x1

		/* Update the mapping to use the full kernel mapping */
		mrs	x0, ttbr0_el1
		sub	x0, x0, #CORE_MMU_BASE_TABLE_OFFSET
		/* Switch to the kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb

		/* Jump into the full mapping and continue execution */
		adr	x0, 1f
		ldr	x1, [sp, #THREAD_CORE_LOCAL_KCODE_OFFSET]
		add	x0, x0, x1
		br	x0
	1:
BTI(		bti	j)
		/* Point to the vector in the full mapping */
		adr_l	x0, thread_user_kcode_offset
		ldr	x0, [x0]
		mrs	x1, vbar_el1
		add	x1, x1, x0
		msr	vbar_el1, x1
		isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
		/*
		 * Update the SP with thread_user_kdata_sp_offset as
		 * described in init_user_kcode().
		 */
		adr_l	x0, thread_user_kdata_sp_offset
		ldr	x0, [x0]
		add	sp, sp, x0
#endif

		/* Restore x0, x1 */
		mrs	x0, tpidr_el1
		mrs	x1, tpidrro_el0
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
#else
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mrs	x0, ttbr0_el1
		/* Switch to the kernel mode ASID */
		bic	x0, x0, #BIT(TTBR_ASID_SHIFT)
		msr	ttbr0_el1, x0
		isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	.endm
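
	/*
	 * The ASID trick used above, roughly in C (a sketch; the kernel
	 * and user mappings use a pair of ASIDs that differ only in the
	 * lowest ASID bit of TTBR0_EL1, i.e. bit TTBR_ASID_SHIFT):
	 *
	 *	uint64_t ttbr = read_ttbr0_el1();
	 *
	 *	ttbr &= ~BIT64(TTBR_ASID_SHIFT);  // even ASID: kernel mode
	 *	write_ttbr0_el1(ttbr);
	 *	isb();
	 */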

#define INV_INSN	0
FUNC thread_excp_vect , : , default, 2048, nobti
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
el1_sync_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	el1_sync_abort
	check_vector_size el1_sync_sp0

	.balign	128, INV_INSN
el1_irq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_irq
	check_vector_size el1_irq_sp0

	.balign	128, INV_INSN
el1_fiq_sp0:
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	b	elx_fiq
	check_vector_size el1_fiq_sp0

	.balign	128, INV_INSN
el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1 : 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
el1_sync_sp1:
	b	el1_sync_sp1
	check_vector_size el1_sync_sp1

	.balign	128, INV_INSN
el1_irq_sp1:
	b	el1_irq_sp1
	check_vector_size el1_irq_sp1

	.balign	128, INV_INSN
el1_fiq_sp1:
	b	el1_fiq_sp1
	check_vector_size el1_fiq_sp1

	.balign	128, INV_INSN
el1_serror_sp1:
	b	el1_serror_sp1
	check_vector_size el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
el0_sync_a64:
	restore_mapping
	/*
	 * PAuth is disabled later; disabling it here would push the
	 * vector past the 32-instruction limit and check_vector_size
	 * would fail.
	 */
	b	el0_sync_a64_finish
	check_vector_size el0_sync_a64

	.balign	128, INV_INSN
el0_irq_a64:
	restore_mapping
	disable_pauth x1

	b	elx_irq
	check_vector_size el0_irq_a64

	.balign	128, INV_INSN
el0_fiq_a64:
	restore_mapping
	disable_pauth x1

	b	elx_fiq
	check_vector_size el0_fiq_a64

	.balign	128, INV_INSN
el0_serror_a64:
	b	el0_serror_a64
	check_vector_size el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
el0_sync_a32:
	restore_mapping

	b	el0_sync_a32_finish
	check_vector_size el0_sync_a32

	.balign	128, INV_INSN
el0_irq_a32:
	restore_mapping

	b	elx_irq
	check_vector_size el0_irq_a32

	.balign	128, INV_INSN
el0_fiq_a32:
	restore_mapping

	b	elx_fiq
	check_vector_size el0_fiq_a32

	.balign	128, INV_INSN
el0_serror_a32:
	b	el0_serror_a32
	check_vector_size el0_serror_a32

#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
	.macro invalidate_branch_predictor
		store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
		mov_imm	x0, SMCCC_ARCH_WORKAROUND_1
		smc	#0
		load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	.endm
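
	/*
	 * SMCCC_ARCH_WORKAROUND_1 is the SMCCC 1.1 call asking the
	 * firmware (for instance TF-A) to invalidate the branch
	 * predictor on the calling PE, mitigating Spectre v2 across
	 * the world switch. The SMC may clobber x0..x3, hence the
	 * save/restore around it.
	 */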

	.balign	2048, INV_INSN
	.global thread_excp_vect_wa_spectre_v2
thread_excp_vect_wa_spectre_v2:
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_v2_el1_sync_sp0:
	b	el1_sync_sp0
	check_vector_size wa_spectre_v2_el1_sync_sp0

	.balign	128, INV_INSN
wa_spectre_v2_el1_irq_sp0:
	b	el1_irq_sp0
	check_vector_size wa_spectre_v2_el1_irq_sp0

	.balign	128, INV_INSN
wa_spectre_v2_el1_fiq_sp0:
	b	el1_fiq_sp0
	check_vector_size wa_spectre_v2_el1_fiq_sp0

	.balign	128, INV_INSN
wa_spectre_v2_el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size wa_spectre_v2_el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1 : 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_v2_el1_sync_sp1:
	b	wa_spectre_v2_el1_sync_sp1
	check_vector_size wa_spectre_v2_el1_sync_sp1

	.balign	128, INV_INSN
wa_spectre_v2_el1_irq_sp1:
	b	wa_spectre_v2_el1_irq_sp1
	check_vector_size wa_spectre_v2_el1_irq_sp1

	.balign	128, INV_INSN
wa_spectre_v2_el1_fiq_sp1:
	b	wa_spectre_v2_el1_fiq_sp1
	check_vector_size wa_spectre_v2_el1_fiq_sp1

	.balign	128, INV_INSN
wa_spectre_v2_el1_serror_sp1:
	b	wa_spectre_v2_el1_serror_sp1
	check_vector_size wa_spectre_v2_el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_v2_el0_sync_a64:
	invalidate_branch_predictor
	b	el0_sync_a64
	check_vector_size wa_spectre_v2_el0_sync_a64

	.balign	128, INV_INSN
wa_spectre_v2_el0_irq_a64:
	invalidate_branch_predictor
	b	el0_irq_a64
	check_vector_size wa_spectre_v2_el0_irq_a64

	.balign	128, INV_INSN
wa_spectre_v2_el0_fiq_a64:
	invalidate_branch_predictor
	b	el0_fiq_a64
	check_vector_size wa_spectre_v2_el0_fiq_a64

	.balign	128, INV_INSN
wa_spectre_v2_el0_serror_a64:
	b	wa_spectre_v2_el0_serror_a64
	check_vector_size wa_spectre_v2_el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_v2_el0_sync_a32:
	invalidate_branch_predictor
	b	el0_sync_a32
	check_vector_size wa_spectre_v2_el0_sync_a32

	.balign	128, INV_INSN
wa_spectre_v2_el0_irq_a32:
	invalidate_branch_predictor
	b	el0_irq_a32
	check_vector_size wa_spectre_v2_el0_irq_a32

	.balign	128, INV_INSN
wa_spectre_v2_el0_fiq_a32:
	invalidate_branch_predictor
	b	el0_fiq_a32
	check_vector_size wa_spectre_v2_el0_fiq_a32

	.balign	128, INV_INSN
wa_spectre_v2_el0_serror_a32:
	b	wa_spectre_v2_el0_serror_a32
	check_vector_size wa_spectre_v2_el0_serror_a32

	.macro discard_branch_history
		str	x0, [sp, #THREAD_CORE_LOCAL_X0]
		ldrb	w0, [sp, #THREAD_CORE_LOCAL_BHB_LOOP_COUNT]
	1:	b	2f
	2:	subs	w0, w0, #1
		bne	1b
		dsb	sy
		isb
		ldr	x0, [sp, #THREAD_CORE_LOCAL_X0]
	.endm
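
	/*
	 * The loop above is the generic Spectre-BHB mitigation
	 * sequence: executing bhb_loop_count iterations of taken
	 * branches (a CPU-specific count kept in struct
	 * thread_core_local) overwrites all useful entries in the
	 * branch history buffer before the real vector runs. Roughly,
	 * in C (a sketch):
	 *
	 *	for (uint8_t n = core_local->bhb_loop_count; n; n--)
	 *		;	// two taken branches per iteration
	 *	dsb();
	 *	isb();
	 */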

	.balign	2048, INV_INSN
	.global thread_excp_vect_wa_spectre_bhb
thread_excp_vect_wa_spectre_bhb:
	/* -----------------------------------------------------
	 * EL1 with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_bhb_el1_sync_sp0:
	b	el1_sync_sp0
	check_vector_size wa_spectre_bhb_el1_sync_sp0

	.balign	128, INV_INSN
wa_spectre_bhb_el1_irq_sp0:
	b	el1_irq_sp0
	check_vector_size wa_spectre_bhb_el1_irq_sp0

	.balign	128, INV_INSN
wa_spectre_bhb_el1_fiq_sp0:
	b	el1_fiq_sp0
	check_vector_size wa_spectre_bhb_el1_fiq_sp0

	.balign	128, INV_INSN
wa_spectre_bhb_el1_serror_sp0:
	b	el1_serror_sp0
	check_vector_size wa_spectre_bhb_el1_serror_sp0

	/* -----------------------------------------------------
	 * Current EL with SP1 : 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_bhb_el1_sync_sp1:
	b	wa_spectre_bhb_el1_sync_sp1
	check_vector_size wa_spectre_bhb_el1_sync_sp1

	.balign	128, INV_INSN
wa_spectre_bhb_el1_irq_sp1:
	b	wa_spectre_bhb_el1_irq_sp1
	check_vector_size wa_spectre_bhb_el1_irq_sp1

	.balign	128, INV_INSN
wa_spectre_bhb_el1_fiq_sp1:
	b	wa_spectre_bhb_el1_fiq_sp1
	check_vector_size wa_spectre_bhb_el1_fiq_sp1

	.balign	128, INV_INSN
wa_spectre_bhb_el1_serror_sp1:
	b	wa_spectre_bhb_el1_serror_sp1
	check_vector_size wa_spectre_bhb_el1_serror_sp1

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_bhb_el0_sync_a64:
	discard_branch_history
	b	el0_sync_a64
	check_vector_size wa_spectre_bhb_el0_sync_a64

	.balign	128, INV_INSN
wa_spectre_bhb_el0_irq_a64:
	discard_branch_history
	b	el0_irq_a64
	check_vector_size wa_spectre_bhb_el0_irq_a64

	.balign	128, INV_INSN
wa_spectre_bhb_el0_fiq_a64:
	discard_branch_history
	b	el0_fiq_a64
	check_vector_size wa_spectre_bhb_el0_fiq_a64

	.balign	128, INV_INSN
wa_spectre_bhb_el0_serror_a64:
	b	wa_spectre_bhb_el0_serror_a64
	check_vector_size wa_spectre_bhb_el0_serror_a64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.balign	128, INV_INSN
wa_spectre_bhb_el0_sync_a32:
	discard_branch_history
	b	el0_sync_a32
	check_vector_size wa_spectre_bhb_el0_sync_a32

	.balign	128, INV_INSN
wa_spectre_bhb_el0_irq_a32:
	discard_branch_history
	b	el0_irq_a32
	check_vector_size wa_spectre_bhb_el0_irq_a32

	.balign	128, INV_INSN
wa_spectre_bhb_el0_fiq_a32:
	discard_branch_history
	b	el0_fiq_a32
	check_vector_size wa_spectre_bhb_el0_fiq_a32

	.balign	128, INV_INSN
wa_spectre_bhb_el0_serror_a32:
	b	wa_spectre_bhb_el0_serror_a32
	check_vector_size wa_spectre_bhb_el0_serror_a32
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

/*
 * We're keeping this code in the same section as the vector to make sure
 * that it's always available.
 */
eret_to_el0:
	enable_pauth x1

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point to the vector in the reduced mapping */
	adr_l	x0, thread_user_kcode_offset
	ldr	x0, [x0]
	mrs	x1, vbar_el1
	sub	x1, x1, x0
	msr	vbar_el1, x1
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/* Store the SP offset in tpidr_el1 to be used below to update SP */
	adr_l	x1, thread_user_kdata_sp_offset
	ldr	x1, [x1]
	msr	tpidr_el1, x1
#endif

	/* Jump into the reduced mapping and continue execution */
	adr_l	x1, 1f
	sub	x1, x1, x0
	br	x1
1:
BTI(	bti	j)
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	msr	tpidrro_el0, x0

	/* Update the mapping to exclude the full kernel mapping */
	mrs	x0, ttbr0_el1
	add	x0, x0, #CORE_MMU_BASE_TABLE_OFFSET
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	/*
	 * Update the SP with thread_user_kdata_sp_offset as described in
	 * init_user_kcode().
	 */
	mrs	x0, tpidr_el1
	sub	sp, sp, x0
#endif

	mrs	x0, tpidrro_el0
#else
	mrs	x0, ttbr0_el1
	orr	x0, x0, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x0
	isb
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	return_from_exception

el0_sync_a64_finish:
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH64_SVC
	b.eq	el0_svc
	b	el0_sync_abort

el0_sync_a32_finish:
	mrs	x2, esr_el1
	mrs	x3, sp_el0
	lsr	x2, x2, #ESR_EC_SHIFT
	cmp	x2, #ESR_EC_AARCH32_SVC
	b.eq	el0_svc
	b	el0_sync_abort

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active,
	 * which means executing with the reduced mapping, so the code
	 * needs to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	/* Mask all exceptions */
	mrs	x6, daif	/* this register must be preserved */
	msr	daifset, #DAIFBIT_ALL

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Point to the vector in the reduced mapping */
	adr_l	x7, thread_user_kcode_offset
	ldr	x7, [x7]	/* this register must be preserved */
	mrs	x4, vbar_el1	/* this register must be preserved */
	sub	x3, x4, x7
	msr	vbar_el1, x3
	isb

	/* Jump into the reduced mapping and continue execution */
	adr	x3, 1f
	sub	x3, x3, x7
	br	x3
1:
BTI(	bti	j)
	/* Update the mapping to exclude the full kernel mapping */
	mrs	x5, ttbr0_el1	/* this register must be preserved */
	add	x2, x5, #CORE_MMU_BASE_TABLE_OFFSET
	orr	x2, x2, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x2
	isb

#else
	mrs	x5, ttbr0_el1	/* this register must be preserved */
	orr	x2, x5, #BIT(TTBR_ASID_SHIFT) /* switch to user mode ASID */
	msr	ttbr0_el1, x2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate the minimum icache line size, result in x2 */
	mrs	x3, ctr_el0
	and	x3, x3, #CTR_IMINLINE_MASK
	mov	x2, #CTR_WORD_SIZE
	lsl	x2, x2, x3

	add	x1, x0, x1
	sub	x3, x2, #1
	bic	x0, x0, x3
1:
	ic	ivau, x0
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	ish

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	/* Update the mapping to use the full kernel mapping and ASID */
	msr	ttbr0_el1, x5
	isb

	/* Jump into the full mapping and continue execution */
	adr	x0, 1f
	add	x0, x0, x7
	br	x0
1:
BTI(	bti	j)
	/* Point to the vector in the full mapping */
	msr	vbar_el1, x4
	isb
#else
	/* Switch back to the kernel mode ASID */
	msr	ttbr0_el1, x5
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

	msr	daif, x6	/* restore exceptions */
	ret	/* End of icache_inv_user_range() */
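
	/*
	 * Typical use (a sketch, function names assumed from
	 * cache_helpers.h): after the kernel has written instructions
	 * into user space memory, for example when loading a TA,
	 *
	 *	dcache_clean_range(va, size);	// make the writes visible
	 *	icache_inv_user_range(va, size);
	 *
	 * so the icache cannot serve stale lines under the user space
	 * ASID.
	 */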

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect

LOCAL_FUNC el0_svc , :
	disable_pauth x1
	/* Get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	mrs	x1, tpidr_el0
	str	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
	/* Load saved kernel sp */
	ldr	x0, [x0, #THREAD_CTX_KERN_SP]
	/* Keep pointer to initial record in x1 */
	mov	x1, sp
	/* Switch to SP_EL0 and restore kernel sp */
	msr	spsel, #0
	mov	x2, sp	/* Save SP_EL0 */
	mov	sp, x0

	/* Make room for struct thread_svc_regs */
	sub	sp, sp, #THREAD_SVC_REG_SIZE
	stp	x30, x2, [sp, #THREAD_SVC_REG_X30]

	/* Restore x0-x3 */
	ldp	x2, x3, [x1, #THREAD_CORE_LOCAL_X2]
	ldp	x0, x1, [x1, #THREAD_CORE_LOCAL_X0]

	/* Prepare the argument for the handler */
	store_xregs sp, THREAD_SVC_REG_X0, 0, 14
	mrs	x0, elr_el1
	mrs	x1, spsr_el1
	store_xregs sp, THREAD_SVC_REG_ELR, 0, 1

#ifdef CFG_TA_PAUTH
	/* Save APIAKEY */
	read_apiakeyhi	x0
	read_apiakeylo	x1
	store_xregs sp, THREAD_SVC_REG_APIAKEY_HI, 0, 1
#endif

	mov	x0, sp

	/*
	 * Unmask native interrupts, SError, and debug exceptions since we
	 * have nothing left in sp_el1. Note that the SVC handler is
	 * expected to re-enable foreign interrupts by itself.
	 */
#if defined(CFG_ARM_GICV3)
	msr	daifclr, #(DAIFBIT_IRQ | DAIFBIT_ABT | DAIFBIT_DBG)
#else
	msr	daifclr, #(DAIFBIT_FIQ | DAIFBIT_ABT | DAIFBIT_DBG)
#endif

	/* Call the handler */
	bl	thread_svc_handler

	/* Mask all maskable exceptions since we're switching back to sp_el1 */
	msr	daifset, #DAIFBIT_ALL

	/*
	 * Save the kernel sp we had at the beginning of this function.
	 * This is needed when this TA has called another TA, because
	 * __thread_enter_user_mode() also saves the stack pointer in this
	 * field.
	 */
	msr	spsel, #1
	get_thread_ctx sp, 0, 1, 2
	msr	spsel, #0
	add	x1, sp, #THREAD_SVC_REG_SIZE
	str	x1, [x0, #THREAD_CTX_KERN_SP]

	/* Restore registers to the required state and return */
	ldr	x1, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
	msr	tpidr_el0, x1
	load_xregs sp, THREAD_SVC_REG_ELR, 0, 1
	msr	elr_el1, x0
	msr	spsr_el1, x1
	load_xregs sp, THREAD_SVC_REG_X2, 2, 14
	mov	x30, sp
	ldr	x0, [x30, #THREAD_SVC_REG_SP_EL0]
	mov	sp, x0
	b_if_spsr_is_el0 w1, 1f
	ldp	x0, x1, [x30, #THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	return_from_exception

1:
#ifdef CFG_TA_PAUTH
	/* Restore APIAKEY */
	load_xregs x30, THREAD_SVC_REG_APIAKEY_HI, 0, 1
	write_apiakeyhi	x0
	write_apiakeylo	x1
#endif

	ldp	x0, x1, [x30, #THREAD_SVC_REG_X0]
	ldr	x30, [x30, #THREAD_SVC_REG_X30]

	msr	spsel, #1
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
	b	eret_to_el0
END_FUNC el0_svc
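
/*
 * Shape of the SVC path above in C-like pseudocode (a sketch; regs
 * points at the struct thread_svc_regs built on the kernel stack):
 *
 *	regs->elr = read_elr_el1();
 *	regs->spsr = read_spsr_el1();
 *	thread_svc_handler(regs);	// may modify elr/spsr
 *	write_elr_el1(regs->elr);
 *	write_spsr_el1(regs->spsr);
 *	// eret to EL0, or to EL1 if the handler rewrote SPSR
 */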

LOCAL_FUNC el1_sync_abort , :
	mov	x0, sp
	msr	spsel, #0
	mov	x3, sp		/* Save original sp */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	tbnz	w1, #(THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT), \
			.Lsel_tmp_sp

	/* Select abort stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	b	.Lset_sp

.Lsel_tmp_sp:
	/* Select tmp stack */
	ldr	x2, [x0, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	orr	w1, w1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */

.Lset_sp:
	mov	sp, x2
	str	w1, [x0, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */
	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x0 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3

	/* Return from exception */
	return_from_exception
END_FUNC el1_sync_abort
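
/*
 * Worked example of the flags scheme above (a sketch): an abort taken
 * while flags = THREAD_CLF_TMP yields
 *
 *	flags = (THREAD_CLF_TMP << THREAD_CLF_SAVED_SHIFT) |
 *		THREAD_CLF_ABORT
 *
 * The tbnz checks THREAD_CLF_ABORT in the saved copy, i.e. an abort
 * taken while already handling an abort, and in that case picks the
 * tmp stack since the abort stack is busy.
 */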

	/* sp_el0 in x3 */
LOCAL_FUNC el0_sync_abort , :
	disable_pauth x1
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_ABORT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/*
	 * Save state on stack
	 */

	/* Load abt_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
	/* Keep pointer to initial record in x0 */
	mov	x0, sp
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1
	sub	sp, sp, #THREAD_ABT_REGS_SIZE
	mrs	x2, spsr_el1
	/* Store spsr, sp_el0 */
	stp	x2, x3, [sp, #THREAD_ABT_REG_SPSR]
	/* Store original x0, x1 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X0]
	stp	x2, x3, [sp, #THREAD_ABT_REG_X0]
	/* Store original x2, x3 and x4 to x29 */
	ldp	x2, x3, [x0, #THREAD_CORE_LOCAL_X2]
	store_xregs sp, THREAD_ABT_REG_X2, 2, 29
	/* Store x30, elr_el1 */
	mrs	x0, elr_el1
	stp	x30, x0, [sp, #THREAD_ABT_REG_X30]

	/*
	 * Call handler
	 */
	mov	x0, #0
	mov	x1, sp
	bl	abort_handler

	/*
	 * Restore state from stack
	 */

	/* Load x30, elr_el1 */
	ldp	x30, x0, [sp, #THREAD_ABT_REG_X30]
	msr	elr_el1, x0
	/* Load x0 to x29 */
	load_xregs sp, THREAD_ABT_REG_X0, 0, 29
	/* Switch to SP_EL1 */
	msr	spsel, #1
	/* Save x0 to x3 in CORE_LOCAL */
	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 3
	/* Restore spsr_el1 and sp_el0 */
	mrs	x3, sp_el0
	ldp	x0, x1, [x3, #THREAD_ABT_REG_SPSR]
	msr	spsr_el1, x0
	msr	sp_el0, x1

	/* Update core local flags */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Restore x2 to x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3

	b_if_spsr_is_el0 w0, 1f

	/* Restore x0 to x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	return_from_exception
1:	b	eret_to_el0
END_FUNC el0_sync_abort

/* The handler of foreign interrupts. */
.macro foreign_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Get pointer to current thread context in x0 */
	get_thread_ctx sp, 0, 1, 2
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0

	/* Store original sp_el0 */
	str	x2, [x0, #THREAD_CTX_REGS_SP]
	/* Store tpidr_el0 */
	mrs	x2, tpidr_el0
	str	x2, [x0, #THREAD_CTX_REGS_TPIDR_EL0]
	/* Store x4..x30 */
	store_xregs x0, THREAD_CTX_REGS_X4, 4, 30
	/* Load original x0..x3 into x10..x13 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 10, 13
	/* Save original x0..x3 */
	store_xregs x0, THREAD_CTX_REGS_X0, 10, 13

	/* Load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which cache entries have
	 * been used. We're relying on the dispatcher in TF-A to take
	 * care of the BTB.
	 */
	mov	x0, #DCACHE_OP_CLEAN_INV
	bl	dcache_op_louis
	ic	iallu
#endif
	/*
	 * Mark current thread as suspended
	 */
	mov	w0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	x1, spsr_el1
	mrs	x2, elr_el1
	bl	thread_state_suspend

	/* Update core local flags */
	/* Switch to SP_EL1 */
	msr	spsel, #1
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w1, w1, #THREAD_CLF_SAVED_SHIFT
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	msr	spsel, #0

	/*
	 * Note that we're exiting with SP_EL0 selected since the entry
	 * functions expect to have SP_EL0 selected with the tmp stack
	 * set.
	 */

	/* Passing thread index in w0 */
	b	thread_foreign_intr_exit
.endm

/*
 * This struct is never used from C; it's only here to visualize the
 * layout.
 *
 * struct elx_nintr_rec {
 * 	uint64_t x[19 - 4]; x4..x18
 * 	uint64_t lr;
 * 	uint64_t sp_el0;
 * };
 */
#define ELX_NINTR_REC_X(x)		(8 * ((x) - 4))
#define ELX_NINTR_REC_LR		(8 + ELX_NINTR_REC_X(19))
#define ELX_NINTR_REC_SP_EL0		(8 + ELX_NINTR_REC_LR)
#define ELX_NINTR_REC_SIZE		(8 + ELX_NINTR_REC_SP_EL0)
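
/*
 * Worked out (a sanity check of the offsets above): x4..x18 occupy
 * offsets 0..119, ELX_NINTR_REC_LR = 128, ELX_NINTR_REC_SP_EL0 = 136
 * and ELX_NINTR_REC_SIZE = 144. The 8-byte gap after x[] makes the
 * total frame size a multiple of 16, as required for SP, and keeps
 * the stp of lr/sp_el0 16-byte aligned.
 */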

/* The handler of native interrupts. */
.macro native_intr_handler mode:req
	/*
	 * Update core local flags
	 */
	ldr	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	w1, w1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	w1, w1, #THREAD_CLF_FIQ
	.else
	orr	w1, w1, #THREAD_CLF_IRQ
	.endif
	orr	w1, w1, #THREAD_CLF_TMP
	str	w1, [sp, #THREAD_CORE_LOCAL_FLAGS]

	/* Load tmp_stack_va_end */
	ldr	x1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	/* Keep original SP_EL0 */
	mrs	x2, sp_el0
	/* Switch to SP_EL0 */
	msr	spsel, #0
	mov	sp, x1

	/*
	 * Save, on the stack, the registers that a call to a C function
	 * may clobber
	 */
	/* Make room for struct elx_nintr_rec */
	sub	sp, sp, #ELX_NINTR_REC_SIZE
	/* Store x4..x18 */
	store_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Store lr and original sp_el0 */
	stp	x30, x2, [sp, #ELX_NINTR_REC_LR]

	bl	thread_check_canaries
	bl	itr_core_handler

	/*
	 * Restore registers
	 */
	/* Restore x4..x18 */
	load_xregs sp, ELX_NINTR_REC_X(4), 4, 18
	/* Load lr and original sp_el0 */
	ldp	x30, x2, [sp, #ELX_NINTR_REC_LR]
	/* Restore SP_EL0 */
	mov	sp, x2
	/* Switch back to SP_EL1 */
	msr	spsel, #1

	/* Update core local flags */
	ldr	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	w0, w0, #THREAD_CLF_SAVED_SHIFT
	str	w0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	mrs	x0, spsr_el1
	/* Restore x2..x3 */
	load_xregs sp, THREAD_CORE_LOCAL_X2, 2, 3
	b_if_spsr_is_el0 w0, 1f

	/* Restore x0..x1 */
	load_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1

	/* Return from exception */
	return_from_exception
1:	b	eret_to_el0
.endm

LOCAL_FUNC elx_irq , :
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
END_FUNC elx_irq

LOCAL_FUNC elx_fiq , :
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
END_FUNC elx_fiq
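
/*
 * Summary of the mapping above: with CFG_ARM_GICV3, native (secure)
 * interrupts arrive as IRQ and foreign (normal world) interrupts as
 * FIQ; without it the roles are swapped, matching how the GIC is
 * configured to signal secure interrupts.
 */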

BTI(emit_aarch64_feature_1_and     GNU_PROPERTY_AARCH64_FEATURE_1_BTI)