xref: /rk3399_ARM-atf/lib/el3_runtime/aarch64/context.S (revision 6bb49c876c7593ed5f61c20ef3d989dcff8e8d8c)
/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <context.h>
#include <el3_common_macros.S>

#if CTX_INCLUDE_EL2_REGS
	.global	el2_sysregs_context_save_common
	.global	el2_sysregs_context_restore_common
#if ENABLE_SPE_FOR_LOWER_ELS
	.global	el2_sysregs_context_save_spe
	.global	el2_sysregs_context_restore_spe
#endif /* ENABLE_SPE_FOR_LOWER_ELS */
#if CTX_INCLUDE_MTE_REGS
	.global	el2_sysregs_context_save_mte
	.global	el2_sysregs_context_restore_mte
#endif /* CTX_INCLUDE_MTE_REGS */
#if ENABLE_MPAM_FOR_LOWER_ELS
	.global	el2_sysregs_context_save_mpam
	.global	el2_sysregs_context_restore_mpam
#endif /* ENABLE_MPAM_FOR_LOWER_ELS */
#if ENABLE_FEAT_ECV
	.global	el2_sysregs_context_save_ecv
	.global	el2_sysregs_context_restore_ecv
#endif /* ENABLE_FEAT_ECV */
#if ENABLE_FEAT_VHE
	.global	el2_sysregs_context_save_vhe
	.global	el2_sysregs_context_restore_vhe
#endif /* ENABLE_FEAT_VHE */
#if RAS_EXTENSION
	.global	el2_sysregs_context_save_ras
	.global	el2_sysregs_context_restore_ras
#endif /* RAS_EXTENSION */
#if CTX_INCLUDE_NEVE_REGS
	.global	el2_sysregs_context_save_nv2
	.global	el2_sysregs_context_restore_nv2
#endif /* CTX_INCLUDE_NEVE_REGS */
#if ENABLE_FEAT_CSV2_2
	.global	el2_sysregs_context_save_csv2
	.global	el2_sysregs_context_restore_csv2
#endif /* ENABLE_FEAT_CSV2_2 */
#endif /* CTX_INCLUDE_EL2_REGS */

	.global	el1_sysregs_context_save
	.global	el1_sysregs_context_restore
#if CTX_INCLUDE_FPREGS
	.global	fpregs_context_save
	.global	fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */
	.global	prepare_el3_entry
	.global	restore_gp_pmcr_pauth_regs
	.global	save_and_update_ptw_el1_sys_regs
	.global	el3_exit

#if CTX_INCLUDE_EL2_REGS

/* -----------------------------------------------------
 * The following functions strictly follow the AArch64
 * PCS, using x9-x16 (temporary caller-saved registers)
 * to save/restore the EL2 system register context.
 * The el2_sysregs_context_save/restore_common functions
 * save and restore the registers that are common to all
 * configurations. The remaining functions save and
 * restore the EL2 system registers that are present only
 * when a particular feature is enabled. All functions
 * assume that 'x0' points to an 'el2_sys_regs' structure
 * where the register context will be saved/restored.
 *
 * The following registers are not saved/restored:
 * AMEVCNTVOFF0<n>_EL2
 * AMEVCNTVOFF1<n>_EL2
 * ICH_AP0R<n>_EL2
 * ICH_AP1R<n>_EL2
 * ICH_LR<n>_EL2
 * -----------------------------------------------------
 */
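/* -----------------------------------------------------
 * Illustrative only (not part of the original source):
 * a minimal, hypothetical caller sketch, assuming 'x1'
 * already holds the base of the target cpu_context_t and
 * that CTX_EL2_SYSREGS_OFFSET selects its EL2 system
 * register frame:
 *
 *	add	x0, x1, #CTX_EL2_SYSREGS_OFFSET
 *	bl	el2_sysregs_context_save_common
 *
 * The feature-specific helpers below would be invoked the
 * same way, guarded by the matching build flag.
 * -----------------------------------------------------
 */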
func el2_sysregs_context_save_common
	mrs	x9, actlr_el2
	mrs	x10, afsr0_el2
	stp	x9, x10, [x0, #CTX_ACTLR_EL2]

	mrs	x11, afsr1_el2
	mrs	x12, amair_el2
	stp	x11, x12, [x0, #CTX_AFSR1_EL2]

	mrs	x13, cnthctl_el2
	mrs	x14, cntvoff_el2
	stp	x13, x14, [x0, #CTX_CNTHCTL_EL2]

	mrs	x15, cptr_el2
	str	x15, [x0, #CTX_CPTR_EL2]

#if CTX_INCLUDE_AARCH32_REGS
	mrs	x16, dbgvcr32_el2
	str	x16, [x0, #CTX_DBGVCR32_EL2]
#endif /* CTX_INCLUDE_AARCH32_REGS */

	mrs	x9, elr_el2
	mrs	x10, esr_el2
	stp	x9, x10, [x0, #CTX_ELR_EL2]

	mrs	x11, far_el2
	mrs	x12, hacr_el2
	stp	x11, x12, [x0, #CTX_FAR_EL2]

	mrs	x13, hcr_el2
	mrs	x14, hpfar_el2
	stp	x13, x14, [x0, #CTX_HCR_EL2]

	mrs	x15, hstr_el2
	mrs	x16, ICC_SRE_EL2
	stp	x15, x16, [x0, #CTX_HSTR_EL2]

	mrs	x9, ICH_HCR_EL2
	mrs	x10, ICH_VMCR_EL2
	stp	x9, x10, [x0, #CTX_ICH_HCR_EL2]

	mrs	x11, mair_el2
	mrs	x12, mdcr_el2
	stp	x11, x12, [x0, #CTX_MAIR_EL2]

	mrs	x14, sctlr_el2
	str	x14, [x0, #CTX_SCTLR_EL2]

	mrs	x15, spsr_el2
	mrs	x16, sp_el2
	stp	x15, x16, [x0, #CTX_SPSR_EL2]

	mrs	x9, tcr_el2
	mrs	x10, tpidr_el2
	stp	x9, x10, [x0, #CTX_TCR_EL2]

	mrs	x11, ttbr0_el2
	mrs	x12, vbar_el2
	stp	x11, x12, [x0, #CTX_TTBR0_EL2]

	mrs	x13, vmpidr_el2
	mrs	x14, vpidr_el2
	stp	x13, x14, [x0, #CTX_VMPIDR_EL2]

	mrs	x15, vtcr_el2
	mrs	x16, vttbr_el2
	stp	x15, x16, [x0, #CTX_VTCR_EL2]
	ret
endfunc el2_sysregs_context_save_common

func el2_sysregs_context_restore_common
	ldp	x9, x10, [x0, #CTX_ACTLR_EL2]
	msr	actlr_el2, x9
	msr	afsr0_el2, x10

	ldp	x11, x12, [x0, #CTX_AFSR1_EL2]
	msr	afsr1_el2, x11
	msr	amair_el2, x12

	ldp	x13, x14, [x0, #CTX_CNTHCTL_EL2]
	msr	cnthctl_el2, x13
	msr	cntvoff_el2, x14

	ldr	x15, [x0, #CTX_CPTR_EL2]
	msr	cptr_el2, x15

#if CTX_INCLUDE_AARCH32_REGS
	ldr	x16, [x0, #CTX_DBGVCR32_EL2]
	msr	dbgvcr32_el2, x16
#endif /* CTX_INCLUDE_AARCH32_REGS */

	ldp	x9, x10, [x0, #CTX_ELR_EL2]
	msr	elr_el2, x9
	msr	esr_el2, x10

	ldp	x11, x12, [x0, #CTX_FAR_EL2]
	msr	far_el2, x11
	msr	hacr_el2, x12

	ldp	x13, x14, [x0, #CTX_HCR_EL2]
	msr	hcr_el2, x13
	msr	hpfar_el2, x14

	ldp	x15, x16, [x0, #CTX_HSTR_EL2]
	msr	hstr_el2, x15
	msr	ICC_SRE_EL2, x16

	ldp	x9, x10, [x0, #CTX_ICH_HCR_EL2]
	msr	ICH_HCR_EL2, x9
	msr	ICH_VMCR_EL2, x10

	ldp	x11, x12, [x0, #CTX_MAIR_EL2]
	msr	mair_el2, x11
	msr	mdcr_el2, x12

	ldr	x14, [x0, #CTX_SCTLR_EL2]
	msr	sctlr_el2, x14

	ldp	x15, x16, [x0, #CTX_SPSR_EL2]
	msr	spsr_el2, x15
	msr	sp_el2, x16

	ldp	x9, x10, [x0, #CTX_TCR_EL2]
	msr	tcr_el2, x9
	msr	tpidr_el2, x10

	ldp	x11, x12, [x0, #CTX_TTBR0_EL2]
	msr	ttbr0_el2, x11
	msr	vbar_el2, x12

	ldp	x13, x14, [x0, #CTX_VMPIDR_EL2]
	msr	vmpidr_el2, x13
	msr	vpidr_el2, x14

	ldp	x15, x16, [x0, #CTX_VTCR_EL2]
	msr	vtcr_el2, x15
	msr	vttbr_el2, x16
	ret
endfunc el2_sysregs_context_restore_common

#if ENABLE_SPE_FOR_LOWER_ELS
func el2_sysregs_context_save_spe
	mrs	x13, PMSCR_EL2
	str	x13, [x0, #CTX_PMSCR_EL2]
	ret
endfunc el2_sysregs_context_save_spe

func el2_sysregs_context_restore_spe
	ldr	x13, [x0, #CTX_PMSCR_EL2]
	msr	PMSCR_EL2, x13
	ret
endfunc el2_sysregs_context_restore_spe
#endif /* ENABLE_SPE_FOR_LOWER_ELS */

#if CTX_INCLUDE_MTE_REGS
func el2_sysregs_context_save_mte
	mrs	x9, TFSR_EL2
	str	x9, [x0, #CTX_TFSR_EL2]
	ret
endfunc el2_sysregs_context_save_mte

func el2_sysregs_context_restore_mte
	ldr	x9, [x0, #CTX_TFSR_EL2]
	msr	TFSR_EL2, x9
	ret
endfunc el2_sysregs_context_restore_mte
#endif /* CTX_INCLUDE_MTE_REGS */

#if ENABLE_MPAM_FOR_LOWER_ELS
func el2_sysregs_context_save_mpam
	mrs	x10, MPAM2_EL2
	str	x10, [x0, #CTX_MPAM2_EL2]

	mrs	x10, MPAMIDR_EL1

	/*
	 * The context registers that we intend to save would be part of the
	 * PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
	 */
	tbz	w10, #MPAMIDR_EL1_HAS_HCR_SHIFT, 3f

	/*
	 * MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 would be present in the
	 * system register frame if MPAMIDR_EL1.HAS_HCR == 1. Proceed to save
	 * the context of these registers.
	 */
	mrs	x11, MPAMHCR_EL2
	mrs	x12, MPAMVPM0_EL2
	stp	x11, x12, [x0, #CTX_MPAMHCR_EL2]

	mrs	x13, MPAMVPMV_EL2
	str	x13, [x0, #CTX_MPAMVPMV_EL2]

	/*
	 * MPAMIDR_EL1.VPMR_MAX has to be probed to obtain the maximum supported
	 * VPMR value. Proceed to save the context of registers from
	 * MPAMVPM1_EL2 to MPAMVPM<x>_EL2 where x is VPMR_MAX. From MPAM spec,
	 * VPMR_MAX should not be zero if HAS_HCR == 1.
	 */
	ubfx	x10, x10, #MPAMIDR_EL1_VPMR_MAX_SHIFT, \
		#MPAMIDR_EL1_VPMR_MAX_WIDTH

	/*
	 * Once VPMR_MAX has been identified, calculate the offset relative to
	 * PC to jump to so that relevant context can be saved. The offset is
	 * calculated as (VPMR_POSSIBLE_MAX - VPMR_MAX) * (instruction size for
	 * saving one VPM register) + (absolute address of label "1").
	 */
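	/*
	 * Illustrative worked example (not from the original source), assuming
	 * MPAMIDR_EL1_VPMR_MAX_POSSIBLE is 7: with MPAMIDR_EL1.VPMR_MAX == 5,
	 * w10 becomes 7 - 5 = 2, so the computed branch below lands two blocks
	 * past label "1", i.e. at the MPAMVPM5_EL2 save, and MPAMVPM5_EL2 down
	 * to MPAMVPM1_EL2 are saved. With VPMR_MAX == 7 it lands on label "1"
	 * itself and all of MPAMVPM7_EL2..MPAMVPM1_EL2 are saved.
	 */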
	mov	w11, #MPAMIDR_EL1_VPMR_MAX_POSSIBLE
	sub	w10, w11, w10

	/* Calculate the size of one block of MPAMVPM*_EL2 save */
	adr	x11, 1f
	adr	x12, 2f
	sub	x12, x12, x11

	madd	x10, x10, x12, x11
	br	x10

	/*
	 * The branch above would land properly on one of the blocks following
	 * label "1". Make sure that the order of save is retained.
	 */
1:
#if ENABLE_BTI
	bti	j
#endif
	mrs	x10, MPAMVPM7_EL2
	str	x10, [x0, #CTX_MPAMVPM7_EL2]
2:
#if ENABLE_BTI
	bti	j
#endif
	mrs	x11, MPAMVPM6_EL2
	str	x11, [x0, #CTX_MPAMVPM6_EL2]

#if ENABLE_BTI
	bti	j
#endif
	mrs	x12, MPAMVPM5_EL2
	str	x12, [x0, #CTX_MPAMVPM5_EL2]

#if ENABLE_BTI
	bti	j
#endif
	mrs	x13, MPAMVPM4_EL2
	str	x13, [x0, #CTX_MPAMVPM4_EL2]

#if ENABLE_BTI
	bti	j
#endif
	mrs	x14, MPAMVPM3_EL2
	str	x14, [x0, #CTX_MPAMVPM3_EL2]

#if ENABLE_BTI
	bti	j
#endif
	mrs	x15, MPAMVPM2_EL2
	str	x15, [x0, #CTX_MPAMVPM2_EL2]

#if ENABLE_BTI
	bti	j
#endif
	mrs	x16, MPAMVPM1_EL2
	str	x16, [x0, #CTX_MPAMVPM1_EL2]

3:	ret
endfunc el2_sysregs_context_save_mpam

func el2_sysregs_context_restore_mpam
	ldr	x10, [x0, #CTX_MPAM2_EL2]
	msr	MPAM2_EL2, x10

	mrs	x10, MPAMIDR_EL1
	/*
	 * The context registers that we intend to restore would be part of the
	 * PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
	 */
	tbz	w10, #MPAMIDR_EL1_HAS_HCR_SHIFT, 3f

	/*
	 * MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 would be present in the
	 * system register frame if MPAMIDR_EL1.HAS_HCR == 1. Proceed to restore
	 * the context of these registers.
	 */
	ldp	x11, x12, [x0, #CTX_MPAMHCR_EL2]
	msr	MPAMHCR_EL2, x11
	msr	MPAMVPM0_EL2, x12

	ldr	x13, [x0, #CTX_MPAMVPMV_EL2]
	msr	MPAMVPMV_EL2, x13

	/*
	 * MPAMIDR_EL1.VPMR_MAX has to be probed to obtain the maximum supported
	 * VPMR value. Proceed to restore the context of registers from
	 * MPAMVPM1_EL2 to MPAMVPM<x>_EL2 where x is VPMR_MAX. From the MPAM
	 * spec, VPMR_MAX should not be zero if HAS_HCR == 1.
	 */
	ubfx	x10, x10, #MPAMIDR_EL1_VPMR_MAX_SHIFT,	\
		#MPAMIDR_EL1_VPMR_MAX_WIDTH

	/*
	 * Once VPMR_MAX has been identified, calculate the offset relative to
	 * PC to jump to so that relevant context can be restored. The offset is
	 * calculated as (VPMR_POSSIBLE_MAX - VPMR_MAX) * (instruction size for
	 * restoring one VPM register) + (absolute address of label "1").
	 */
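	/*
	 * Illustrative note (not from the original source): as in the save
	 * path, with VPMR_MAX == 5 the computed branch below skips the
	 * MPAMVPM7_EL2 and MPAMVPM6_EL2 blocks and restores only
	 * MPAMVPM5_EL2..MPAMVPM1_EL2.
	 */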
	mov	w11, #MPAMIDR_EL1_VPMR_MAX_POSSIBLE
	sub	w10, w11, w10

	/* Calculate the size of one block of MPAMVPM*_EL2 restore */
	adr	x11, 1f
	adr	x12, 2f
	sub	x12, x12, x11

	madd	x10, x10, x12, x11
	br	x10

	/*
	 * The branch above would land properly on one of the blocks following
	 * label "1". Make sure that the order of restore is retained.
	 */
1:

#if ENABLE_BTI
	bti	j
#endif
	ldr	x10, [x0, #CTX_MPAMVPM7_EL2]
	msr	MPAMVPM7_EL2, x10
2:
#if ENABLE_BTI
	bti	j
#endif
	ldr	x11, [x0, #CTX_MPAMVPM6_EL2]
	msr	MPAMVPM6_EL2, x11

#if ENABLE_BTI
	bti	j
#endif
	ldr	x12, [x0, #CTX_MPAMVPM5_EL2]
	msr	MPAMVPM5_EL2, x12

#if ENABLE_BTI
	bti	j
#endif
	ldr	x13, [x0, #CTX_MPAMVPM4_EL2]
	msr	MPAMVPM4_EL2, x13

#if ENABLE_BTI
	bti	j
#endif
	ldr	x14, [x0, #CTX_MPAMVPM3_EL2]
	msr	MPAMVPM3_EL2, x14

#if ENABLE_BTI
	bti	j
#endif
	ldr	x15, [x0, #CTX_MPAMVPM2_EL2]
	msr	MPAMVPM2_EL2, x15

#if ENABLE_BTI
	bti	j
#endif
	ldr	x16, [x0, #CTX_MPAMVPM1_EL2]
	msr	MPAMVPM1_EL2, x16

3:	ret
endfunc el2_sysregs_context_restore_mpam
#endif /* ENABLE_MPAM_FOR_LOWER_ELS */

#if ENABLE_FEAT_ECV
func el2_sysregs_context_save_ecv
	mrs	x11, CNTPOFF_EL2
	str	x11, [x0, #CTX_CNTPOFF_EL2]
	ret
endfunc el2_sysregs_context_save_ecv

func el2_sysregs_context_restore_ecv
	ldr	x11, [x0, #CTX_CNTPOFF_EL2]
	msr	CNTPOFF_EL2, x11
	ret
endfunc el2_sysregs_context_restore_ecv
#endif /* ENABLE_FEAT_ECV */

#if ENABLE_FEAT_VHE
func el2_sysregs_context_save_vhe
	/*
	 * CONTEXTIDR_EL2 register is saved only when FEAT_VHE or
	 * FEAT_Debugv8p2 (currently not in TF-A) is supported.
	 */
	mrs	x9, contextidr_el2
	mrs	x10, ttbr1_el2
	stp	x9, x10, [x0, #CTX_CONTEXTIDR_EL2]
	ret
endfunc el2_sysregs_context_save_vhe

func el2_sysregs_context_restore_vhe
	/*
	 * CONTEXTIDR_EL2 register is restored only when FEAT_VHE or
	 * FEAT_Debugv8p2 (currently not in TF-A) is supported.
	 */
	ldp	x9, x10, [x0, #CTX_CONTEXTIDR_EL2]
	msr	contextidr_el2, x9
	msr	ttbr1_el2, x10
	ret
endfunc el2_sysregs_context_restore_vhe
#endif /* ENABLE_FEAT_VHE */

#if RAS_EXTENSION
func el2_sysregs_context_save_ras
	/*
	 * VDISR_EL2 and VSESR_EL2 registers are saved only when
	 * FEAT_RAS is supported.
	 */
	mrs	x11, vdisr_el2
	mrs	x12, vsesr_el2
	stp	x11, x12, [x0, #CTX_VDISR_EL2]
	ret
endfunc el2_sysregs_context_save_ras

func el2_sysregs_context_restore_ras
	/*
	 * VDISR_EL2 and VSESR_EL2 registers are restored only when FEAT_RAS
	 * is supported.
	 */
	ldp	x11, x12, [x0, #CTX_VDISR_EL2]
	msr	vdisr_el2, x11
	msr	vsesr_el2, x12
	ret
endfunc el2_sysregs_context_restore_ras
#endif /* RAS_EXTENSION */

#if CTX_INCLUDE_NEVE_REGS
func el2_sysregs_context_save_nv2
	/*
	 * VNCR_EL2 register is saved only when FEAT_NV2 is supported.
	 */
	mrs	x16, vncr_el2
	str	x16, [x0, #CTX_VNCR_EL2]
	ret
endfunc el2_sysregs_context_save_nv2

func el2_sysregs_context_restore_nv2
	/*
	 * VNCR_EL2 register is restored only when FEAT_NV2 is supported.
	 */
	ldr	x16, [x0, #CTX_VNCR_EL2]
	msr	vncr_el2, x16
	ret
endfunc el2_sysregs_context_restore_nv2
#endif /* CTX_INCLUDE_NEVE_REGS */

#if ENABLE_FEAT_CSV2_2
func el2_sysregs_context_save_csv2
	/*
	 * SCXTNUM_EL2 register is saved only when FEAT_CSV2_2 is supported.
	 */
	mrs	x13, scxtnum_el2
	str	x13, [x0, #CTX_SCXTNUM_EL2]
	ret
endfunc el2_sysregs_context_save_csv2

func el2_sysregs_context_restore_csv2
	/*
	 * SCXTNUM_EL2 register is restored only when FEAT_CSV2_2 is supported.
	 */
	ldr	x13, [x0, #CTX_SCXTNUM_EL2]
	msr	scxtnum_el2, x13
	ret
endfunc el2_sysregs_context_restore_csv2
#endif /* ENABLE_FEAT_CSV2_2 */

#endif /* CTX_INCLUDE_EL2_REGS */

/* ------------------------------------------------------------------
 * The following function strictly follows the AArch64 PCS, using
 * x9-x17 (temporary caller-saved registers) to save the EL1 system
 * register context. It assumes that 'x0' points to an
 * 'el1_sys_regs' structure where the register context will be saved.
 * ------------------------------------------------------------------
 */
func el1_sysregs_context_save

	mrs	x9, spsr_el1
	mrs	x10, elr_el1
	stp	x9, x10, [x0, #CTX_SPSR_EL1]

#if !ERRATA_SPECULATIVE_AT
	mrs	x15, sctlr_el1
	mrs	x16, tcr_el1
	stp	x15, x16, [x0, #CTX_SCTLR_EL1]
#endif /* ERRATA_SPECULATIVE_AT */

	mrs	x17, cpacr_el1
	mrs	x9, csselr_el1
	stp	x17, x9, [x0, #CTX_CPACR_EL1]

	mrs	x10, sp_el1
	mrs	x11, esr_el1
	stp	x10, x11, [x0, #CTX_SP_EL1]

	mrs	x12, ttbr0_el1
	mrs	x13, ttbr1_el1
	stp	x12, x13, [x0, #CTX_TTBR0_EL1]

	mrs	x14, mair_el1
	mrs	x15, amair_el1
	stp	x14, x15, [x0, #CTX_MAIR_EL1]

	mrs	x16, actlr_el1
	mrs	x17, tpidr_el1
	stp	x16, x17, [x0, #CTX_ACTLR_EL1]

	mrs	x9, tpidr_el0
	mrs	x10, tpidrro_el0
	stp	x9, x10, [x0, #CTX_TPIDR_EL0]

	mrs	x13, par_el1
	mrs	x14, far_el1
	stp	x13, x14, [x0, #CTX_PAR_EL1]

	mrs	x15, afsr0_el1
	mrs	x16, afsr1_el1
	stp	x15, x16, [x0, #CTX_AFSR0_EL1]

	mrs	x17, contextidr_el1
	mrs	x9, vbar_el1
	stp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]

	/* Save AArch32 system registers if the build has instructed so */
#if CTX_INCLUDE_AARCH32_REGS
	mrs	x11, spsr_abt
	mrs	x12, spsr_und
	stp	x11, x12, [x0, #CTX_SPSR_ABT]

	mrs	x13, spsr_irq
	mrs	x14, spsr_fiq
	stp	x13, x14, [x0, #CTX_SPSR_IRQ]

	mrs	x15, dacr32_el2
	mrs	x16, ifsr32_el2
	stp	x15, x16, [x0, #CTX_DACR32_EL2]
#endif /* CTX_INCLUDE_AARCH32_REGS */

	/* Save NS timer registers if the build has instructed so */
#if NS_TIMER_SWITCH
	mrs	x10, cntp_ctl_el0
	mrs	x11, cntp_cval_el0
	stp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]

	mrs	x12, cntv_ctl_el0
	mrs	x13, cntv_cval_el0
	stp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]

	mrs	x14, cntkctl_el1
	str	x14, [x0, #CTX_CNTKCTL_EL1]
#endif /* NS_TIMER_SWITCH */

	/* Save MTE system registers if the build has instructed so */
#if CTX_INCLUDE_MTE_REGS
	mrs	x15, TFSRE0_EL1
	mrs	x16, TFSR_EL1
	stp	x15, x16, [x0, #CTX_TFSRE0_EL1]

	mrs	x9, RGSR_EL1
	mrs	x10, GCR_EL1
	stp	x9, x10, [x0, #CTX_RGSR_EL1]
#endif /* CTX_INCLUDE_MTE_REGS */

	ret
endfunc el1_sysregs_context_save

/* ------------------------------------------------------------------
 * The following function strictly follows the AArch64 PCS, using
 * x9-x17 (temporary caller-saved registers) to restore the EL1 system
 * register context. It assumes that 'x0' points to an
 * 'el1_sys_regs' structure from where the register context will be
 * restored.
 * ------------------------------------------------------------------
 */
func el1_sysregs_context_restore

	ldp	x9, x10, [x0, #CTX_SPSR_EL1]
	msr	spsr_el1, x9
	msr	elr_el1, x10

#if !ERRATA_SPECULATIVE_AT
	ldp	x15, x16, [x0, #CTX_SCTLR_EL1]
	msr	sctlr_el1, x15
	msr	tcr_el1, x16
#endif /* ERRATA_SPECULATIVE_AT */

	ldp	x17, x9, [x0, #CTX_CPACR_EL1]
	msr	cpacr_el1, x17
	msr	csselr_el1, x9

	ldp	x10, x11, [x0, #CTX_SP_EL1]
	msr	sp_el1, x10
	msr	esr_el1, x11

	ldp	x12, x13, [x0, #CTX_TTBR0_EL1]
	msr	ttbr0_el1, x12
	msr	ttbr1_el1, x13

	ldp	x14, x15, [x0, #CTX_MAIR_EL1]
	msr	mair_el1, x14
	msr	amair_el1, x15

	ldp	x16, x17, [x0, #CTX_ACTLR_EL1]
	msr	actlr_el1, x16
	msr	tpidr_el1, x17

	ldp	x9, x10, [x0, #CTX_TPIDR_EL0]
	msr	tpidr_el0, x9
	msr	tpidrro_el0, x10

	ldp	x13, x14, [x0, #CTX_PAR_EL1]
	msr	par_el1, x13
	msr	far_el1, x14

	ldp	x15, x16, [x0, #CTX_AFSR0_EL1]
	msr	afsr0_el1, x15
	msr	afsr1_el1, x16

	ldp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
	msr	contextidr_el1, x17
	msr	vbar_el1, x9

	/* Restore AArch32 system registers if the build has instructed so */
#if CTX_INCLUDE_AARCH32_REGS
	ldp	x11, x12, [x0, #CTX_SPSR_ABT]
	msr	spsr_abt, x11
	msr	spsr_und, x12

	ldp	x13, x14, [x0, #CTX_SPSR_IRQ]
	msr	spsr_irq, x13
	msr	spsr_fiq, x14

	ldp	x15, x16, [x0, #CTX_DACR32_EL2]
	msr	dacr32_el2, x15
	msr	ifsr32_el2, x16
#endif /* CTX_INCLUDE_AARCH32_REGS */

	/* Restore NS timer registers if the build has instructed so */
#if NS_TIMER_SWITCH
	ldp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]
	msr	cntp_ctl_el0, x10
	msr	cntp_cval_el0, x11

	ldp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]
	msr	cntv_ctl_el0, x12
	msr	cntv_cval_el0, x13

	ldr	x14, [x0, #CTX_CNTKCTL_EL1]
	msr	cntkctl_el1, x14
#endif /* NS_TIMER_SWITCH */

	/* Restore MTE system registers if the build has instructed so */
#if CTX_INCLUDE_MTE_REGS
	ldp	x11, x12, [x0, #CTX_TFSRE0_EL1]
	msr	TFSRE0_EL1, x11
	msr	TFSR_EL1, x12

	ldp	x13, x14, [x0, #CTX_RGSR_EL1]
	msr	RGSR_EL1, x13
	msr	GCR_EL1, x14
#endif /* CTX_INCLUDE_MTE_REGS */

	/* No explicit ISB required here as ERET covers it */
	ret
endfunc el1_sysregs_context_restore

/* ------------------------------------------------------------------
 * The following function strictly follows the AArch64 PCS, using
 * x9-x17 (temporary caller-saved registers) to save the floating
 * point register context. It assumes that 'x0' points to a
 * 'fp_regs' structure where the register context will be saved.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is set.
 * However, TF-A currently neither uses VFP registers nor sets this
 * trap, so it is assumed to be clear.
 *
 * TODO: Revisit when VFP is used in secure world
 * ------------------------------------------------------------------
 */
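/* ------------------------------------------------------------------
 * Illustrative sketch only (not part of the original code): if EL3
 * ever did set CPTR_EL3.TFP, a caller would have to clear the trap
 * before using these helpers, roughly along these lines (assumes the
 * TFP_BIT definition from arch.h):
 *
 *	mrs	x9, cptr_el3
 *	bic	x9, x9, #TFP_BIT
 *	msr	cptr_el3, x9
 *	isb
 * ------------------------------------------------------------------
 */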
#if CTX_INCLUDE_FPREGS
func fpregs_context_save
	stp	q0, q1, [x0, #CTX_FP_Q0]
	stp	q2, q3, [x0, #CTX_FP_Q2]
	stp	q4, q5, [x0, #CTX_FP_Q4]
	stp	q6, q7, [x0, #CTX_FP_Q6]
	stp	q8, q9, [x0, #CTX_FP_Q8]
	stp	q10, q11, [x0, #CTX_FP_Q10]
	stp	q12, q13, [x0, #CTX_FP_Q12]
	stp	q14, q15, [x0, #CTX_FP_Q14]
	stp	q16, q17, [x0, #CTX_FP_Q16]
	stp	q18, q19, [x0, #CTX_FP_Q18]
	stp	q20, q21, [x0, #CTX_FP_Q20]
	stp	q22, q23, [x0, #CTX_FP_Q22]
	stp	q24, q25, [x0, #CTX_FP_Q24]
	stp	q26, q27, [x0, #CTX_FP_Q26]
	stp	q28, q29, [x0, #CTX_FP_Q28]
	stp	q30, q31, [x0, #CTX_FP_Q30]

	mrs	x9, fpsr
	str	x9, [x0, #CTX_FP_FPSR]

	mrs	x10, fpcr
	str	x10, [x0, #CTX_FP_FPCR]

#if CTX_INCLUDE_AARCH32_REGS
	mrs	x11, fpexc32_el2
	str	x11, [x0, #CTX_FP_FPEXC32_EL2]
#endif /* CTX_INCLUDE_AARCH32_REGS */
	ret
endfunc fpregs_context_save

/* ------------------------------------------------------------------
 * The following function strictly follows the AArch64 PCS, using
 * x9-x17 (temporary caller-saved registers) to restore the floating
 * point register context. It assumes that 'x0' points to a
 * 'fp_regs' structure from where the register context will be
 * restored.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is set.
 * However, TF-A currently neither uses VFP registers nor sets this
 * trap, so it is assumed to be clear.
 *
 * TODO: Revisit when VFP is used in secure world
 * ------------------------------------------------------------------
 */
func fpregs_context_restore
	ldp	q0, q1, [x0, #CTX_FP_Q0]
	ldp	q2, q3, [x0, #CTX_FP_Q2]
	ldp	q4, q5, [x0, #CTX_FP_Q4]
	ldp	q6, q7, [x0, #CTX_FP_Q6]
	ldp	q8, q9, [x0, #CTX_FP_Q8]
	ldp	q10, q11, [x0, #CTX_FP_Q10]
	ldp	q12, q13, [x0, #CTX_FP_Q12]
	ldp	q14, q15, [x0, #CTX_FP_Q14]
	ldp	q16, q17, [x0, #CTX_FP_Q16]
	ldp	q18, q19, [x0, #CTX_FP_Q18]
	ldp	q20, q21, [x0, #CTX_FP_Q20]
	ldp	q22, q23, [x0, #CTX_FP_Q22]
	ldp	q24, q25, [x0, #CTX_FP_Q24]
	ldp	q26, q27, [x0, #CTX_FP_Q26]
	ldp	q28, q29, [x0, #CTX_FP_Q28]
	ldp	q30, q31, [x0, #CTX_FP_Q30]

	ldr	x9, [x0, #CTX_FP_FPSR]
	msr	fpsr, x9

	ldr	x10, [x0, #CTX_FP_FPCR]
	msr	fpcr, x10

#if CTX_INCLUDE_AARCH32_REGS
	ldr	x11, [x0, #CTX_FP_FPEXC32_EL2]
	msr	fpexc32_el2, x11
#endif /* CTX_INCLUDE_AARCH32_REGS */

	/*
	 * No explicit ISB required here as the ERET to
	 * switch to secure EL1 or non-secure world
	 * covers it
	 */

	ret
endfunc fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */

	/*
	 * Set SCR_EL3.EA bit to enable SErrors at EL3
	 */
	.macro enable_serror_at_el3
	mrs     x8, scr_el3
	orr     x8, x8, #SCR_EA_BIT
	msr     scr_el3, x8
	.endm

	/*
	 * Set the PSTATE bits not set when the exception was taken as
	 * described in the AArch64.TakeException() pseudocode function
	 * in ARM DDI 0487F.c page J1-7635 to a default value.
	 */
	.macro set_unset_pstate_bits
	/*
	 * If Data Independent Timing (DIT) functionality is implemented,
	 * always enable DIT in EL3
	 */
#if ENABLE_FEAT_DIT
	mov     x8, #DIT_BIT
	msr     DIT, x8
#endif /* ENABLE_FEAT_DIT */
	.endm /* set_unset_pstate_bits */

/* ------------------------------------------------------------------
 * The following macro is used to save and restore all the general
 * purpose and ARMv8.3-PAuth (if enabled) registers.
 * It also checks whether the Secure Cycle Counter (PMCCNTR_EL0)
 * is disabled in EL3/Secure (ARMv8.5-PMU), in which case PMCCNTR_EL0
 * need not be saved/restored during a world switch.
 *
 * Ideally we would only save and restore the callee-saved registers
 * when a world switch occurs, but that type of implementation is more
 * complex. So currently we will always save and restore these
 * registers on entry to and exit from EL3.
 * clobbers: x18
 * ------------------------------------------------------------------
 */
	.macro save_gp_pmcr_pauth_regs
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]

	/* ----------------------------------------------------------
	 * Check if earlier initialization of MDCR_EL3.SCCD/MCCD to 1
	 * has failed.
	 *
	 * MDCR_EL3:
	 * MCCD bit set, Prohibits the Cycle Counter PMCCNTR_EL0 from
	 * counting at EL3.
	 * SCCD bit set, Secure Cycle Counter Disable. Prohibits PMCCNTR_EL0
	 * from counting in Secure state.
	 * If these bits are not set, FEAT_PMUv3p5/7 is not implemented
	 * and PMCR_EL0 should be saved in the Non-secure context.
	 * ----------------------------------------------------------
	 */
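	/*
	 * Illustrative note (not from the original source): the tst/bne pair
	 * below means that if either MDCR_EL3.SCCD or MDCR_EL3.MCCD reads
	 * back as 1, PMCCNTR_EL0 is already prohibited from counting, so the
	 * PMCR_EL0 save and the DP-bit write are skipped via label "1".
	 */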
	mov_imm	x10, (MDCR_SCCD_BIT | MDCR_MCCD_BIT)
	mrs	x9, mdcr_el3
	tst	x9, x10
	bne	1f

	/* ----------------------------------------------------------
	 * If control reaches here, the Secure Cycle Counter
	 * (PMCCNTR_EL0) is not prohibited from counting at EL3 or in
	 * Secure state, so PMCR_EL0 must be saved before a world
	 * switch.
	 * ----------------------------------------------------------
	 */
	mrs	x9, pmcr_el0

	/* Check caller's security state */
	mrs	x10, scr_el3
	tst	x10, #SCR_NS_BIT
	beq	2f

	/* Save PMCR_EL0 if called from Non-secure state */
	str	x9, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]

	/* Disable cycle counter when event counting is prohibited */
2:	orr	x9, x9, #PMCR_EL0_DP_BIT
	msr	pmcr_el0, x9
	isb
1:
#if CTX_INCLUDE_PAUTH_REGS
	/* ----------------------------------------------------------
	 * Save the ARMv8.3-PAuth keys as they are not banked
	 * by exception level
	 * ----------------------------------------------------------
	 */
	add	x19, sp, #CTX_PAUTH_REGS_OFFSET

	mrs	x20, APIAKeyLo_EL1	/* x21:x20 = APIAKey */
	mrs	x21, APIAKeyHi_EL1
	mrs	x22, APIBKeyLo_EL1	/* x23:x22 = APIBKey */
	mrs	x23, APIBKeyHi_EL1
	mrs	x24, APDAKeyLo_EL1	/* x25:x24 = APDAKey */
	mrs	x25, APDAKeyHi_EL1
	mrs	x26, APDBKeyLo_EL1	/* x27:x26 = APDBKey */
	mrs	x27, APDBKeyHi_EL1
	mrs	x28, APGAKeyLo_EL1	/* x29:x28 = APGAKey */
	mrs	x29, APGAKeyHi_EL1

	stp	x20, x21, [x19, #CTX_PACIAKEY_LO]
	stp	x22, x23, [x19, #CTX_PACIBKEY_LO]
	stp	x24, x25, [x19, #CTX_PACDAKEY_LO]
	stp	x26, x27, [x19, #CTX_PACDBKEY_LO]
	stp	x28, x29, [x19, #CTX_PACGAKEY_LO]
#endif /* CTX_INCLUDE_PAUTH_REGS */
	.endm /* save_gp_pmcr_pauth_regs */

/* -----------------------------------------------------------------
 * This function saves the context and sets the PSTATE to a known
 * state, preparing entry to EL3.
 * Save all the general purpose and ARMv8.3-PAuth (if enabled)
 * registers.
 * Then set any of the PSTATE bits that are not set by hardware
 * according to the AArch64.TakeException pseudocode in the Arm
 * Architecture Reference Manual to a default value for EL3.
 * clobbers: x17
 * -----------------------------------------------------------------
 */
func prepare_el3_entry
	save_gp_pmcr_pauth_regs
	enable_serror_at_el3
	/*
	 * Set the PSTATE bits not described in the AArch64.TakeException
	 * pseudocode to their default values.
	 */
	set_unset_pstate_bits
	ret
endfunc prepare_el3_entry

/* ------------------------------------------------------------------
 * This function restores ARMv8.3-PAuth (if enabled) and all general
 * purpose registers except x30 from the CPU context.
 * x30 register must be explicitly restored by the caller.
 * ------------------------------------------------------------------
 */
func restore_gp_pmcr_pauth_regs
#if CTX_INCLUDE_PAUTH_REGS
	/* Restore the ARMv8.3 PAuth keys */
	add	x10, sp, #CTX_PAUTH_REGS_OFFSET

	ldp	x0, x1, [x10, #CTX_PACIAKEY_LO]	/* x1:x0 = APIAKey */
	ldp	x2, x3, [x10, #CTX_PACIBKEY_LO]	/* x3:x2 = APIBKey */
	ldp	x4, x5, [x10, #CTX_PACDAKEY_LO]	/* x5:x4 = APDAKey */
	ldp	x6, x7, [x10, #CTX_PACDBKEY_LO]	/* x7:x6 = APDBKey */
	ldp	x8, x9, [x10, #CTX_PACGAKEY_LO]	/* x9:x8 = APGAKey */

	msr	APIAKeyLo_EL1, x0
	msr	APIAKeyHi_EL1, x1
	msr	APIBKeyLo_EL1, x2
	msr	APIBKeyHi_EL1, x3
	msr	APDAKeyLo_EL1, x4
	msr	APDAKeyHi_EL1, x5
	msr	APDBKeyLo_EL1, x6
	msr	APDBKeyHi_EL1, x7
	msr	APGAKeyLo_EL1, x8
	msr	APGAKeyHi_EL1, x9
#endif /* CTX_INCLUDE_PAUTH_REGS */

	/* ----------------------------------------------------------
	 * Restore PMCR_EL0 when returning to Non-secure state if the
	 * Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented.
	 * ----------------------------------------------------------
	 */
	mrs	x0, scr_el3
	tst	x0, #SCR_NS_BIT
	beq	2f

	/* ----------------------------------------------------------
	 * Back to Non-secure state.
	 * Check if earlier initialization of MDCR_EL3.SCCD/MCCD to 1
	 * failed, meaning that FEAT_PMUv3p5/7 is not implemented and
	 * PMCR_EL0 should be restored from the non-secure context.
	 * ----------------------------------------------------------
	 */
	mov_imm	x1, (MDCR_SCCD_BIT | MDCR_MCCD_BIT)
	mrs	x0, mdcr_el3
	tst	x0, x1
	bne	2f
	ldr	x0, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
	msr	pmcr_el0, x0
2:
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	ldr	x28, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
	msr	sp_el0, x28
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ret
endfunc restore_gp_pmcr_pauth_regs

/*
 * In case of ERRATA_SPECULATIVE_AT, save SCTLR_EL1 and TCR_EL1
 * registers and update EL1 registers to disable stage1 and stage2
 * page table walk
 */
func save_and_update_ptw_el1_sys_regs
	/* ----------------------------------------------------------
	 * Save only sctlr_el1 and tcr_el1 registers
	 * ----------------------------------------------------------
	 */
	mrs	x29, sctlr_el1
	str	x29, [sp, #(CTX_EL1_SYSREGS_OFFSET + CTX_SCTLR_EL1)]
	mrs	x29, tcr_el1
	str	x29, [sp, #(CTX_EL1_SYSREGS_OFFSET + CTX_TCR_EL1)]

	/* ------------------------------------------------------------
	 * The steps below must be performed in this order to disable
	 * the page table walk for lower ELs (EL1 and EL0). The first
	 * step disables the stage 1 page table walk; the second step
	 * forces the page table walker to use the TCR_EL1.EPDx bits
	 * for address translation. The ISB ensures that the CPU
	 * performs these two steps in order.
	 *
	 * 1. Update TCR_EL1.EPDx bits to disable the stage 1 page
	 *    table walk.
	 * 2. Set the MMU enable bit to avoid identity mapping via
	 *    stage 2 and force TCR_EL1.EPDx to be used by the page
	 *    table walker.
	 * ------------------------------------------------------------
	 */
	orr	x29, x29, #(TCR_EPD0_BIT)
	orr	x29, x29, #(TCR_EPD1_BIT)
	msr	tcr_el1, x29
	isb
	mrs	x29, sctlr_el1
	orr	x29, x29, #SCTLR_M_BIT
	msr	sctlr_el1, x29
	isb

	ret
endfunc save_and_update_ptw_el1_sys_regs

/* ------------------------------------------------------------------
 * This routine assumes that the SP_EL3 is pointing to a valid
 * context structure from where the gp regs and other special
 * registers can be retrieved.
 * ------------------------------------------------------------------
 */
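/* ------------------------------------------------------------------
 * Illustrative note (not from the original source): in BL31 this
 * routine is typically reached with a plain branch at the end of an
 * SMC or interrupt handler, roughly:
 *
 *	... handle the SMC, update the saved x0-x3 return values ...
 *	b	el3_exit
 *
 * so that the lower-EL context saved on entry is restored before the
 * ERET.
 * ------------------------------------------------------------------
 */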
func el3_exit
#if ENABLE_ASSERTIONS
	/* el3_exit assumes SP_EL0 on entry */
	mrs	x17, spsel
	cmp	x17, #MODE_SP_EL0
	ASM_ASSERT(eq)
#endif /* ENABLE_ASSERTIONS */

	/* ----------------------------------------------------------
	 * Save the current SP_EL0 i.e. the EL3 runtime stack which
	 * will be used for handling the next SMC.
	 * Then switch to SP_EL3.
	 * ----------------------------------------------------------
	 */
	mov	x17, sp
	msr	spsel, #MODE_SP_ELX
	str	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

#if IMAGE_BL31
	/* ----------------------------------------------------------
	 * Restore CPTR_EL3.
	 * ZCR is only restored if SVE is supported and enabled.
	 * Synchronization is required before zcr_el3 is addressed.
	 * ----------------------------------------------------------
	 */
	ldp	x19, x20, [sp, #CTX_EL3STATE_OFFSET + CTX_CPTR_EL3]
	msr	cptr_el3, x19

	ands	x19, x19, #CPTR_EZ_BIT
	beq	sve_not_enabled

	isb
	msr	S3_6_C1_C2_0, x20 /* zcr_el3 */
sve_not_enabled:
#endif /* IMAGE_BL31 */

#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639
	/* ----------------------------------------------------------
	 * Restore mitigation state as it was on entry to EL3
	 * ----------------------------------------------------------
	 */
	ldr	x17, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
	cbz	x17, 1f
	blr	x17
1:
#endif /* IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639 */

#if IMAGE_BL31 && RAS_EXTENSION
	/* ----------------------------------------------------------
	 * Issue Error Synchronization Barrier to synchronize SErrors
	 * before exiting EL3. We're running with EAs unmasked, so
	 * any synchronized errors would be taken immediately;
	 * therefore no need to inspect DISR_EL1 register.
	 * ----------------------------------------------------------
	 */
	esb
#else
	dsb	sy
#endif /* IMAGE_BL31 && RAS_EXTENSION */

	/* ----------------------------------------------------------
	 * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
	 * ----------------------------------------------------------
	 */
	ldr	x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	ldp	x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	msr	scr_el3, x18
	msr	spsr_el3, x16
	msr	elr_el3, x17

	restore_ptw_el1_sys_regs

	/* ----------------------------------------------------------
	 * Restore general purpose (including x30), PMCR_EL0 and
	 * ARMv8.3-PAuth registers.
	 * Exit EL3 via ERET to a lower exception level.
	 * ----------------------------------------------------------
	 */
	bl	restore_gp_pmcr_pauth_regs
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

#ifdef IMAGE_BL31
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
#endif /* IMAGE_BL31 */

	exception_return

endfunc el3_exit