xref: /rk3399_ARM-atf/lib/el3_runtime/aarch64/context.S (revision 023f1bed1dde23564e3b66a99c4a45b09e38992b)
/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <context.h>
#include <el3_common_macros.S>

#if CTX_INCLUDE_EL2_REGS
	.global	el2_sysregs_context_save_common
	.global	el2_sysregs_context_restore_common
#if CTX_INCLUDE_MTE_REGS
	.global	el2_sysregs_context_save_mte
	.global	el2_sysregs_context_restore_mte
#endif /* CTX_INCLUDE_MTE_REGS */
#if ENABLE_MPAM_FOR_LOWER_ELS
	.global	el2_sysregs_context_save_mpam
	.global	el2_sysregs_context_restore_mpam
#endif /* ENABLE_MPAM_FOR_LOWER_ELS */
#if ENABLE_FEAT_ECV
	.global	el2_sysregs_context_save_ecv
	.global	el2_sysregs_context_restore_ecv
#endif /* ENABLE_FEAT_ECV */
#if ENABLE_FEAT_VHE
	.global	el2_sysregs_context_save_vhe
	.global	el2_sysregs_context_restore_vhe
#endif /* ENABLE_FEAT_VHE */
#if RAS_EXTENSION
	.global	el2_sysregs_context_save_ras
	.global	el2_sysregs_context_restore_ras
#endif /* RAS_EXTENSION */
#if CTX_INCLUDE_NEVE_REGS
	.global	el2_sysregs_context_save_nv2
	.global	el2_sysregs_context_restore_nv2
#endif /* CTX_INCLUDE_NEVE_REGS */
#if ENABLE_FEAT_CSV2_2
	.global	el2_sysregs_context_save_csv2
	.global	el2_sysregs_context_restore_csv2
#endif /* ENABLE_FEAT_CSV2_2 */
#endif /* CTX_INCLUDE_EL2_REGS */

	.global	el1_sysregs_context_save
	.global	el1_sysregs_context_restore
#if CTX_INCLUDE_FPREGS
	.global	fpregs_context_save
	.global	fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */
	.global	prepare_el3_entry
	.global	restore_gp_pmcr_pauth_regs
	.global	save_and_update_ptw_el1_sys_regs
	.global	el3_exit

#if CTX_INCLUDE_EL2_REGS

/* -----------------------------------------------------
 * The following functions strictly follow the AArch64
 * PCS, using only x9-x16 (temporary caller-saved
 * registers) to save/restore the EL2 system register
 * context. The el2_sysregs_context_save/restore_common
 * functions save and restore the registers that are
 * common to all configurations. The remaining functions
 * save and restore the EL2 system registers that are
 * present only when a particular feature is enabled.
 * All functions assume that 'x0' points to an
 * 'el2_sys_regs' structure where the register context
 * will be saved/restored.
 *
 * The following registers are not saved or restored:
 * AMEVCNTVOFF0<n>_EL2
 * AMEVCNTVOFF1<n>_EL2
 * ICH_AP0R<n>_EL2
 * ICH_AP1R<n>_EL2
 * ICH_LR<n>_EL2
 * -----------------------------------------------------
 */
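/* -----------------------------------------------------
 * Illustrative call sequence (hypothetical caller shown
 * for clarity only; CTX_EL2_SYSREGS_OFFSET is assumed to
 * be the offset of the EL2 area within the CPU context):
 *	add	x0, x9, #CTX_EL2_SYSREGS_OFFSET
 *	bl	el2_sysregs_context_save_common
 * where 'x9' would hold the CPU context base address.
 * -----------------------------------------------------
 */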
func el2_sysregs_context_save_common
	mrs	x9, actlr_el2
	mrs	x10, afsr0_el2
	stp	x9, x10, [x0, #CTX_ACTLR_EL2]

	mrs	x11, afsr1_el2
	mrs	x12, amair_el2
	stp	x11, x12, [x0, #CTX_AFSR1_EL2]

	mrs	x13, cnthctl_el2
	mrs	x14, cntvoff_el2
	stp	x13, x14, [x0, #CTX_CNTHCTL_EL2]

	mrs	x15, cptr_el2
	str	x15, [x0, #CTX_CPTR_EL2]

#if CTX_INCLUDE_AARCH32_REGS
	mrs	x16, dbgvcr32_el2
	str	x16, [x0, #CTX_DBGVCR32_EL2]
#endif /* CTX_INCLUDE_AARCH32_REGS */

	mrs	x9, elr_el2
	mrs	x10, esr_el2
	stp	x9, x10, [x0, #CTX_ELR_EL2]

	mrs	x11, far_el2
	mrs	x12, hacr_el2
	stp	x11, x12, [x0, #CTX_FAR_EL2]

	mrs	x13, hcr_el2
	mrs	x14, hpfar_el2
	stp	x13, x14, [x0, #CTX_HCR_EL2]

	mrs	x15, hstr_el2
	mrs	x16, ICC_SRE_EL2
	stp	x15, x16, [x0, #CTX_HSTR_EL2]

	mrs	x9, ICH_HCR_EL2
	mrs	x10, ICH_VMCR_EL2
	stp	x9, x10, [x0, #CTX_ICH_HCR_EL2]

	mrs	x11, mair_el2
	mrs	x12, mdcr_el2
	stp	x11, x12, [x0, #CTX_MAIR_EL2]

	mrs	x14, sctlr_el2
	str	x14, [x0, #CTX_SCTLR_EL2]

	mrs	x15, spsr_el2
	mrs	x16, sp_el2
	stp	x15, x16, [x0, #CTX_SPSR_EL2]

	mrs	x9, tcr_el2
	mrs	x10, tpidr_el2
	stp	x9, x10, [x0, #CTX_TCR_EL2]

	mrs	x11, ttbr0_el2
	mrs	x12, vbar_el2
	stp	x11, x12, [x0, #CTX_TTBR0_EL2]

	mrs	x13, vmpidr_el2
	mrs	x14, vpidr_el2
	stp	x13, x14, [x0, #CTX_VMPIDR_EL2]

	mrs	x15, vtcr_el2
	mrs	x16, vttbr_el2
	stp	x15, x16, [x0, #CTX_VTCR_EL2]
	ret
endfunc el2_sysregs_context_save_common

func el2_sysregs_context_restore_common
	ldp	x9, x10, [x0, #CTX_ACTLR_EL2]
	msr	actlr_el2, x9
	msr	afsr0_el2, x10

	ldp	x11, x12, [x0, #CTX_AFSR1_EL2]
	msr	afsr1_el2, x11
	msr	amair_el2, x12

	ldp	x13, x14, [x0, #CTX_CNTHCTL_EL2]
	msr	cnthctl_el2, x13
	msr	cntvoff_el2, x14

	ldr	x15, [x0, #CTX_CPTR_EL2]
	msr	cptr_el2, x15

#if CTX_INCLUDE_AARCH32_REGS
	ldr	x16, [x0, #CTX_DBGVCR32_EL2]
	msr	dbgvcr32_el2, x16
#endif /* CTX_INCLUDE_AARCH32_REGS */

	ldp	x9, x10, [x0, #CTX_ELR_EL2]
	msr	elr_el2, x9
	msr	esr_el2, x10

	ldp	x11, x12, [x0, #CTX_FAR_EL2]
	msr	far_el2, x11
	msr	hacr_el2, x12

	ldp	x13, x14, [x0, #CTX_HCR_EL2]
	msr	hcr_el2, x13
	msr	hpfar_el2, x14

	ldp	x15, x16, [x0, #CTX_HSTR_EL2]
	msr	hstr_el2, x15
	msr	ICC_SRE_EL2, x16

	ldp	x9, x10, [x0, #CTX_ICH_HCR_EL2]
	msr	ICH_HCR_EL2, x9
	msr	ICH_VMCR_EL2, x10

	ldp	x11, x12, [x0, #CTX_MAIR_EL2]
	msr	mair_el2, x11
	msr	mdcr_el2, x12

	ldr	x14, [x0, #CTX_SCTLR_EL2]
	msr	sctlr_el2, x14

	ldp	x15, x16, [x0, #CTX_SPSR_EL2]
	msr	spsr_el2, x15
	msr	sp_el2, x16

	ldp	x9, x10, [x0, #CTX_TCR_EL2]
	msr	tcr_el2, x9
	msr	tpidr_el2, x10

	ldp	x11, x12, [x0, #CTX_TTBR0_EL2]
	msr	ttbr0_el2, x11
	msr	vbar_el2, x12

	ldp	x13, x14, [x0, #CTX_VMPIDR_EL2]
	msr	vmpidr_el2, x13
	msr	vpidr_el2, x14

	ldp	x15, x16, [x0, #CTX_VTCR_EL2]
	msr	vtcr_el2, x15
	msr	vttbr_el2, x16
	ret
endfunc el2_sysregs_context_restore_common

#if CTX_INCLUDE_MTE_REGS
func el2_sysregs_context_save_mte
	mrs	x9, TFSR_EL2
	str	x9, [x0, #CTX_TFSR_EL2]
	ret
endfunc el2_sysregs_context_save_mte

func el2_sysregs_context_restore_mte
	ldr	x9, [x0, #CTX_TFSR_EL2]
	msr	TFSR_EL2, x9
	ret
endfunc el2_sysregs_context_restore_mte
#endif /* CTX_INCLUDE_MTE_REGS */

#if ENABLE_MPAM_FOR_LOWER_ELS
func el2_sysregs_context_save_mpam
	mrs	x10, MPAM2_EL2
	str	x10, [x0, #CTX_MPAM2_EL2]

	mrs	x10, MPAMIDR_EL1

	/*
	 * The context registers that we intend to save would be part of the
	 * PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
	 */
	tbz	w10, #MPAMIDR_EL1_HAS_HCR_SHIFT, 3f

	/*
	 * MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 would be present in the
	 * system register frame if MPAMIDR_EL1.HAS_HCR == 1. Proceed to save
	 * the context of these registers.
	 */
	mrs	x11, MPAMHCR_EL2
	mrs	x12, MPAMVPM0_EL2
	stp	x11, x12, [x0, #CTX_MPAMHCR_EL2]

	mrs	x13, MPAMVPMV_EL2
	str	x13, [x0, #CTX_MPAMVPMV_EL2]

	/*
	 * MPAMIDR_EL1.VPMR_MAX has to be probed to obtain the maximum supported
	 * VPMR value. Proceed to save the context of registers from
	 * MPAMVPM1_EL2 to MPAMVPM<x>_EL2 where x is VPMR_MAX. From MPAM spec,
	 * VPMR_MAX should not be zero if HAS_HCR == 1.
	 */
	ubfx	x10, x10, #MPAMIDR_EL1_VPMR_MAX_SHIFT, \
		#MPAMIDR_EL1_VPMR_MAX_WIDTH

	/*
	 * Once VPMR_MAX has been identified, calculate the offset relative to
	 * PC to jump to so that relevant context can be saved. The offset is
	 * calculated as (VPMR_POSSIBLE_MAX - VPMR_MAX) * (instruction size for
	 * saving one VPM register) + (absolute address of label "1").
	 */
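	/*
	 * Worked example (assuming MPAMIDR_EL1_VPMR_MAX_POSSIBLE == 7): if
	 * VPMR_MAX reads as 3, then (7 - 3) = 4 save blocks are skipped and
	 * the branch lands on the block that saves MPAMVPM3_EL2, so only
	 * MPAMVPM3_EL2 down to MPAMVPM1_EL2 are saved.
	 */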
	mov	w11, #MPAMIDR_EL1_VPMR_MAX_POSSIBLE
	sub	w10, w11, w10

	/* Calculate the size of one block of MPAMVPM*_EL2 save */
	adr	x11, 1f
	adr	x12, 2f
	sub	x12, x12, x11

	madd	x10, x10, x12, x11
	br	x10

	/*
	 * The branch above would land properly on one of the blocks following
	 * label "1". Make sure that the order of save is retained.
	 */
1:
#if ENABLE_BTI
	bti	j
#endif
	mrs	x10, MPAMVPM7_EL2
	str	x10, [x0, #CTX_MPAMVPM7_EL2]
2:
#if ENABLE_BTI
	bti	j
#endif
	mrs	x11, MPAMVPM6_EL2
	str	x11, [x0, #CTX_MPAMVPM6_EL2]

#if ENABLE_BTI
	bti	j
#endif
	mrs	x12, MPAMVPM5_EL2
	str	x12, [x0, #CTX_MPAMVPM5_EL2]

#if ENABLE_BTI
	bti	j
#endif
	mrs	x13, MPAMVPM4_EL2
	str	x13, [x0, #CTX_MPAMVPM4_EL2]

#if ENABLE_BTI
	bti	j
#endif
	mrs	x14, MPAMVPM3_EL2
	str	x14, [x0, #CTX_MPAMVPM3_EL2]

#if ENABLE_BTI
	bti	j
#endif
	mrs	x15, MPAMVPM2_EL2
	str	x15, [x0, #CTX_MPAMVPM2_EL2]

#if ENABLE_BTI
	bti	j
#endif
	mrs	x16, MPAMVPM1_EL2
	str	x16, [x0, #CTX_MPAMVPM1_EL2]

3:	ret
endfunc el2_sysregs_context_save_mpam

func el2_sysregs_context_restore_mpam
	ldr	x10, [x0, #CTX_MPAM2_EL2]
	msr	MPAM2_EL2, x10

	mrs	x10, MPAMIDR_EL1
	/*
	 * The context registers that we intend to restore would be part of the
	 * PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
	 */
	tbz	w10, #MPAMIDR_EL1_HAS_HCR_SHIFT, 3f

	/*
	 * MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 would be present in the
	 * system register frame if MPAMIDR_EL1.HAS_HCR == 1. Proceed to restore
	 * the context of these registers.
	 */
	ldp	x11, x12, [x0, #CTX_MPAMHCR_EL2]
	msr	MPAMHCR_EL2, x11
	msr	MPAMVPM0_EL2, x12

	ldr	x13, [x0, #CTX_MPAMVPMV_EL2]
	msr	MPAMVPMV_EL2, x13

	/*
	 * MPAMIDR_EL1.VPMR_MAX has to be probed to obtain the maximum supported
	 * VPMR value. Proceed to restore the context of registers from
	 * MPAMVPM1_EL2 to MPAMVPM<x>_EL2 where x is VPMR_MAX. From MPAM spec,
	 * VPMR_MAX should not be zero if HAS_HCR == 1.
	 */
	ubfx	x10, x10, #MPAMIDR_EL1_VPMR_MAX_SHIFT,	\
		#MPAMIDR_EL1_VPMR_MAX_WIDTH

	/*
	 * Once VPMR_MAX has been identified, calculate the offset relative to
	 * PC to jump to so that relevant context can be restored. The offset is
	 * calculated as (VPMR_POSSIBLE_MAX - VPMR_MAX) * (instruction size for
	 * restoring one VPM register) + (absolute address of label "1").
	 */
	mov	w11, #MPAMIDR_EL1_VPMR_MAX_POSSIBLE
	sub	w10, w11, w10

	/* Calculate the size of one block of MPAMVPM*_EL2 restore */
	adr	x11, 1f
	adr	x12, 2f
	sub	x12, x12, x11

	madd	x10, x10, x12, x11
	br	x10

	/*
	 * The branch above would land properly on one of the blocks following
	 * label "1". Make sure that the order of restore is retained.
	 */
1:

#if ENABLE_BTI
	bti	j
#endif
	ldr	x10, [x0, #CTX_MPAMVPM7_EL2]
	msr	MPAMVPM7_EL2, x10
2:
#if ENABLE_BTI
	bti	j
#endif
	ldr	x11, [x0, #CTX_MPAMVPM6_EL2]
	msr	MPAMVPM6_EL2, x11

#if ENABLE_BTI
	bti	j
#endif
	ldr	x12, [x0, #CTX_MPAMVPM5_EL2]
	msr	MPAMVPM5_EL2, x12

#if ENABLE_BTI
	bti	j
#endif
	ldr	x13, [x0, #CTX_MPAMVPM4_EL2]
	msr	MPAMVPM4_EL2, x13

#if ENABLE_BTI
	bti	j
#endif
	ldr	x14, [x0, #CTX_MPAMVPM3_EL2]
	msr	MPAMVPM3_EL2, x14

#if ENABLE_BTI
	bti	j
#endif
	ldr	x15, [x0, #CTX_MPAMVPM2_EL2]
	msr	MPAMVPM2_EL2, x15

#if ENABLE_BTI
	bti	j
#endif
	ldr	x16, [x0, #CTX_MPAMVPM1_EL2]
	msr	MPAMVPM1_EL2, x16

3:	ret
endfunc el2_sysregs_context_restore_mpam
#endif /* ENABLE_MPAM_FOR_LOWER_ELS */

#if ENABLE_FEAT_ECV
func el2_sysregs_context_save_ecv
	mrs	x11, CNTPOFF_EL2
	str	x11, [x0, #CTX_CNTPOFF_EL2]
	ret
endfunc el2_sysregs_context_save_ecv

func el2_sysregs_context_restore_ecv
	ldr	x11, [x0, #CTX_CNTPOFF_EL2]
	msr	CNTPOFF_EL2, x11
	ret
endfunc el2_sysregs_context_restore_ecv
#endif /* ENABLE_FEAT_ECV */

#if ENABLE_FEAT_VHE
func el2_sysregs_context_save_vhe
	/*
	 * CONTEXTIDR_EL2 register is saved only when FEAT_VHE or
	 * FEAT_Debugv8p2 (currently not in TF-A) is supported.
	 */
	mrs	x9, contextidr_el2
	mrs	x10, ttbr1_el2
	stp	x9, x10, [x0, #CTX_CONTEXTIDR_EL2]
	ret
endfunc el2_sysregs_context_save_vhe

func el2_sysregs_context_restore_vhe
	/*
	 * CONTEXTIDR_EL2 register is restored only when FEAT_VHE or
	 * FEAT_Debugv8p2 (currently not in TF-A) is supported.
	 */
	ldp	x9, x10, [x0, #CTX_CONTEXTIDR_EL2]
	msr	contextidr_el2, x9
	msr	ttbr1_el2, x10
	ret
endfunc el2_sysregs_context_restore_vhe
#endif /* ENABLE_FEAT_VHE */

#if RAS_EXTENSION
func el2_sysregs_context_save_ras
	/*
	 * VDISR_EL2 and VSESR_EL2 registers are saved only when
	 * FEAT_RAS is supported.
	 */
	mrs	x11, vdisr_el2
	mrs	x12, vsesr_el2
	stp	x11, x12, [x0, #CTX_VDISR_EL2]
	ret
endfunc el2_sysregs_context_save_ras

func el2_sysregs_context_restore_ras
	/*
	 * VDISR_EL2 and VSESR_EL2 registers are restored only when FEAT_RAS
	 * is supported.
	 */
	ldp	x11, x12, [x0, #CTX_VDISR_EL2]
	msr	vdisr_el2, x11
	msr	vsesr_el2, x12
	ret
endfunc el2_sysregs_context_restore_ras
#endif /* RAS_EXTENSION */

#if CTX_INCLUDE_NEVE_REGS
func el2_sysregs_context_save_nv2
	/*
	 * VNCR_EL2 register is saved only when FEAT_NV2 is supported.
	 */
	mrs	x16, vncr_el2
	str	x16, [x0, #CTX_VNCR_EL2]
	ret
endfunc el2_sysregs_context_save_nv2

func el2_sysregs_context_restore_nv2
	/*
	 * VNCR_EL2 register is restored only when FEAT_NV2 is supported.
	 */
	ldr	x16, [x0, #CTX_VNCR_EL2]
	msr	vncr_el2, x16
	ret
endfunc el2_sysregs_context_restore_nv2
#endif /* CTX_INCLUDE_NEVE_REGS */

#if ENABLE_FEAT_CSV2_2
func el2_sysregs_context_save_csv2
	/*
	 * SCXTNUM_EL2 register is saved only when FEAT_CSV2_2 is supported.
	 */
	mrs	x13, scxtnum_el2
	str	x13, [x0, #CTX_SCXTNUM_EL2]
	ret
endfunc el2_sysregs_context_save_csv2

func el2_sysregs_context_restore_csv2
	/*
	 * SCXTNUM_EL2 register is restored only when FEAT_CSV2_2 is supported.
	 */
	ldr	x13, [x0, #CTX_SCXTNUM_EL2]
	msr	scxtnum_el2, x13
	ret
endfunc el2_sysregs_context_restore_csv2
#endif /* ENABLE_FEAT_CSV2_2 */

#endif /* CTX_INCLUDE_EL2_REGS */

/* ------------------------------------------------------------------
 * The following function strictly follows the AArch64 PCS, using
 * x9-x17 (temporary caller-saved registers) to save the EL1 system
 * register context. It assumes that 'x0' points to an
 * 'el1_sys_regs' structure where the register context will be saved.
 * ------------------------------------------------------------------
 */
func el1_sysregs_context_save

	mrs	x9, spsr_el1
	mrs	x10, elr_el1
	stp	x9, x10, [x0, #CTX_SPSR_EL1]

#if !ERRATA_SPECULATIVE_AT
	mrs	x15, sctlr_el1
	mrs	x16, tcr_el1
	stp	x15, x16, [x0, #CTX_SCTLR_EL1]
#endif /* ERRATA_SPECULATIVE_AT */

	mrs	x17, cpacr_el1
	mrs	x9, csselr_el1
	stp	x17, x9, [x0, #CTX_CPACR_EL1]

	mrs	x10, sp_el1
	mrs	x11, esr_el1
	stp	x10, x11, [x0, #CTX_SP_EL1]

	mrs	x12, ttbr0_el1
	mrs	x13, ttbr1_el1
	stp	x12, x13, [x0, #CTX_TTBR0_EL1]

	mrs	x14, mair_el1
	mrs	x15, amair_el1
	stp	x14, x15, [x0, #CTX_MAIR_EL1]

	mrs	x16, actlr_el1
	mrs	x17, tpidr_el1
	stp	x16, x17, [x0, #CTX_ACTLR_EL1]

	mrs	x9, tpidr_el0
	mrs	x10, tpidrro_el0
	stp	x9, x10, [x0, #CTX_TPIDR_EL0]

	mrs	x13, par_el1
	mrs	x14, far_el1
	stp	x13, x14, [x0, #CTX_PAR_EL1]

	mrs	x15, afsr0_el1
	mrs	x16, afsr1_el1
	stp	x15, x16, [x0, #CTX_AFSR0_EL1]

	mrs	x17, contextidr_el1
	mrs	x9, vbar_el1
	stp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]

	/* Save AArch32 system registers if the build has instructed so */
#if CTX_INCLUDE_AARCH32_REGS
	mrs	x11, spsr_abt
	mrs	x12, spsr_und
	stp	x11, x12, [x0, #CTX_SPSR_ABT]

	mrs	x13, spsr_irq
	mrs	x14, spsr_fiq
	stp	x13, x14, [x0, #CTX_SPSR_IRQ]

	mrs	x15, dacr32_el2
	mrs	x16, ifsr32_el2
	stp	x15, x16, [x0, #CTX_DACR32_EL2]
#endif /* CTX_INCLUDE_AARCH32_REGS */

	/* Save NS timer registers if the build has instructed so */
#if NS_TIMER_SWITCH
	mrs	x10, cntp_ctl_el0
	mrs	x11, cntp_cval_el0
	stp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]

	mrs	x12, cntv_ctl_el0
	mrs	x13, cntv_cval_el0
	stp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]

	mrs	x14, cntkctl_el1
	str	x14, [x0, #CTX_CNTKCTL_EL1]
#endif /* NS_TIMER_SWITCH */

	/* Save MTE system registers if the build has instructed so */
#if CTX_INCLUDE_MTE_REGS
	mrs	x15, TFSRE0_EL1
	mrs	x16, TFSR_EL1
	stp	x15, x16, [x0, #CTX_TFSRE0_EL1]

	mrs	x9, RGSR_EL1
	mrs	x10, GCR_EL1
	stp	x9, x10, [x0, #CTX_RGSR_EL1]
#endif /* CTX_INCLUDE_MTE_REGS */

	ret
endfunc el1_sysregs_context_save

/* ------------------------------------------------------------------
 * The following function strictly follows the AArch64 PCS, using
 * x9-x17 (temporary caller-saved registers) to restore the EL1 system
 * register context. It assumes that 'x0' points to an 'el1_sys_regs'
 * structure from which the register context will be restored.
 * ------------------------------------------------------------------
 */
func el1_sysregs_context_restore

	ldp	x9, x10, [x0, #CTX_SPSR_EL1]
	msr	spsr_el1, x9
	msr	elr_el1, x10

#if !ERRATA_SPECULATIVE_AT
	ldp	x15, x16, [x0, #CTX_SCTLR_EL1]
	msr	sctlr_el1, x15
	msr	tcr_el1, x16
#endif /* ERRATA_SPECULATIVE_AT */

	ldp	x17, x9, [x0, #CTX_CPACR_EL1]
	msr	cpacr_el1, x17
	msr	csselr_el1, x9

	ldp	x10, x11, [x0, #CTX_SP_EL1]
	msr	sp_el1, x10
	msr	esr_el1, x11

	ldp	x12, x13, [x0, #CTX_TTBR0_EL1]
	msr	ttbr0_el1, x12
	msr	ttbr1_el1, x13

	ldp	x14, x15, [x0, #CTX_MAIR_EL1]
	msr	mair_el1, x14
	msr	amair_el1, x15

	ldp	x16, x17, [x0, #CTX_ACTLR_EL1]
	msr	actlr_el1, x16
	msr	tpidr_el1, x17

	ldp	x9, x10, [x0, #CTX_TPIDR_EL0]
	msr	tpidr_el0, x9
	msr	tpidrro_el0, x10

	ldp	x13, x14, [x0, #CTX_PAR_EL1]
	msr	par_el1, x13
	msr	far_el1, x14

	ldp	x15, x16, [x0, #CTX_AFSR0_EL1]
	msr	afsr0_el1, x15
	msr	afsr1_el1, x16

	ldp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
	msr	contextidr_el1, x17
	msr	vbar_el1, x9

	/* Restore AArch32 system registers if the build has instructed so */
#if CTX_INCLUDE_AARCH32_REGS
	ldp	x11, x12, [x0, #CTX_SPSR_ABT]
	msr	spsr_abt, x11
	msr	spsr_und, x12

	ldp	x13, x14, [x0, #CTX_SPSR_IRQ]
	msr	spsr_irq, x13
	msr	spsr_fiq, x14

	ldp	x15, x16, [x0, #CTX_DACR32_EL2]
	msr	dacr32_el2, x15
	msr	ifsr32_el2, x16
#endif /* CTX_INCLUDE_AARCH32_REGS */

	/* Restore NS timer registers if the build has instructed so */
#if NS_TIMER_SWITCH
	ldp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]
	msr	cntp_ctl_el0, x10
	msr	cntp_cval_el0, x11

	ldp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]
	msr	cntv_ctl_el0, x12
	msr	cntv_cval_el0, x13

	ldr	x14, [x0, #CTX_CNTKCTL_EL1]
	msr	cntkctl_el1, x14
#endif /* NS_TIMER_SWITCH */

	/* Restore MTE system registers if the build has instructed so */
#if CTX_INCLUDE_MTE_REGS
	ldp	x11, x12, [x0, #CTX_TFSRE0_EL1]
	msr	TFSRE0_EL1, x11
	msr	TFSR_EL1, x12

	ldp	x13, x14, [x0, #CTX_RGSR_EL1]
	msr	RGSR_EL1, x13
	msr	GCR_EL1, x14
#endif /* CTX_INCLUDE_MTE_REGS */

	/* No explicit ISB required here as ERET covers it */
	ret
endfunc el1_sysregs_context_restore

/* ------------------------------------------------------------------
 * The following function strictly follows the AArch64 PCS, using
 * x9-x17 (temporary caller-saved registers) to save the floating
 * point register context. It assumes that 'x0' points to a 'fp_regs'
 * structure where the register context will be saved.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is set.
 * However, Trusted Firmware currently neither uses VFP registers
 * nor sets this trap, so the bit is assumed to be clear.
 *
 * TODO: Revisit when VFP is used in the secure world.
 * ------------------------------------------------------------------
 */
#if CTX_INCLUDE_FPREGS
func fpregs_context_save
	stp	q0, q1, [x0, #CTX_FP_Q0]
	stp	q2, q3, [x0, #CTX_FP_Q2]
	stp	q4, q5, [x0, #CTX_FP_Q4]
	stp	q6, q7, [x0, #CTX_FP_Q6]
	stp	q8, q9, [x0, #CTX_FP_Q8]
	stp	q10, q11, [x0, #CTX_FP_Q10]
	stp	q12, q13, [x0, #CTX_FP_Q12]
	stp	q14, q15, [x0, #CTX_FP_Q14]
	stp	q16, q17, [x0, #CTX_FP_Q16]
	stp	q18, q19, [x0, #CTX_FP_Q18]
	stp	q20, q21, [x0, #CTX_FP_Q20]
	stp	q22, q23, [x0, #CTX_FP_Q22]
	stp	q24, q25, [x0, #CTX_FP_Q24]
	stp	q26, q27, [x0, #CTX_FP_Q26]
	stp	q28, q29, [x0, #CTX_FP_Q28]
	stp	q30, q31, [x0, #CTX_FP_Q30]

	mrs	x9, fpsr
	str	x9, [x0, #CTX_FP_FPSR]

	mrs	x10, fpcr
	str	x10, [x0, #CTX_FP_FPCR]

#if CTX_INCLUDE_AARCH32_REGS
	mrs	x11, fpexc32_el2
	str	x11, [x0, #CTX_FP_FPEXC32_EL2]
#endif /* CTX_INCLUDE_AARCH32_REGS */
	ret
endfunc fpregs_context_save

/* ------------------------------------------------------------------
 * The following function strictly follows the AArch64 PCS, using
 * x9-x17 (temporary caller-saved registers) to restore the floating
 * point register context. It assumes that 'x0' points to a 'fp_regs'
 * structure from which the register context will be restored.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is set.
 * However, Trusted Firmware currently neither uses VFP registers
 * nor sets this trap, so the bit is assumed to be clear.
 *
 * TODO: Revisit when VFP is used in the secure world.
 * ------------------------------------------------------------------
 */
func fpregs_context_restore
	ldp	q0, q1, [x0, #CTX_FP_Q0]
	ldp	q2, q3, [x0, #CTX_FP_Q2]
	ldp	q4, q5, [x0, #CTX_FP_Q4]
	ldp	q6, q7, [x0, #CTX_FP_Q6]
	ldp	q8, q9, [x0, #CTX_FP_Q8]
	ldp	q10, q11, [x0, #CTX_FP_Q10]
	ldp	q12, q13, [x0, #CTX_FP_Q12]
	ldp	q14, q15, [x0, #CTX_FP_Q14]
	ldp	q16, q17, [x0, #CTX_FP_Q16]
	ldp	q18, q19, [x0, #CTX_FP_Q18]
	ldp	q20, q21, [x0, #CTX_FP_Q20]
	ldp	q22, q23, [x0, #CTX_FP_Q22]
	ldp	q24, q25, [x0, #CTX_FP_Q24]
	ldp	q26, q27, [x0, #CTX_FP_Q26]
	ldp	q28, q29, [x0, #CTX_FP_Q28]
	ldp	q30, q31, [x0, #CTX_FP_Q30]

	ldr	x9, [x0, #CTX_FP_FPSR]
	msr	fpsr, x9

	ldr	x10, [x0, #CTX_FP_FPCR]
	msr	fpcr, x10

#if CTX_INCLUDE_AARCH32_REGS
	ldr	x11, [x0, #CTX_FP_FPEXC32_EL2]
	msr	fpexc32_el2, x11
#endif /* CTX_INCLUDE_AARCH32_REGS */

	/*
	 * No explicit ISB is required here as the ERET
	 * used to switch to secure EL1 or the non-secure
	 * world covers it.
	 */

	ret
endfunc fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */

	/*
	 * Set SCR_EL3.EA bit to enable SErrors at EL3
	 */
	.macro enable_serror_at_el3
	mrs     x8, scr_el3
	orr     x8, x8, #SCR_EA_BIT
	msr     scr_el3, x8
	.endm

	/*
	 * Set the PSTATE bits not set when the exception was taken as
	 * described in the AArch64.TakeException() pseudocode function
	 * in ARM DDI 0487F.c page J1-7635 to a default value.
	 */
	.macro set_unset_pstate_bits
	/*
	 * If Data Independent Timing (DIT) functionality is implemented,
	 * always enable DIT in EL3
	 */
#if ENABLE_FEAT_DIT
	mov     x8, #DIT_BIT
	msr     DIT, x8
#endif /* ENABLE_FEAT_DIT */
	.endm /* set_unset_pstate_bits */

/* ------------------------------------------------------------------
 * The following macro is used to save and restore all the general
 * purpose and ARMv8.3-PAuth (if enabled) registers.
 * It also checks whether the Secure Cycle Counter (PMCCNTR_EL0)
 * is disabled in EL3/Secure (ARMv8.5-PMU), in which case PMCCNTR_EL0
 * need not be saved/restored during a world switch.
 *
 * Ideally we would only save and restore the callee-saved registers
 * when a world switch occurs, but that type of implementation is
 * more complex. So currently we always save and restore these
 * registers on entry to and exit from EL3.
 * clobbers: x18
 * ------------------------------------------------------------------
 */
	.macro save_gp_pmcr_pauth_regs
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]

	/* ----------------------------------------------------------
	 * Check if the earlier initialization of MDCR_EL3.SCCD/MCCD
	 * to 1 has failed.
	 *
	 * MDCR_EL3:
	 * MCCD bit set: prohibits the Cycle Counter PMCCNTR_EL0 from
	 * counting at EL3.
	 * SCCD bit set: Secure Cycle Counter Disable; prohibits
	 * PMCCNTR_EL0 from counting in Secure state.
	 * If neither bit is set, FEAT_PMUv3p5/7 is not implemented
	 * and PMCR_EL0 should be saved in the non-secure context.
	 * ----------------------------------------------------------
	 */
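	/*
	 * Note: 'tst' below sets Z only when neither SCCD nor MCCD is
	 * set, so 'bne' skips the PMCR_EL0 handling whenever either
	 * disable bit is already in effect.
	 */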
	mov_imm	x10, (MDCR_SCCD_BIT | MDCR_MCCD_BIT)
	mrs	x9, mdcr_el3
	tst	x9, x10
	bne	1f

	/* ----------------------------------------------------------
	 * If control reaches here, the Secure Cycle Counter
	 * (PMCCNTR_EL0) is not prohibited from counting at EL3 or in
	 * Secure state, so PMCR_EL0 has to be saved before the world
	 * switch.
	 * ----------------------------------------------------------
	 */
	mrs	x9, pmcr_el0

	/* Check caller's security state */
	mrs	x10, scr_el3
	tst	x10, #SCR_NS_BIT
	beq	2f

	/* Save PMCR_EL0 if called from Non-secure state */
	str	x9, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]

	/* Disable cycle counter when event counting is prohibited */
2:	orr	x9, x9, #PMCR_EL0_DP_BIT
	msr	pmcr_el0, x9
	isb
1:
#if CTX_INCLUDE_PAUTH_REGS
	/* ----------------------------------------------------------
	 * Save the ARMv8.3-PAuth keys as they are not banked
	 * by exception level
	 * ----------------------------------------------------------
	 */
	add	x19, sp, #CTX_PAUTH_REGS_OFFSET

	mrs	x20, APIAKeyLo_EL1	/* x21:x20 = APIAKey */
	mrs	x21, APIAKeyHi_EL1
	mrs	x22, APIBKeyLo_EL1	/* x23:x22 = APIBKey */
	mrs	x23, APIBKeyHi_EL1
	mrs	x24, APDAKeyLo_EL1	/* x25:x24 = APDAKey */
	mrs	x25, APDAKeyHi_EL1
	mrs	x26, APDBKeyLo_EL1	/* x27:x26 = APDBKey */
	mrs	x27, APDBKeyHi_EL1
	mrs	x28, APGAKeyLo_EL1	/* x29:x28 = APGAKey */
	mrs	x29, APGAKeyHi_EL1

	stp	x20, x21, [x19, #CTX_PACIAKEY_LO]
	stp	x22, x23, [x19, #CTX_PACIBKEY_LO]
	stp	x24, x25, [x19, #CTX_PACDAKEY_LO]
	stp	x26, x27, [x19, #CTX_PACDBKEY_LO]
	stp	x28, x29, [x19, #CTX_PACGAKEY_LO]
#endif /* CTX_INCLUDE_PAUTH_REGS */
	.endm /* save_gp_pmcr_pauth_regs */

/* -----------------------------------------------------------------
 * This function saves the context and sets the PSTATE to a known
 * state, preparing entry to EL3.
 * Save all the general purpose and ARMv8.3-PAuth (if enabled)
 * registers.
 * Then set any of the PSTATE bits that are not set by hardware
 * according to the AArch64.TakeException pseudocode in the Arm
 * Architecture Reference Manual to a default value for EL3.
 * clobbers: x17
 * -----------------------------------------------------------------
 */
func prepare_el3_entry
	save_gp_pmcr_pauth_regs
	enable_serror_at_el3
	/*
	 * Set the PSTATE bits not described in the AArch64.TakeException
	 * pseudocode to their default values.
	 */
	set_unset_pstate_bits
	ret
endfunc prepare_el3_entry

/* ------------------------------------------------------------------
 * This function restores ARMv8.3-PAuth (if enabled) and all general
 * purpose registers except x30 from the CPU context.
 * x30 register must be explicitly restored by the caller.
 * ------------------------------------------------------------------
 */
func restore_gp_pmcr_pauth_regs
#if CTX_INCLUDE_PAUTH_REGS
	/* Restore the ARMv8.3 PAuth keys */
	add	x10, sp, #CTX_PAUTH_REGS_OFFSET

	ldp	x0, x1, [x10, #CTX_PACIAKEY_LO]	/* x1:x0 = APIAKey */
	ldp	x2, x3, [x10, #CTX_PACIBKEY_LO]	/* x3:x2 = APIBKey */
	ldp	x4, x5, [x10, #CTX_PACDAKEY_LO]	/* x5:x4 = APDAKey */
	ldp	x6, x7, [x10, #CTX_PACDBKEY_LO]	/* x7:x6 = APDBKey */
	ldp	x8, x9, [x10, #CTX_PACGAKEY_LO]	/* x9:x8 = APGAKey */

	msr	APIAKeyLo_EL1, x0
	msr	APIAKeyHi_EL1, x1
	msr	APIBKeyLo_EL1, x2
	msr	APIBKeyHi_EL1, x3
	msr	APDAKeyLo_EL1, x4
	msr	APDAKeyHi_EL1, x5
	msr	APDBKeyLo_EL1, x6
	msr	APDBKeyHi_EL1, x7
	msr	APGAKeyLo_EL1, x8
	msr	APGAKeyHi_EL1, x9
#endif /* CTX_INCLUDE_PAUTH_REGS */

	/* ----------------------------------------------------------
	 * Restore PMCR_EL0 when returning to Non-secure state if
	 * Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented.
	 * ----------------------------------------------------------
	 */
	mrs	x0, scr_el3
	tst	x0, #SCR_NS_BIT
	beq	2f

	/* ----------------------------------------------------------
	 * Back to Non-secure state.
	 * Check if the earlier initialization of MDCR_EL3.SCCD/MCCD
	 * to 1 failed, meaning that FEAT_PMUv3p5/7 is not implemented
	 * and PMCR_EL0 should be restored from the non-secure context.
	 * ----------------------------------------------------------
	 */
	mov_imm	x1, (MDCR_SCCD_BIT | MDCR_MCCD_BIT)
	mrs	x0, mdcr_el3
	tst	x0, x1
	bne	2f
	ldr	x0, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
	msr	pmcr_el0, x0
2:
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	ldr	x28, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
	msr	sp_el0, x28
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ret
endfunc restore_gp_pmcr_pauth_regs

/*
 * In case of ERRATA_SPECULATIVE_AT, save SCTLR_EL1 and TCR_EL1
 * registers and update EL1 registers to disable stage1 and stage2
 * page table walk
 */
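/*
 * When ERRATA_SPECULATIVE_AT is enabled, the values saved here are
 * restored by the restore_ptw_el1_sys_regs macro invoked from el3_exit
 * below, just before the exception return to the lower EL.
 */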
func save_and_update_ptw_el1_sys_regs
	/* ----------------------------------------------------------
	 * Save only sctlr_el1 and tcr_el1 registers
	 * ----------------------------------------------------------
	 */
	mrs	x29, sctlr_el1
	str	x29, [sp, #(CTX_EL1_SYSREGS_OFFSET + CTX_SCTLR_EL1)]
	mrs	x29, tcr_el1
	str	x29, [sp, #(CTX_EL1_SYSREGS_OFFSET + CTX_TCR_EL1)]

	/* ------------------------------------------------------------
	 * The steps below must be performed in this order to disable
	 * the page table walk for lower ELs (EL1 and EL0). The first
	 * step disables the stage 1 page table walk, and the second
	 * step forces the page table walker to honour the TCR_EL1.EPDx
	 * bits when performing address translation. The ISB ensures
	 * that the CPU performs these two steps in order.
	 *
	 * 1. Update the TCR_EL1.EPDx bits to disable the stage 1 page
	 *    table walk.
	 * 2. Enable the MMU bit to avoid identity mapping via stage 2
	 *    and force TCR_EL1.EPDx to be used by the page table
	 *    walker.
	 * ------------------------------------------------------------
	 */
	orr	x29, x29, #(TCR_EPD0_BIT)
	orr	x29, x29, #(TCR_EPD1_BIT)
	msr	tcr_el1, x29
	isb
	mrs	x29, sctlr_el1
	orr	x29, x29, #SCTLR_M_BIT
	msr	sctlr_el1, x29
	isb

	ret
endfunc save_and_update_ptw_el1_sys_regs

/* ------------------------------------------------------------------
 * This routine assumes that SP_EL3 is pointing to a valid context
 * structure from which the gp regs and other special registers can
 * be retrieved.
 * ------------------------------------------------------------------
 */
func el3_exit
#if ENABLE_ASSERTIONS
	/* el3_exit assumes SP_EL0 on entry */
	mrs	x17, spsel
	cmp	x17, #MODE_SP_EL0
	ASM_ASSERT(eq)
#endif /* ENABLE_ASSERTIONS */

	/* ----------------------------------------------------------
	 * Save the current SP_EL0 i.e. the EL3 runtime stack which
	 * will be used for handling the next SMC.
	 * Then switch to SP_EL3.
	 * ----------------------------------------------------------
	 */
	mov	x17, sp
	msr	spsel, #MODE_SP_ELX
	str	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

#if IMAGE_BL31
	/* ----------------------------------------------------------
	 * Restore CPTR_EL3.
	 * ZCR is only restored if SVE is supported and enabled.
	 * Synchronization is required before zcr_el3 is addressed.
	 * ----------------------------------------------------------
	 */
	ldp	x19, x20, [sp, #CTX_EL3STATE_OFFSET + CTX_CPTR_EL3]
	msr	cptr_el3, x19

	ands	x19, x19, #CPTR_EZ_BIT
	beq	sve_not_enabled

	isb
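	/*
	 * ZCR_EL3 is written through its encoded name (S3_6_C1_C2_0),
	 * presumably so that this file still assembles with toolchains
	 * that do not recognise the SVE register mnemonics.
	 */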
	msr	S3_6_C1_C2_0, x20 /* zcr_el3 */
sve_not_enabled:
#endif /* IMAGE_BL31 */

#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639
	/* ----------------------------------------------------------
	 * Restore mitigation state as it was on entry to EL3
	 * ----------------------------------------------------------
	 */
	ldr	x17, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
	cbz	x17, 1f
	blr	x17
1:
#endif /* IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639 */

#if IMAGE_BL31 && RAS_EXTENSION
	/* ----------------------------------------------------------
	 * Issue Error Synchronization Barrier to synchronize SErrors
	 * before exiting EL3. We're running with EAs unmasked, so
	 * any synchronized errors would be taken immediately;
	 * therefore no need to inspect DISR_EL1 register.
	 * ----------------------------------------------------------
	 */
	esb
#else
	dsb	sy
#endif /* IMAGE_BL31 && RAS_EXTENSION */

	/* ----------------------------------------------------------
	 * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
	 * ----------------------------------------------------------
	 */
	ldr	x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	ldp	x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	msr	scr_el3, x18
	msr	spsr_el3, x16
	msr	elr_el3, x17

	restore_ptw_el1_sys_regs

	/* ----------------------------------------------------------
	 * Restore general purpose (including x30), PMCR_EL0 and
	 * ARMv8.3-PAuth registers.
	 * Exit EL3 via ERET to a lower exception level.
	 * ----------------------------------------------------------
	 */
	bl	restore_gp_pmcr_pauth_regs
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

#ifdef IMAGE_BL31
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
#endif /* IMAGE_BL31 */

	exception_return

endfunc el3_exit