xref: /rk3399_ARM-atf/lib/el3_runtime/aarch64/context.S (revision f90fe02f061b8a203391e566682221396b656c6f)
1/*
2 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#include <arch.h>
8#include <asm_macros.S>
9#include <assert_macros.S>
10#include <context.h>
11#include <el3_common_macros.S>
12
13#if CTX_INCLUDE_EL2_REGS
14	.global	el2_sysregs_context_save_common
15	.global	el2_sysregs_context_restore_common
16#if ENABLE_SPE_FOR_LOWER_ELS
17	.global	el2_sysregs_context_save_spe
18	.global	el2_sysregs_context_restore_spe
19#endif /* ENABLE_SPE_FOR_LOWER_ELS */
20#if CTX_INCLUDE_MTE_REGS
21	.global	el2_sysregs_context_save_mte
22	.global	el2_sysregs_context_restore_mte
23#endif /* CTX_INCLUDE_MTE_REGS */
24#if ENABLE_MPAM_FOR_LOWER_ELS
25	.global	el2_sysregs_context_save_mpam
26	.global	el2_sysregs_context_restore_mpam
27#endif /* ENABLE_MPAM_FOR_LOWER_ELS */
28#if ENABLE_FEAT_ECV
29	.global	el2_sysregs_context_save_ecv
30	.global	el2_sysregs_context_restore_ecv
31#endif /* ENABLE_FEAT_ECV */
32#if ENABLE_FEAT_VHE
33	.global	el2_sysregs_context_save_vhe
34	.global	el2_sysregs_context_restore_vhe
35#endif /* ENABLE_FEAT_VHE */
36#if RAS_EXTENSION
37	.global	el2_sysregs_context_save_ras
38	.global	el2_sysregs_context_restore_ras
39#endif /* RAS_EXTENSION */
40#if CTX_INCLUDE_NEVE_REGS
41	.global	el2_sysregs_context_save_nv2
42	.global	el2_sysregs_context_restore_nv2
43#endif /* CTX_INCLUDE_NEVE_REGS */
44#if ENABLE_TRF_FOR_NS
45	.global	el2_sysregs_context_save_trf
46	.global	el2_sysregs_context_restore_trf
47#endif /* ENABLE_TRF_FOR_NS */
48#if ENABLE_FEAT_CSV2_2
49	.global	el2_sysregs_context_save_csv2
50	.global	el2_sysregs_context_restore_csv2
51#endif /* ENABLE_FEAT_CSV2_2 */
52#endif /* CTX_INCLUDE_EL2_REGS */
53
54	.global	el1_sysregs_context_save
55	.global	el1_sysregs_context_restore
56#if CTX_INCLUDE_FPREGS
57	.global	fpregs_context_save
58	.global	fpregs_context_restore
59#endif /* CTX_INCLUDE_FPREGS */
60	.global	prepare_el3_entry
61	.global	restore_gp_pmcr_pauth_regs
62	.global	save_and_update_ptw_el1_sys_regs
63	.global	el3_exit
64
65#if CTX_INCLUDE_EL2_REGS
66
67/* -----------------------------------------------------
68 * The following functions strictly follow the AArch64
69 * PCS to use x9-x16 (temporary caller-saved registers)
70 * to save/restore EL2 system register context.
71 * el2_sysregs_context_save/restore_common functions
72 * save and restore registers that are common to all
73 * configurations. The rest of the functions save and
74 * restore EL2 system registers that are present when a
75 * particular feature is enabled. All functions assume
76 * that 'x0' is pointing to a 'el2_sys_regs' structure
77 * where the register context will be saved/restored.
78 *
79 * The following registers are not saved/restored:
80 * AMEVCNTVOFF0<n>_EL2
81 * AMEVCNTVOFF1<n>_EL2
82 * ICH_AP0R<n>_EL2
83 * ICH_AP1R<n>_EL2
84 * ICH_LR<n>_EL2
85 * -----------------------------------------------------
86 */
87func el2_sysregs_context_save_common
88	mrs	x9, actlr_el2
89	mrs	x10, afsr0_el2
90	stp	x9, x10, [x0, #CTX_ACTLR_EL2]
91
92	mrs	x11, afsr1_el2
93	mrs	x12, amair_el2
94	stp	x11, x12, [x0, #CTX_AFSR1_EL2]
95
96	mrs	x13, cnthctl_el2
97	mrs	x14, cntvoff_el2
98	stp	x13, x14, [x0, #CTX_CNTHCTL_EL2]
99
100	mrs	x15, cptr_el2
101	str	x15, [x0, #CTX_CPTR_EL2]
102
103#if CTX_INCLUDE_AARCH32_REGS
104	mrs	x16, dbgvcr32_el2
105	str	x16, [x0, #CTX_DBGVCR32_EL2]
106#endif /* CTX_INCLUDE_AARCH32_REGS */
107
108	mrs	x9, elr_el2
109	mrs	x10, esr_el2
110	stp	x9, x10, [x0, #CTX_ELR_EL2]
111
112	mrs	x11, far_el2
113	mrs	x12, hacr_el2
114	stp	x11, x12, [x0, #CTX_FAR_EL2]
115
116	mrs	x13, hcr_el2
117	mrs	x14, hpfar_el2
118	stp	x13, x14, [x0, #CTX_HCR_EL2]
119
120	mrs	x15, hstr_el2
121	mrs	x16, ICC_SRE_EL2
122	stp	x15, x16, [x0, #CTX_HSTR_EL2]
123
124	mrs	x9, ICH_HCR_EL2
125	mrs	x10, ICH_VMCR_EL2
126	stp	x9, x10, [x0, #CTX_ICH_HCR_EL2]
127
128	mrs	x11, mair_el2
129	mrs	x12, mdcr_el2
130	stp	x11, x12, [x0, #CTX_MAIR_EL2]
131
132	mrs	x14, sctlr_el2
133	str	x14, [x0, #CTX_SCTLR_EL2]
134
135	mrs	x15, spsr_el2
136	mrs	x16, sp_el2
137	stp	x15, x16, [x0, #CTX_SPSR_EL2]
138
139	mrs	x9, tcr_el2
140	mrs	x10, tpidr_el2
141	stp	x9, x10, [x0, #CTX_TCR_EL2]
142
143	mrs	x11, ttbr0_el2
144	mrs	x12, vbar_el2
145	stp	x11, x12, [x0, #CTX_TTBR0_EL2]
146
147	mrs	x13, vmpidr_el2
148	mrs	x14, vpidr_el2
149	stp	x13, x14, [x0, #CTX_VMPIDR_EL2]
150
151	mrs	x15, vtcr_el2
152	mrs	x16, vttbr_el2
153	stp	x15, x16, [x0, #CTX_VTCR_EL2]
154	ret
155endfunc el2_sysregs_context_save_common
156
157func el2_sysregs_context_restore_common
158	ldp	x9, x10, [x0, #CTX_ACTLR_EL2]
159	msr	actlr_el2, x9
160	msr	afsr0_el2, x10
161
162	ldp	x11, x12, [x0, #CTX_AFSR1_EL2]
163	msr	afsr1_el2, x11
164	msr	amair_el2, x12
165
166	ldp	x13, x14, [x0, #CTX_CNTHCTL_EL2]
167	msr	cnthctl_el2, x13
168	msr	cntvoff_el2, x14
169
170	ldr	x15, [x0, #CTX_CPTR_EL2]
171	msr	cptr_el2, x15
172
173#if CTX_INCLUDE_AARCH32_REGS
174	ldr	x16, [x0, #CTX_DBGVCR32_EL2]
175	msr	dbgvcr32_el2, x16
176#endif /* CTX_INCLUDE_AARCH32_REGS */
177
178	ldp	x9, x10, [x0, #CTX_ELR_EL2]
179	msr	elr_el2, x9
180	msr	esr_el2, x10
181
182	ldp	x11, x12, [x0, #CTX_FAR_EL2]
183	msr	far_el2, x11
184	msr	hacr_el2, x12
185
186	ldp	x13, x14, [x0, #CTX_HCR_EL2]
187	msr	hcr_el2, x13
188	msr	hpfar_el2, x14
189
190	ldp	x15, x16, [x0, #CTX_HSTR_EL2]
191	msr	hstr_el2, x15
192	msr	ICC_SRE_EL2, x16
193
194	ldp	x9, x10, [x0, #CTX_ICH_HCR_EL2]
195	msr	ICH_HCR_EL2, x9
196	msr	ICH_VMCR_EL2, x10
197
198	ldp	x11, x12, [x0, #CTX_MAIR_EL2]
199	msr	mair_el2, x11
200	msr	mdcr_el2, x12
201
202	ldr	x14, [x0, #CTX_SCTLR_EL2]
203	msr	sctlr_el2, x14
204
205	ldp	x15, x16, [x0, #CTX_SPSR_EL2]
206	msr	spsr_el2, x15
207	msr	sp_el2, x16
208
209	ldp	x9, x10, [x0, #CTX_TCR_EL2]
210	msr	tcr_el2, x9
211	msr	tpidr_el2, x10
212
213	ldp	x11, x12, [x0, #CTX_TTBR0_EL2]
214	msr	ttbr0_el2, x11
215	msr	vbar_el2, x12
216
217	ldp	x13, x14, [x0, #CTX_VMPIDR_EL2]
218	msr	vmpidr_el2, x13
219	msr	vpidr_el2, x14
220
221	ldp	x15, x16, [x0, #CTX_VTCR_EL2]
222	msr	vtcr_el2, x15
223	msr	vttbr_el2, x16
224	ret
225endfunc el2_sysregs_context_restore_common
226
227#if ENABLE_SPE_FOR_LOWER_ELS
228func el2_sysregs_context_save_spe
229	mrs	x13, PMSCR_EL2
230	str	x13, [x0, #CTX_PMSCR_EL2]
231	ret
232endfunc el2_sysregs_context_save_spe
233
234func el2_sysregs_context_restore_spe
235	ldr	x13, [x0, #CTX_PMSCR_EL2]
236	msr	PMSCR_EL2, x13
237	ret
238endfunc el2_sysregs_context_restore_spe
239#endif /* ENABLE_SPE_FOR_LOWER_ELS */
240
241#if CTX_INCLUDE_MTE_REGS
242func el2_sysregs_context_save_mte
243	mrs	x9, TFSR_EL2
244	str	x9, [x0, #CTX_TFSR_EL2]
245	ret
246endfunc el2_sysregs_context_save_mte
247
248func el2_sysregs_context_restore_mte
249	ldr	x9, [x0, #CTX_TFSR_EL2]
250	msr	TFSR_EL2, x9
251	ret
252endfunc el2_sysregs_context_restore_mte
253#endif /* CTX_INCLUDE_MTE_REGS */
254
255#if ENABLE_MPAM_FOR_LOWER_ELS
256func el2_sysregs_context_save_mpam
257	mrs	x10, MPAM2_EL2
258	str	x10, [x0, #CTX_MPAM2_EL2]
259
260	mrs	x10, MPAMIDR_EL1
261
262	/*
263	 * The context registers that we intend to save would be part of the
264	 * PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
265	 */
266	tbz	w10, #MPAMIDR_EL1_HAS_HCR_SHIFT, 3f
267
268	/*
269	 * MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 would be present in the
270	 * system register frame if MPAMIDR_EL1.HAS_HCR == 1. Proceed to save
271	 * the context of these registers.
272	 */
273	mrs	x11, MPAMHCR_EL2
274	mrs	x12, MPAMVPM0_EL2
275	stp	x11, x12, [x0, #CTX_MPAMHCR_EL2]
276
277	mrs	x13, MPAMVPMV_EL2
278	str	x13, [x0, #CTX_MPAMVPMV_EL2]
279
280	/*
281	 * MPAMIDR_EL1.VPMR_MAX has to be probed to obtain the maximum supported
282	 * VPMR value. Proceed to save the context of registers from
283	 * MPAMVPM1_EL2 to MPAMVPM<x>_EL2 where x is VPMR_MAX. From MPAM spec,
284	 * VPMR_MAX should not be zero if HAS_HCR == 1.
285	 */
286	ubfx	x10, x10, #MPAMIDR_EL1_VPMR_MAX_SHIFT, \
287		#MPAMIDR_EL1_VPMR_MAX_WIDTH
288
289	/*
290	 * Once VPMR_MAX has been identified, calculate the offset relative to
291	 * PC to jump to so that relevant context can be saved. The offset is
292	 * calculated as (VPMR_POSSIBLE_MAX - VPMR_MAX) * (instruction size for
293	 * saving one VPM register) + (absolute address of label "1").
294	 */
295	mov	w11, #MPAMIDR_EL1_VPMR_MAX_POSSIBLE
296	sub	w10, w11, w10
297
298	/* Calculate the size of one block of MPAMVPM*_EL2 save */
299	adr	x11, 1f
300	adr	x12, 2f
301	sub	x12, x12, x11
302
303	madd	x10, x10, x12, x11
304	br	x10
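	/*
	 * Illustrative note (assuming MPAMIDR_EL1_VPMR_MAX_POSSIBLE is 7):
	 * if MPAMIDR_EL1.VPMR_MAX reads as 3, then w10 = 7 - 3 = 4, so the
	 * branch skips the MPAMVPM7..MPAMVPM4_EL2 save blocks and lands on
	 * the MPAMVPM3_EL2 block, saving MPAMVPM3..1_EL2 only.
	 */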
305
306	/*
307	 * The branch above would land properly on one of the blocks following
308	 * label "1". Make sure that the order of save is retained.
309	 */
3101:
311#if ENABLE_BTI
312	bti	j
313#endif
314	mrs	x10, MPAMVPM7_EL2
315	str	x10, [x0, #CTX_MPAMVPM7_EL2]
3162:
317#if ENABLE_BTI
318	bti	j
319#endif
320	mrs	x11, MPAMVPM6_EL2
321	str	x11, [x0, #CTX_MPAMVPM6_EL2]
322
323#if ENABLE_BTI
324	bti	j
325#endif
326	mrs	x12, MPAMVPM5_EL2
327	str	x12, [x0, #CTX_MPAMVPM5_EL2]
328
329#if ENABLE_BTI
330	bti	j
331#endif
332	mrs	x13, MPAMVPM4_EL2
333	str	x13, [x0, #CTX_MPAMVPM4_EL2]
334
335#if ENABLE_BTI
336	bti	j
337#endif
338	mrs	x14, MPAMVPM3_EL2
339	str	x14, [x0, #CTX_MPAMVPM3_EL2]
340
341#if ENABLE_BTI
342	bti	j
343#endif
344	mrs	x15, MPAMVPM2_EL2
345	str	x15, [x0, #CTX_MPAMVPM2_EL2]
346
347#if ENABLE_BTI
348	bti	j
349#endif
350	mrs	x16, MPAMVPM1_EL2
351	str	x16, [x0, #CTX_MPAMVPM1_EL2]
352
3533:	ret
354endfunc el2_sysregs_context_save_mpam
355
356func el2_sysregs_context_restore_mpam
357	ldr	x10, [x0, #CTX_MPAM2_EL2]
358	msr	MPAM2_EL2, x10
359
360	mrs	x10, MPAMIDR_EL1
361	/*
362	 * The context registers that we intend to restore would be part of the
363	 * PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
364	 */
365	tbz	w10, #MPAMIDR_EL1_HAS_HCR_SHIFT, 3f
366
367	/*
368	 * MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 would be present in the
369	 * system register frame if MPAMIDR_EL1.HAS_HCR == 1. Proceed to restore
370	 * the context of these registers
371	 */
372	ldp	x11, x12, [x0, #CTX_MPAMHCR_EL2]
373	msr	MPAMHCR_EL2, x11
374	msr	MPAMVPM0_EL2, x12
375
376	ldr	x13, [x0, #CTX_MPAMVPMV_EL2]
377	msr	MPAMVPMV_EL2, x13
378
379	/*
380	 * MPAMIDR_EL1.VPMR_MAX has to be probed to obtain the maximum supported
381	 * VPMR value. Proceed to restore the context of registers from
382	 * MPAMVPM1_EL2 to MPAMVPM<x>_EL2 where x is VPMR_MAX. From MPAM spec,
383	 * VPMR_MAX should not be zero if HAS_HCR == 1.
384	 */
385	ubfx	x10, x10, #MPAMIDR_EL1_VPMR_MAX_SHIFT,	\
386		#MPAMIDR_EL1_VPMR_MAX_WIDTH
387
388	/*
389	 * Once VPMR_MAX has been identified, calculate the offset relative to
390	 * PC to jump to so that relevant context can be restored. The offset is
391	 * calculated as (VPMR_POSSIBLE_MAX - VPMR_MAX) * (instruction size for
392	 * restoring one VPM register) + (absolute address of label "1").
393	 */
394	mov	w11, #MPAMIDR_EL1_VPMR_MAX_POSSIBLE
395	sub	w10, w11, w10
396
397	/* Calculate the size of one block of MPAMVPM*_EL2 restore */
398	adr	x11, 1f
399	adr	x12, 2f
400	sub	x12, x12, x11
401
402	madd	x10, x10, x12, x11
403	br	x10
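	/*
	 * The computed branch here mirrors the save path in
	 * el2_sysregs_context_save_mpam; the worked example given there
	 * applies equally to these restore blocks.
	 */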
404
405	/*
406	 * The branch above would land properly on one of the blocks following
407	 * label "1". Make sure that the order of restore is retained.
408	 */
4091:
410
411#if ENABLE_BTI
412	bti	j
413#endif
414	ldr	x10, [x0, #CTX_MPAMVPM7_EL2]
415	msr	MPAMVPM7_EL2, x10
4162:
417#if ENABLE_BTI
418	bti	j
419#endif
420	ldr	x11, [x0, #CTX_MPAMVPM6_EL2]
421	msr	MPAMVPM6_EL2, x11
422
423#if ENABLE_BTI
424	bti	j
425#endif
426	ldr	x12, [x0, #CTX_MPAMVPM5_EL2]
427	msr	MPAMVPM5_EL2, x12
428
429#if ENABLE_BTI
430	bti	j
431#endif
432	ldr	x13, [x0, #CTX_MPAMVPM4_EL2]
433	msr	MPAMVPM4_EL2, x13
434
435#if ENABLE_BTI
436	bti	j
437#endif
438	ldr	x14, [x0, #CTX_MPAMVPM3_EL2]
439	msr	MPAMVPM3_EL2, x14
440
441#if ENABLE_BTI
442	bti	j
443#endif
444	ldr	x15, [x0, #CTX_MPAMVPM2_EL2]
445	msr	MPAMVPM2_EL2, x15
446
447#if ENABLE_BTI
448	bti	j
449#endif
450	ldr	x16, [x0, #CTX_MPAMVPM1_EL2]
451	msr	MPAMVPM1_EL2, x16
452
4533:	ret
454endfunc el2_sysregs_context_restore_mpam
455#endif /* ENABLE_MPAM_FOR_LOWER_ELS */
456
457#if ENABLE_FEAT_ECV
458func el2_sysregs_context_save_ecv
459	mrs	x11, CNTPOFF_EL2
460	str	x11, [x0, #CTX_CNTPOFF_EL2]
461	ret
462endfunc el2_sysregs_context_save_ecv
463
464func el2_sysregs_context_restore_ecv
465	ldr	x11, [x0, #CTX_CNTPOFF_EL2]
466	msr	CNTPOFF_EL2, x11
467	ret
468endfunc el2_sysregs_context_restore_ecv
469#endif /* ENABLE_FEAT_ECV */
470
471#if ENABLE_FEAT_VHE
472func el2_sysregs_context_save_vhe
473	/*
474	 * CONTEXTIDR_EL2 register is saved only when FEAT_VHE or
475	 * FEAT_Debugv8p2 (currently not in TF-A) is supported.
476	 */
477	mrs	x9, contextidr_el2
478	mrs	x10, ttbr1_el2
479	stp	x9, x10, [x0, #CTX_CONTEXTIDR_EL2]
480	ret
481endfunc el2_sysregs_context_save_vhe
482
483func el2_sysregs_context_restore_vhe
484	/*
485	 * CONTEXTIDR_EL2 register is restored only when FEAT_VHE or
486	 * FEAT_Debugv8p2 (currently not in TF-A) is supported.
487	 */
488	ldp	x9, x10, [x0, #CTX_CONTEXTIDR_EL2]
489	msr	contextidr_el2, x9
490	msr	ttbr1_el2, x10
491	ret
492endfunc el2_sysregs_context_restore_vhe
493#endif /* ENABLE_FEAT_VHE */
494
495#if RAS_EXTENSION
496func el2_sysregs_context_save_ras
497	/*
498	 * VDISR_EL2 and VSESR_EL2 registers are saved only when
499	 * FEAT_RAS is supported.
500	 */
501	mrs	x11, vdisr_el2
502	mrs	x12, vsesr_el2
503	stp	x11, x12, [x0, #CTX_VDISR_EL2]
504	ret
505endfunc el2_sysregs_context_save_ras
506
507func el2_sysregs_context_restore_ras
508	/*
509	 * VDISR_EL2 and VSESR_EL2 registers are restored only when FEAT_RAS
510	 * is supported.
511	 */
512	ldp	x11, x12, [x0, #CTX_VDISR_EL2]
513	msr	vdisr_el2, x11
514	msr	vsesr_el2, x12
515	ret
516endfunc el2_sysregs_context_restore_ras
517#endif /* RAS_EXTENSION */
518
519#if CTX_INCLUDE_NEVE_REGS
520func el2_sysregs_context_save_nv2
521	/*
522	 * VNCR_EL2 register is saved only when FEAT_NV2 is supported.
523	 */
524	mrs	x16, vncr_el2
525	str	x16, [x0, #CTX_VNCR_EL2]
526	ret
527endfunc el2_sysregs_context_save_nv2
528
529func el2_sysregs_context_restore_nv2
530	/*
531	 * VNCR_EL2 register is restored only when FEAT_NV2 is supported.
532	 */
533	ldr	x16, [x0, #CTX_VNCR_EL2]
534	msr	vncr_el2, x16
535	ret
536endfunc el2_sysregs_context_restore_nv2
537#endif /* CTX_INCLUDE_NEVE_REGS */
538
539#if ENABLE_TRF_FOR_NS
540func el2_sysregs_context_save_trf
541	/*
542	 * TRFCR_EL2 register is saved only when FEAT_TRF is supported.
543	 */
544	mrs	x12, TRFCR_EL2
545	str	x12, [x0, #CTX_TRFCR_EL2]
546	ret
547endfunc el2_sysregs_context_save_trf
548
549func el2_sysregs_context_restore_trf
550	/*
551	 * TRFCR_EL2 register is restored only when FEAT_TRF is supported.
552	 */
553	ldr	x12, [x0, #CTX_TRFCR_EL2]
554	msr	TRFCR_EL2, x12
555	ret
556endfunc el2_sysregs_context_restore_trf
557#endif /* ENABLE_TRF_FOR_NS */
558
559#if ENABLE_FEAT_CSV2_2
560func el2_sysregs_context_save_csv2
561	/*
562	 * SCXTNUM_EL2 register is saved only when FEAT_CSV2_2 is supported.
563	 */
564	mrs	x13, scxtnum_el2
565	str	x13, [x0, #CTX_SCXTNUM_EL2]
566	ret
567endfunc el2_sysregs_context_save_csv2
568
569func el2_sysregs_context_restore_csv2
570	/*
571	 * SCXTNUM_EL2 register is restored only when FEAT_CSV2_2 is supported.
572	 */
573	ldr	x13, [x0, #CTX_SCXTNUM_EL2]
574	msr	scxtnum_el2, x13
575	ret
576endfunc el2_sysregs_context_restore_csv2
577#endif /* ENABLE_FEAT_CSV2_2 */
578
579#endif /* CTX_INCLUDE_EL2_REGS */
580
581/* ------------------------------------------------------------------
582 * The following function strictly follows the AArch64 PCS to use
583 * x9-x17 (temporary caller-saved registers) to save EL1 system
584 * register context. It assumes that 'x0' is pointing to a
585 * 'el1_sys_regs' structure where the register context will be saved.
586 * ------------------------------------------------------------------
587 */
588func el1_sysregs_context_save
589
590	mrs	x9, spsr_el1
591	mrs	x10, elr_el1
592	stp	x9, x10, [x0, #CTX_SPSR_EL1]
593
594#if !ERRATA_SPECULATIVE_AT
595	mrs	x15, sctlr_el1
596	mrs	x16, tcr_el1
597	stp	x15, x16, [x0, #CTX_SCTLR_EL1]
598#endif /* ERRATA_SPECULATIVE_AT */
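	/*
	 * Note: when ERRATA_SPECULATIVE_AT is enabled, SCTLR_EL1 and
	 * TCR_EL1 are saved by save_and_update_ptw_el1_sys_regs below
	 * instead, and are restored via the restore_ptw_el1_sys_regs
	 * macro on the el3_exit path.
	 */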
599
600	mrs	x17, cpacr_el1
601	mrs	x9, csselr_el1
602	stp	x17, x9, [x0, #CTX_CPACR_EL1]
603
604	mrs	x10, sp_el1
605	mrs	x11, esr_el1
606	stp	x10, x11, [x0, #CTX_SP_EL1]
607
608	mrs	x12, ttbr0_el1
609	mrs	x13, ttbr1_el1
610	stp	x12, x13, [x0, #CTX_TTBR0_EL1]
611
612	mrs	x14, mair_el1
613	mrs	x15, amair_el1
614	stp	x14, x15, [x0, #CTX_MAIR_EL1]
615
616	mrs	x16, actlr_el1
617	mrs	x17, tpidr_el1
618	stp	x16, x17, [x0, #CTX_ACTLR_EL1]
619
620	mrs	x9, tpidr_el0
621	mrs	x10, tpidrro_el0
622	stp	x9, x10, [x0, #CTX_TPIDR_EL0]
623
624	mrs	x13, par_el1
625	mrs	x14, far_el1
626	stp	x13, x14, [x0, #CTX_PAR_EL1]
627
628	mrs	x15, afsr0_el1
629	mrs	x16, afsr1_el1
630	stp	x15, x16, [x0, #CTX_AFSR0_EL1]
631
632	mrs	x17, contextidr_el1
633	mrs	x9, vbar_el1
634	stp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
635
636	/* Save AArch32 system registers if the build has instructed so */
637#if CTX_INCLUDE_AARCH32_REGS
638	mrs	x11, spsr_abt
639	mrs	x12, spsr_und
640	stp	x11, x12, [x0, #CTX_SPSR_ABT]
641
642	mrs	x13, spsr_irq
643	mrs	x14, spsr_fiq
644	stp	x13, x14, [x0, #CTX_SPSR_IRQ]
645
646	mrs	x15, dacr32_el2
647	mrs	x16, ifsr32_el2
648	stp	x15, x16, [x0, #CTX_DACR32_EL2]
649#endif /* CTX_INCLUDE_AARCH32_REGS */
650
651	/* Save NS timer registers if the build has instructed so */
652#if NS_TIMER_SWITCH
653	mrs	x10, cntp_ctl_el0
654	mrs	x11, cntp_cval_el0
655	stp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]
656
657	mrs	x12, cntv_ctl_el0
658	mrs	x13, cntv_cval_el0
659	stp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]
660
661	mrs	x14, cntkctl_el1
662	str	x14, [x0, #CTX_CNTKCTL_EL1]
663#endif /* NS_TIMER_SWITCH */
664
665	/* Save MTE system registers if the build has instructed so */
666#if CTX_INCLUDE_MTE_REGS
667	mrs	x15, TFSRE0_EL1
668	mrs	x16, TFSR_EL1
669	stp	x15, x16, [x0, #CTX_TFSRE0_EL1]
670
671	mrs	x9, RGSR_EL1
672	mrs	x10, GCR_EL1
673	stp	x9, x10, [x0, #CTX_RGSR_EL1]
674#endif /* CTX_INCLUDE_MTE_REGS */
675
676	ret
677endfunc el1_sysregs_context_save
678
679/* ------------------------------------------------------------------
680 * The following function strictly follows the AArch64 PCS to use
681 * x9-x17 (temporary caller-saved registers) to restore EL1 system
682 * register context.  It assumes that 'x0' is pointing to a
683 * 'el1_sys_regs' structure from where the register context will be
684 * restored
685 * ------------------------------------------------------------------
686 */
687func el1_sysregs_context_restore
688
689	ldp	x9, x10, [x0, #CTX_SPSR_EL1]
690	msr	spsr_el1, x9
691	msr	elr_el1, x10
692
693#if !ERRATA_SPECULATIVE_AT
694	ldp	x15, x16, [x0, #CTX_SCTLR_EL1]
695	msr	sctlr_el1, x15
696	msr	tcr_el1, x16
697#endif /* ERRATA_SPECULATIVE_AT */
698
699	ldp	x17, x9, [x0, #CTX_CPACR_EL1]
700	msr	cpacr_el1, x17
701	msr	csselr_el1, x9
702
703	ldp	x10, x11, [x0, #CTX_SP_EL1]
704	msr	sp_el1, x10
705	msr	esr_el1, x11
706
707	ldp	x12, x13, [x0, #CTX_TTBR0_EL1]
708	msr	ttbr0_el1, x12
709	msr	ttbr1_el1, x13
710
711	ldp	x14, x15, [x0, #CTX_MAIR_EL1]
712	msr	mair_el1, x14
713	msr	amair_el1, x15
714
715	ldp	x16, x17, [x0, #CTX_ACTLR_EL1]
716	msr	actlr_el1, x16
717	msr	tpidr_el1, x17
718
719	ldp	x9, x10, [x0, #CTX_TPIDR_EL0]
720	msr	tpidr_el0, x9
721	msr	tpidrro_el0, x10
722
723	ldp	x13, x14, [x0, #CTX_PAR_EL1]
724	msr	par_el1, x13
725	msr	far_el1, x14
726
727	ldp	x15, x16, [x0, #CTX_AFSR0_EL1]
728	msr	afsr0_el1, x15
729	msr	afsr1_el1, x16
730
731	ldp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
732	msr	contextidr_el1, x17
733	msr	vbar_el1, x9
734
735	/* Restore AArch32 system registers if the build has instructed so */
736#if CTX_INCLUDE_AARCH32_REGS
737	ldp	x11, x12, [x0, #CTX_SPSR_ABT]
738	msr	spsr_abt, x11
739	msr	spsr_und, x12
740
741	ldp	x13, x14, [x0, #CTX_SPSR_IRQ]
742	msr	spsr_irq, x13
743	msr	spsr_fiq, x14
744
745	ldp	x15, x16, [x0, #CTX_DACR32_EL2]
746	msr	dacr32_el2, x15
747	msr	ifsr32_el2, x16
748#endif /* CTX_INCLUDE_AARCH32_REGS */
749
750	/* Restore NS timer registers if the build has instructed so */
751#if NS_TIMER_SWITCH
752	ldp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]
753	msr	cntp_ctl_el0, x10
754	msr	cntp_cval_el0, x11
755
756	ldp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]
757	msr	cntv_ctl_el0, x12
758	msr	cntv_cval_el0, x13
759
760	ldr	x14, [x0, #CTX_CNTKCTL_EL1]
761	msr	cntkctl_el1, x14
762#endif /* NS_TIMER_SWITCH */
763
764	/* Restore MTE system registers if the build has instructed so */
765#if CTX_INCLUDE_MTE_REGS
766	ldp	x11, x12, [x0, #CTX_TFSRE0_EL1]
767	msr	TFSRE0_EL1, x11
768	msr	TFSR_EL1, x12
769
770	ldp	x13, x14, [x0, #CTX_RGSR_EL1]
771	msr	RGSR_EL1, x13
772	msr	GCR_EL1, x14
773#endif /* CTX_INCLUDE_MTE_REGS */
774
775	/* No explicit ISB required here as ERET covers it */
776	ret
777endfunc el1_sysregs_context_restore
778
779/* ------------------------------------------------------------------
780 * The following function follows the aapcs_64 strictly to use
781 * x9-x17 (temporary caller-saved registers according to AArch64 PCS)
782 * to save floating point register context. It assumes that 'x0' is
783 * pointing to a 'fp_regs' structure where the register context will
784 * be saved.
785 *
786 * Access to VFP registers will trap if CPTR_EL3.TFP is set.
787 * However, Trusted Firmware currently neither uses VFP registers
788 * nor sets this trap, so TFP is assumed to be cleared.
789 *
790 * TODO: Revisit when VFP is used in secure world
791 * ------------------------------------------------------------------
792 */
793#if CTX_INCLUDE_FPREGS
794func fpregs_context_save
795	stp	q0, q1, [x0, #CTX_FP_Q0]
796	stp	q2, q3, [x0, #CTX_FP_Q2]
797	stp	q4, q5, [x0, #CTX_FP_Q4]
798	stp	q6, q7, [x0, #CTX_FP_Q6]
799	stp	q8, q9, [x0, #CTX_FP_Q8]
800	stp	q10, q11, [x0, #CTX_FP_Q10]
801	stp	q12, q13, [x0, #CTX_FP_Q12]
802	stp	q14, q15, [x0, #CTX_FP_Q14]
803	stp	q16, q17, [x0, #CTX_FP_Q16]
804	stp	q18, q19, [x0, #CTX_FP_Q18]
805	stp	q20, q21, [x0, #CTX_FP_Q20]
806	stp	q22, q23, [x0, #CTX_FP_Q22]
807	stp	q24, q25, [x0, #CTX_FP_Q24]
808	stp	q26, q27, [x0, #CTX_FP_Q26]
809	stp	q28, q29, [x0, #CTX_FP_Q28]
810	stp	q30, q31, [x0, #CTX_FP_Q30]
811
812	mrs	x9, fpsr
813	str	x9, [x0, #CTX_FP_FPSR]
814
815	mrs	x10, fpcr
816	str	x10, [x0, #CTX_FP_FPCR]
817
818#if CTX_INCLUDE_AARCH32_REGS
819	mrs	x11, fpexc32_el2
820	str	x11, [x0, #CTX_FP_FPEXC32_EL2]
821#endif /* CTX_INCLUDE_AARCH32_REGS */
822	ret
823endfunc fpregs_context_save
824
825/* ------------------------------------------------------------------
826 * The following function follows the aapcs_64 strictly to use x9-x17
827 * (temporary caller-saved registers according to AArch64 PCS) to
828 * restore floating point register context. It assumes that 'x0' is
829 * pointing to a 'fp_regs' structure from where the register context
830 * will be restored.
831 *
832 * Access to VFP registers will trap if CPTR_EL3.TFP is set.
833 * However, Trusted Firmware currently neither uses VFP registers
834 * nor sets this trap, so TFP is assumed to be cleared.
835 *
836 * TODO: Revisit when VFP is used in secure world
837 * ------------------------------------------------------------------
838 */
839func fpregs_context_restore
840	ldp	q0, q1, [x0, #CTX_FP_Q0]
841	ldp	q2, q3, [x0, #CTX_FP_Q2]
842	ldp	q4, q5, [x0, #CTX_FP_Q4]
843	ldp	q6, q7, [x0, #CTX_FP_Q6]
844	ldp	q8, q9, [x0, #CTX_FP_Q8]
845	ldp	q10, q11, [x0, #CTX_FP_Q10]
846	ldp	q12, q13, [x0, #CTX_FP_Q12]
847	ldp	q14, q15, [x0, #CTX_FP_Q14]
848	ldp	q16, q17, [x0, #CTX_FP_Q16]
849	ldp	q18, q19, [x0, #CTX_FP_Q18]
850	ldp	q20, q21, [x0, #CTX_FP_Q20]
851	ldp	q22, q23, [x0, #CTX_FP_Q22]
852	ldp	q24, q25, [x0, #CTX_FP_Q24]
853	ldp	q26, q27, [x0, #CTX_FP_Q26]
854	ldp	q28, q29, [x0, #CTX_FP_Q28]
855	ldp	q30, q31, [x0, #CTX_FP_Q30]
856
857	ldr	x9, [x0, #CTX_FP_FPSR]
858	msr	fpsr, x9
859
860	ldr	x10, [x0, #CTX_FP_FPCR]
861	msr	fpcr, x10
862
863#if CTX_INCLUDE_AARCH32_REGS
864	ldr	x11, [x0, #CTX_FP_FPEXC32_EL2]
865	msr	fpexc32_el2, x11
866#endif /* CTX_INCLUDE_AARCH32_REGS */
867
868	/*
869	 * No explicit ISB required here as ERET to
870	 * switch to secure EL1 or non-secure world
871	 * covers it
872	 */
873
874	ret
875endfunc fpregs_context_restore
876#endif /* CTX_INCLUDE_FPREGS */
877
878	/*
879	 * Set SCR_EL3.EA bit to enable SErrors at EL3
880	 */
881	.macro enable_serror_at_el3
882	mrs     x8, scr_el3
883	orr     x8, x8, #SCR_EA_BIT
884	msr     scr_el3, x8
885	.endm
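	/*
	 * Note: setting SCR_EL3.EA routes External Aborts as well as
	 * SError interrupts to EL3.
	 */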
886
887	/*
888	 * Set to a default value the PSTATE bits that are not set when the
889	 * exception is taken, as described in the AArch64.TakeException()
890	 * pseudocode function in ARM DDI 0487F.c, page J1-7635.
891	 */
892	.macro set_unset_pstate_bits
893	/*
894	 * If Data Independent Timing (DIT) functionality is implemented,
895	 * always enable DIT in EL3
896	 */
897#if ENABLE_FEAT_DIT
898	mov     x8, #DIT_BIT
899	msr     DIT, x8
900#endif /* ENABLE_FEAT_DIT */
901	.endm /* set_unset_pstate_bits */
902
903/* ------------------------------------------------------------------
904 * The following macro is used to save and restore all the general
905 * purpose and ARMv8.3-PAuth (if enabled) registers.
906 * It also checks whether the Secure Cycle Counter (PMCCNTR_EL0)
907 * is disabled in EL3/Secure (ARMv8.5-PMU); if so, PMCR_EL0
908 * need not be saved/restored during the world switch.
909 *
910 * Ideally we would only save and restore the callee saved registers
911 * when a world switch occurs but that type of implementation is more
912 * complex. So currently we will always save and restore these
913 * registers on entry and exit of EL3.
914 * clobbers: x18
915 * ------------------------------------------------------------------
916 */
917	.macro save_gp_pmcr_pauth_regs
918	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
919	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
920	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
921	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
922	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
923	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
924	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
925	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
926	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
927	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
928	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
929	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
930	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
931	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
932	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
933	mrs	x18, sp_el0
934	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
935
936	/* ----------------------------------------------------------
937	 * Check if earlier initialization of MDCR_EL3.SCCD/MCCD to 1
938	 * has failed.
939	 *
940	 * MDCR_EL3:
941	 * MCCD bit set: prohibits the Cycle Counter PMCCNTR_EL0 from
942	 * counting at EL3.
943	 * SCCD bit set: Secure Cycle Counter Disable. Prohibits PMCCNTR_EL0
944	 * from counting in Secure state.
945	 * If these bits are not set, FEAT_PMUv3p5/7 is not
946	 * implemented and PMCR_EL0 must be saved in the non-secure
947	 * context.
948	 * ----------------------------------------------------------
949	 */
950	mov_imm	x10, (MDCR_SCCD_BIT | MDCR_MCCD_BIT)
951	mrs	x9, mdcr_el3
952	tst	x9, x10
953	bne	1f
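	/*
	 * The tst above sets the Z flag only when both SCCD and MCCD
	 * read as zero, so the bne skips the PMCR_EL0 handling whenever
	 * at least one of the disable bits is already set.
	 */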
954
955	/* ----------------------------------------------------------
956	 * If control reaches here, the Secure Cycle Counter
957	 * (PMCCNTR_EL0) is not prohibited from counting at EL3 or
958	 * in Secure state.
959	 * Hence, PMCR_EL0 must be saved before the world switch.
960	 * ----------------------------------------------------------
961	 */
962	mrs	x9, pmcr_el0
963
964	/* Check caller's security state */
965	mrs	x10, scr_el3
966	tst	x10, #SCR_NS_BIT
967	beq	2f
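	/*
	 * When called from Secure state the save below is skipped, but
	 * PMCR_EL0.DP is still set at label 2 so that the cycle counter
	 * does not count while event counting is prohibited.
	 */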
968
969	/* Save PMCR_EL0 if called from Non-secure state */
970	str	x9, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
971
972	/* Disable cycle counter when event counting is prohibited */
9732:	orr	x9, x9, #PMCR_EL0_DP_BIT
974	msr	pmcr_el0, x9
975	isb
9761:
977#if CTX_INCLUDE_PAUTH_REGS
978	/* ----------------------------------------------------------
979 	 * Save the ARMv8.3-PAuth keys as they are not banked
980 	 * by exception level
981	 * ----------------------------------------------------------
982	 */
983	add	x19, sp, #CTX_PAUTH_REGS_OFFSET
984
985	mrs	x20, APIAKeyLo_EL1	/* x21:x20 = APIAKey */
986	mrs	x21, APIAKeyHi_EL1
987	mrs	x22, APIBKeyLo_EL1	/* x23:x22 = APIBKey */
988	mrs	x23, APIBKeyHi_EL1
989	mrs	x24, APDAKeyLo_EL1	/* x25:x24 = APDAKey */
990	mrs	x25, APDAKeyHi_EL1
991	mrs	x26, APDBKeyLo_EL1	/* x27:x26 = APDBKey */
992	mrs	x27, APDBKeyHi_EL1
993	mrs	x28, APGAKeyLo_EL1	/* x29:x28 = APGAKey */
994	mrs	x29, APGAKeyHi_EL1
995
996	stp	x20, x21, [x19, #CTX_PACIAKEY_LO]
997	stp	x22, x23, [x19, #CTX_PACIBKEY_LO]
998	stp	x24, x25, [x19, #CTX_PACDAKEY_LO]
999	stp	x26, x27, [x19, #CTX_PACDBKEY_LO]
1000	stp	x28, x29, [x19, #CTX_PACGAKEY_LO]
1001#endif /* CTX_INCLUDE_PAUTH_REGS */
1002	.endm /* save_gp_pmcr_pauth_regs */
1003
1004/* -----------------------------------------------------------------
1005 * This function saves the context and sets the PSTATE to a known
1006 * state, preparing entry to el3.
1007 * Save all the general purpose and ARMv8.3-PAuth (if enabled)
1008 * registers.
1009 * Then set any of the PSTATE bits that are not set by hardware
1010 * according to the AArch64.TakeException pseudocode in the Arm
1011 * Architecture Reference Manual to a default value for EL3.
1012 * clobbers: x17
1013 * -----------------------------------------------------------------
1014 */
1015func prepare_el3_entry
1016	save_gp_pmcr_pauth_regs
1017	enable_serror_at_el3
1018	/*
1019	 * Set the PSTATE bits not described in the AArch64.TakeException
1020	 * pseudocode to their default values.
1021	 */
1022	set_unset_pstate_bits
1023	ret
1024endfunc prepare_el3_entry
1025
1026/* ------------------------------------------------------------------
1027 * This function restores ARMv8.3-PAuth (if enabled) and all general
1028 * purpose registers except x30 from the CPU context.
1029 * x30 register must be explicitly restored by the caller.
1030 * ------------------------------------------------------------------
1031 */
1032func restore_gp_pmcr_pauth_regs
1033#if CTX_INCLUDE_PAUTH_REGS
1034 	/* Restore the ARMv8.3 PAuth keys */
1035	add	x10, sp, #CTX_PAUTH_REGS_OFFSET
1036
1037	ldp	x0, x1, [x10, #CTX_PACIAKEY_LO]	/* x1:x0 = APIAKey */
1038	ldp	x2, x3, [x10, #CTX_PACIBKEY_LO]	/* x3:x2 = APIBKey */
1039	ldp	x4, x5, [x10, #CTX_PACDAKEY_LO]	/* x5:x4 = APDAKey */
1040	ldp	x6, x7, [x10, #CTX_PACDBKEY_LO]	/* x7:x6 = APDBKey */
1041	ldp	x8, x9, [x10, #CTX_PACGAKEY_LO]	/* x9:x8 = APGAKey */
1042
1043	msr	APIAKeyLo_EL1, x0
1044	msr	APIAKeyHi_EL1, x1
1045	msr	APIBKeyLo_EL1, x2
1046	msr	APIBKeyHi_EL1, x3
1047	msr	APDAKeyLo_EL1, x4
1048	msr	APDAKeyHi_EL1, x5
1049	msr	APDBKeyLo_EL1, x6
1050	msr	APDBKeyHi_EL1, x7
1051	msr	APGAKeyLo_EL1, x8
1052	msr	APGAKeyHi_EL1, x9
1053#endif /* CTX_INCLUDE_PAUTH_REGS */
1054
1055	/* ----------------------------------------------------------
1056	 * Restore PMCR_EL0 when returning to Non-secure state if
1057	 * Secure Cycle Counter is not disabled in MDCR_EL3 when
1058	 * ARMv8.5-PMU is implemented.
1059	 * ----------------------------------------------------------
1060	 */
1061	mrs	x0, scr_el3
1062	tst	x0, #SCR_NS_BIT
1063	beq	2f
1064
1065	/* ----------------------------------------------------------
1066	 * Back to Non-secure state.
1067	 * Check if earlier initialization of MDCR_EL3.SCCD/MCCD to 1
1068	 * failed, meaning that FEAT_PMUv3p5/7 is not implemented and
1069	 * PMCR_EL0 should be restored from non-secure context.
1070	 * ----------------------------------------------------------
1071	 */
1072	mov_imm	x1, (MDCR_SCCD_BIT | MDCR_MCCD_BIT)
1073	mrs	x0, mdcr_el3
1074	tst	x0, x1
1075	bne	2f
1076	ldr	x0, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
1077	msr	pmcr_el0, x0
10782:
1079	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
1080	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
1081	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
1082	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
1083	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
1084	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
1085	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
1086	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
1087	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
1088	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
1089	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
1090	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
1091	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
1092	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
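	/*
	 * x28 is used as a scratch register to restore SP_EL0 before
	 * x28 and x29 themselves are reloaded from the context.
	 */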
1093	ldr	x28, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
1094	msr	sp_el0, x28
1095	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
1096	ret
1097endfunc restore_gp_pmcr_pauth_regs
1098
1099/*
1100 * In case of ERRATA_SPECULATIVE_AT, save SCTLR_EL1 and TCR_EL1
1101 * registers and update EL1 registers to disable stage1 and stage2
1102 * page table walk
1103 */
1104func save_and_update_ptw_el1_sys_regs
1105	/* ----------------------------------------------------------
1106	 * Save only sctlr_el1 and tcr_el1 registers
1107	 * ----------------------------------------------------------
1108	 */
1109	mrs	x29, sctlr_el1
1110	str	x29, [sp, #(CTX_EL1_SYSREGS_OFFSET + CTX_SCTLR_EL1)]
1111	mrs	x29, tcr_el1
1112	str	x29, [sp, #(CTX_EL1_SYSREGS_OFFSET + CTX_TCR_EL1)]
1113
1114	/* ------------------------------------------------------------
1115	 * The steps below must be performed in this order to disable
1116	 * the page table walk for lower ELs (EL1 and EL0). The first
1117	 * step disables the stage 1 page table walk, and the second
1118	 * forces the page table walker to use the TCR_EL1.EPDx bits
1119	 * when performing address translation. The ISB ensures the
1120	 * CPU performs these two steps in order.
1121	 *
1122	 * 1. Update TCR_EL1.EPDx bits to disable page table walk by
1123	 *    stage1.
1124	 * 2. Enable MMU bit to avoid identity mapping via stage2
1125	 *    and force TCR_EL1.EPDx to be used by the page table
1126	 *    walker.
1127	 * ------------------------------------------------------------
1128	 */
1129	orr	x29, x29, #(TCR_EPD0_BIT)
1130	orr	x29, x29, #(TCR_EPD1_BIT)
1131	msr	tcr_el1, x29
1132	isb
1133	mrs	x29, sctlr_el1
1134	orr	x29, x29, #SCTLR_M_BIT
1135	msr	sctlr_el1, x29
1136	isb
1137
1138	ret
1139endfunc save_and_update_ptw_el1_sys_regs
1140
1141/* ------------------------------------------------------------------
1142 * This routine assumes that the SP_EL3 is pointing to a valid
1143 * context structure from where the gp regs and other special
1144 * registers can be retrieved.
1145 * ------------------------------------------------------------------
1146 */
1147func el3_exit
1148#if ENABLE_ASSERTIONS
1149	/* el3_exit assumes SP_EL0 on entry */
1150	mrs	x17, spsel
1151	cmp	x17, #MODE_SP_EL0
1152	ASM_ASSERT(eq)
1153#endif /* ENABLE_ASSERTIONS */
1154
1155	/* ----------------------------------------------------------
1156	 * Save the current SP_EL0 i.e. the EL3 runtime stack which
1157	 * will be used for handling the next SMC.
1158	 * Then switch to SP_EL3.
1159	 * ----------------------------------------------------------
1160	 */
1161	mov	x17, sp
1162	msr	spsel, #MODE_SP_ELX
1163	str	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
1164
1165#if IMAGE_BL31
1166	/* ----------------------------------------------------------
1167	 * Restore CPTR_EL3.
1168	 * ZCR is only restored if SVE is supported and enabled.
1169	 * Synchronization is required before zcr_el3 is addressed.
1170	 * ----------------------------------------------------------
1171	 */
1172	ldp	x19, x20, [sp, #CTX_EL3STATE_OFFSET + CTX_CPTR_EL3]
1173	msr	cptr_el3, x19
1174
1175	ands	x19, x19, #CPTR_EZ_BIT
1176	beq	sve_not_enabled
1177
1178	isb
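	/*
	 * S3_6_C1_C2_0 is the generic system register encoding of
	 * ZCR_EL3, presumably used so that the file assembles with
	 * toolchains that do not recognise the SVE register names.
	 */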
1179	msr	S3_6_C1_C2_0, x20 /* zcr_el3 */
1180sve_not_enabled:
1181#endif /* IMAGE_BL31 */
1182
1183#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639
1184	/* ----------------------------------------------------------
1185	 * Restore mitigation state as it was on entry to EL3
1186	 * ----------------------------------------------------------
1187	 */
1188	ldr	x17, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
1189	cbz	x17, 1f
1190	blr	x17
11911:
1192#endif /* IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639 */
1193
1194#if IMAGE_BL31 && RAS_EXTENSION
1195	/* ----------------------------------------------------------
1196	 * Issue Error Synchronization Barrier to synchronize SErrors
1197	 * before exiting EL3. We're running with EAs unmasked, so
1198	 * any synchronized errors would be taken immediately;
1199	 * therefore no need to inspect DISR_EL1 register.
1200 	 * ----------------------------------------------------------
1201	 */
1202	esb
1203#else
1204	dsb	sy
1205#endif /* IMAGE_BL31 && RAS_EXTENSION */
1206
1207	/* ----------------------------------------------------------
1208	 * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
1209	 * ----------------------------------------------------------
1210	 */
1211	ldr	x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
1212	ldp	x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
1213	msr	scr_el3, x18
1214	msr	spsr_el3, x16
1215	msr	elr_el3, x17
1216
1217	restore_ptw_el1_sys_regs
1218
1219	/* ----------------------------------------------------------
1220	 * Restore general purpose (including x30), PMCR_EL0 and
1221	 * ARMv8.3-PAuth registers.
1222	 * Exit EL3 via ERET to a lower exception level.
1223 	 * ----------------------------------------------------------
1224 	 */
1225	bl	restore_gp_pmcr_pauth_regs
1226	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
1227
1228#ifdef IMAGE_BL31
1229	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
1230#endif /* IMAGE_BL31 */
1231
1232	exception_return
1233
1234endfunc el3_exit
1235