xref: /rk3399_ARM-atf/lib/el3_runtime/aarch64/context.S (revision 4c700c1563aff7b51df95f17e952e050b9b4e37f)
/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <context.h>
#include <el3_common_macros.S>

#if CTX_INCLUDE_EL2_REGS
	.global	el2_sysregs_context_save_common
	.global	el2_sysregs_context_restore_common
#if CTX_INCLUDE_MTE_REGS
	.global	el2_sysregs_context_save_mte
	.global	el2_sysregs_context_restore_mte
#endif /* CTX_INCLUDE_MTE_REGS */
#endif /* CTX_INCLUDE_EL2_REGS */

	.global	el1_sysregs_context_save
	.global	el1_sysregs_context_restore
#if CTX_INCLUDE_FPREGS
	.global	fpregs_context_save
	.global	fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */
	.global	prepare_el3_entry
	.global	restore_gp_pmcr_pauth_regs
	.global save_and_update_ptw_el1_sys_regs
	.global	el3_exit

#if CTX_INCLUDE_EL2_REGS

/* -----------------------------------------------------
 * The following functions strictly follow the AArch64
 * PCS, using x9-x16 (temporary caller-saved registers)
 * to save/restore the EL2 system register context.
 * The el2_sysregs_context_save/restore_common functions
 * save and restore registers that are common to all
 * configurations. The rest of the functions save and
 * restore EL2 system registers that are present only
 * when a particular feature is enabled. All functions
 * assume that 'x0' is pointing to an 'el2_sys_regs'
 * structure where the register context will be
 * saved/restored.
 *
 * The following registers are not saved/restored:
 * AMEVCNTVOFF0<n>_EL2
 * AMEVCNTVOFF1<n>_EL2
 * ICH_AP0R<n>_EL2
 * ICH_AP1R<n>_EL2
 * ICH_LR<n>_EL2
 * -----------------------------------------------------
 */
func el2_sysregs_context_save_common
	mrs	x9, actlr_el2
	mrs	x10, afsr0_el2
	stp	x9, x10, [x0, #CTX_ACTLR_EL2]

	mrs	x11, afsr1_el2
	mrs	x12, amair_el2
	stp	x11, x12, [x0, #CTX_AFSR1_EL2]

	mrs	x13, cnthctl_el2
	mrs	x14, cntvoff_el2
	stp	x13, x14, [x0, #CTX_CNTHCTL_EL2]

	mrs	x15, cptr_el2
	str	x15, [x0, #CTX_CPTR_EL2]

#if CTX_INCLUDE_AARCH32_REGS
	mrs	x16, dbgvcr32_el2
	str	x16, [x0, #CTX_DBGVCR32_EL2]
#endif /* CTX_INCLUDE_AARCH32_REGS */

	mrs	x9, elr_el2
	mrs	x10, esr_el2
	stp	x9, x10, [x0, #CTX_ELR_EL2]

	mrs	x11, far_el2
	mrs	x12, hacr_el2
	stp	x11, x12, [x0, #CTX_FAR_EL2]

	mrs	x13, hcr_el2
	mrs	x14, hpfar_el2
	stp	x13, x14, [x0, #CTX_HCR_EL2]

	mrs	x15, hstr_el2
	mrs	x16, ICC_SRE_EL2
	stp	x15, x16, [x0, #CTX_HSTR_EL2]

	mrs	x9, ICH_HCR_EL2
	mrs	x10, ICH_VMCR_EL2
	stp	x9, x10, [x0, #CTX_ICH_HCR_EL2]

	mrs	x11, mair_el2
	mrs	x12, mdcr_el2
	stp	x11, x12, [x0, #CTX_MAIR_EL2]

	mrs	x14, sctlr_el2
	str	x14, [x0, #CTX_SCTLR_EL2]

	mrs	x15, spsr_el2
	mrs	x16, sp_el2
	stp	x15, x16, [x0, #CTX_SPSR_EL2]

	mrs	x9, tcr_el2
	mrs	x10, tpidr_el2
	stp	x9, x10, [x0, #CTX_TCR_EL2]

	mrs	x11, ttbr0_el2
	mrs	x12, vbar_el2
	stp	x11, x12, [x0, #CTX_TTBR0_EL2]

	mrs	x13, vmpidr_el2
	mrs	x14, vpidr_el2
	stp	x13, x14, [x0, #CTX_VMPIDR_EL2]

	mrs	x15, vtcr_el2
	mrs	x16, vttbr_el2
	stp	x15, x16, [x0, #CTX_VTCR_EL2]
	ret
endfunc el2_sysregs_context_save_common

func el2_sysregs_context_restore_common
	ldp	x9, x10, [x0, #CTX_ACTLR_EL2]
	msr	actlr_el2, x9
	msr	afsr0_el2, x10

	ldp	x11, x12, [x0, #CTX_AFSR1_EL2]
	msr	afsr1_el2, x11
	msr	amair_el2, x12

	ldp	x13, x14, [x0, #CTX_CNTHCTL_EL2]
	msr	cnthctl_el2, x13
	msr	cntvoff_el2, x14

	ldr	x15, [x0, #CTX_CPTR_EL2]
	msr	cptr_el2, x15

#if CTX_INCLUDE_AARCH32_REGS
	ldr	x16, [x0, #CTX_DBGVCR32_EL2]
	msr	dbgvcr32_el2, x16
#endif /* CTX_INCLUDE_AARCH32_REGS */

	ldp	x9, x10, [x0, #CTX_ELR_EL2]
	msr	elr_el2, x9
	msr	esr_el2, x10

	ldp	x11, x12, [x0, #CTX_FAR_EL2]
	msr	far_el2, x11
	msr	hacr_el2, x12

	ldp	x13, x14, [x0, #CTX_HCR_EL2]
	msr	hcr_el2, x13
	msr	hpfar_el2, x14

	ldp	x15, x16, [x0, #CTX_HSTR_EL2]
	msr	hstr_el2, x15
	msr	ICC_SRE_EL2, x16

	ldp	x9, x10, [x0, #CTX_ICH_HCR_EL2]
	msr	ICH_HCR_EL2, x9
	msr	ICH_VMCR_EL2, x10

	ldp	x11, x12, [x0, #CTX_MAIR_EL2]
	msr	mair_el2, x11
	msr	mdcr_el2, x12

	ldr	x14, [x0, #CTX_SCTLR_EL2]
	msr	sctlr_el2, x14

	ldp	x15, x16, [x0, #CTX_SPSR_EL2]
	msr	spsr_el2, x15
	msr	sp_el2, x16

	ldp	x9, x10, [x0, #CTX_TCR_EL2]
	msr	tcr_el2, x9
	msr	tpidr_el2, x10

	ldp	x11, x12, [x0, #CTX_TTBR0_EL2]
	msr	ttbr0_el2, x11
	msr	vbar_el2, x12

	ldp	x13, x14, [x0, #CTX_VMPIDR_EL2]
	msr	vmpidr_el2, x13
	msr	vpidr_el2, x14

	ldp	x15, x16, [x0, #CTX_VTCR_EL2]
	msr	vtcr_el2, x15
	msr	vttbr_el2, x16
	ret
endfunc el2_sysregs_context_restore_common

#if CTX_INCLUDE_MTE_REGS
func el2_sysregs_context_save_mte
	mrs	x9, TFSR_EL2
	str	x9, [x0, #CTX_TFSR_EL2]
	ret
endfunc el2_sysregs_context_save_mte

func el2_sysregs_context_restore_mte
	ldr	x9, [x0, #CTX_TFSR_EL2]
	msr	TFSR_EL2, x9
	ret
endfunc el2_sysregs_context_restore_mte
#endif /* CTX_INCLUDE_MTE_REGS */

#endif /* CTX_INCLUDE_EL2_REGS */

/* ------------------------------------------------------------------
 * The following function strictly follows the AArch64 PCS, using
 * x9-x17 (temporary caller-saved registers) to save the EL1 system
 * register context. It assumes that 'x0' is pointing to an
 * 'el1_sys_regs' structure where the register context will be saved.
 * ------------------------------------------------------------------
 */
func el1_sysregs_context_save

	mrs	x9, spsr_el1
	mrs	x10, elr_el1
	stp	x9, x10, [x0, #CTX_SPSR_EL1]

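	/*
	 * With ERRATA_SPECULATIVE_AT, SCTLR_EL1 and TCR_EL1 are not saved
	 * here; see save_and_update_ptw_el1_sys_regs later in this file.
	 */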
#if !ERRATA_SPECULATIVE_AT
	mrs	x15, sctlr_el1
	mrs	x16, tcr_el1
	stp	x15, x16, [x0, #CTX_SCTLR_EL1]
#endif /* ERRATA_SPECULATIVE_AT */

	mrs	x17, cpacr_el1
	mrs	x9, csselr_el1
	stp	x17, x9, [x0, #CTX_CPACR_EL1]

	mrs	x10, sp_el1
	mrs	x11, esr_el1
	stp	x10, x11, [x0, #CTX_SP_EL1]

	mrs	x12, ttbr0_el1
	mrs	x13, ttbr1_el1
	stp	x12, x13, [x0, #CTX_TTBR0_EL1]

	mrs	x14, mair_el1
	mrs	x15, amair_el1
	stp	x14, x15, [x0, #CTX_MAIR_EL1]

	mrs	x16, actlr_el1
	mrs	x17, tpidr_el1
	stp	x16, x17, [x0, #CTX_ACTLR_EL1]

	mrs	x9, tpidr_el0
	mrs	x10, tpidrro_el0
	stp	x9, x10, [x0, #CTX_TPIDR_EL0]

	mrs	x13, par_el1
	mrs	x14, far_el1
	stp	x13, x14, [x0, #CTX_PAR_EL1]

	mrs	x15, afsr0_el1
	mrs	x16, afsr1_el1
	stp	x15, x16, [x0, #CTX_AFSR0_EL1]

	mrs	x17, contextidr_el1
	mrs	x9, vbar_el1
	stp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]

	/* Save AArch32 system registers if the build has instructed so */
#if CTX_INCLUDE_AARCH32_REGS
	mrs	x11, spsr_abt
	mrs	x12, spsr_und
	stp	x11, x12, [x0, #CTX_SPSR_ABT]

	mrs	x13, spsr_irq
	mrs	x14, spsr_fiq
	stp	x13, x14, [x0, #CTX_SPSR_IRQ]

	mrs	x15, dacr32_el2
	mrs	x16, ifsr32_el2
	stp	x15, x16, [x0, #CTX_DACR32_EL2]
#endif /* CTX_INCLUDE_AARCH32_REGS */

	/* Save NS timer registers if the build has instructed so */
#if NS_TIMER_SWITCH
	mrs	x10, cntp_ctl_el0
	mrs	x11, cntp_cval_el0
	stp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]

	mrs	x12, cntv_ctl_el0
	mrs	x13, cntv_cval_el0
	stp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]

	mrs	x14, cntkctl_el1
	str	x14, [x0, #CTX_CNTKCTL_EL1]
#endif /* NS_TIMER_SWITCH */

	/* Save MTE system registers if the build has instructed so */
#if CTX_INCLUDE_MTE_REGS
	mrs	x15, TFSRE0_EL1
	mrs	x16, TFSR_EL1
	stp	x15, x16, [x0, #CTX_TFSRE0_EL1]

	mrs	x9, RGSR_EL1
	mrs	x10, GCR_EL1
	stp	x9, x10, [x0, #CTX_RGSR_EL1]
#endif /* CTX_INCLUDE_MTE_REGS */

	ret
endfunc el1_sysregs_context_save

/* ------------------------------------------------------------------
 * The following function strictly follows the AArch64 PCS, using
 * x9-x17 (temporary caller-saved registers) to restore the EL1
 * system register context. It assumes that 'x0' is pointing to an
 * 'el1_sys_regs' structure from where the register context will be
 * restored.
 * ------------------------------------------------------------------
 */
func el1_sysregs_context_restore

	ldp	x9, x10, [x0, #CTX_SPSR_EL1]
	msr	spsr_el1, x9
	msr	elr_el1, x10

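	/*
	 * With ERRATA_SPECULATIVE_AT, SCTLR_EL1 and TCR_EL1 are not restored
	 * here; restore_ptw_el1_sys_regs (invoked from el3_exit) is expected
	 * to restore them.
	 */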
#if !ERRATA_SPECULATIVE_AT
	ldp	x15, x16, [x0, #CTX_SCTLR_EL1]
	msr	sctlr_el1, x15
	msr	tcr_el1, x16
#endif /* ERRATA_SPECULATIVE_AT */

	ldp	x17, x9, [x0, #CTX_CPACR_EL1]
	msr	cpacr_el1, x17
	msr	csselr_el1, x9

	ldp	x10, x11, [x0, #CTX_SP_EL1]
	msr	sp_el1, x10
	msr	esr_el1, x11

	ldp	x12, x13, [x0, #CTX_TTBR0_EL1]
	msr	ttbr0_el1, x12
	msr	ttbr1_el1, x13

	ldp	x14, x15, [x0, #CTX_MAIR_EL1]
	msr	mair_el1, x14
	msr	amair_el1, x15

	ldp	x16, x17, [x0, #CTX_ACTLR_EL1]
	msr	actlr_el1, x16
	msr	tpidr_el1, x17

	ldp	x9, x10, [x0, #CTX_TPIDR_EL0]
	msr	tpidr_el0, x9
	msr	tpidrro_el0, x10

	ldp	x13, x14, [x0, #CTX_PAR_EL1]
	msr	par_el1, x13
	msr	far_el1, x14

	ldp	x15, x16, [x0, #CTX_AFSR0_EL1]
	msr	afsr0_el1, x15
	msr	afsr1_el1, x16

	ldp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
	msr	contextidr_el1, x17
	msr	vbar_el1, x9

	/* Restore AArch32 system registers if the build has instructed so */
#if CTX_INCLUDE_AARCH32_REGS
	ldp	x11, x12, [x0, #CTX_SPSR_ABT]
	msr	spsr_abt, x11
	msr	spsr_und, x12

	ldp	x13, x14, [x0, #CTX_SPSR_IRQ]
	msr	spsr_irq, x13
	msr	spsr_fiq, x14

	ldp	x15, x16, [x0, #CTX_DACR32_EL2]
	msr	dacr32_el2, x15
	msr	ifsr32_el2, x16
#endif /* CTX_INCLUDE_AARCH32_REGS */

	/* Restore NS timer registers if the build has instructed so */
#if NS_TIMER_SWITCH
	ldp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]
	msr	cntp_ctl_el0, x10
	msr	cntp_cval_el0, x11

	ldp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]
	msr	cntv_ctl_el0, x12
	msr	cntv_cval_el0, x13

	ldr	x14, [x0, #CTX_CNTKCTL_EL1]
	msr	cntkctl_el1, x14
#endif /* NS_TIMER_SWITCH */

	/* Restore MTE system registers if the build has instructed so */
#if CTX_INCLUDE_MTE_REGS
	ldp	x11, x12, [x0, #CTX_TFSRE0_EL1]
	msr	TFSRE0_EL1, x11
	msr	TFSR_EL1, x12

	ldp	x13, x14, [x0, #CTX_RGSR_EL1]
	msr	RGSR_EL1, x13
	msr	GCR_EL1, x14
#endif /* CTX_INCLUDE_MTE_REGS */

	/* No explicit ISB required here as ERET covers it */
	ret
endfunc el1_sysregs_context_restore

/* ------------------------------------------------------------------
 * The following function strictly follows the AAPCS64, using x9-x17
 * (temporary caller-saved registers) to save the floating point
 * register context. It assumes that 'x0' is pointing to an
 * 'fp_regs' structure where the register context will be saved.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is set.
 * However, Trusted Firmware currently neither uses VFP registers
 * nor sets this trap, so the bit is assumed to be cleared.
 *
 * TODO: Revisit when VFP is used in secure world
 * ------------------------------------------------------------------
 */
#if CTX_INCLUDE_FPREGS
func fpregs_context_save
	stp	q0, q1, [x0, #CTX_FP_Q0]
	stp	q2, q3, [x0, #CTX_FP_Q2]
	stp	q4, q5, [x0, #CTX_FP_Q4]
	stp	q6, q7, [x0, #CTX_FP_Q6]
	stp	q8, q9, [x0, #CTX_FP_Q8]
	stp	q10, q11, [x0, #CTX_FP_Q10]
	stp	q12, q13, [x0, #CTX_FP_Q12]
	stp	q14, q15, [x0, #CTX_FP_Q14]
	stp	q16, q17, [x0, #CTX_FP_Q16]
	stp	q18, q19, [x0, #CTX_FP_Q18]
	stp	q20, q21, [x0, #CTX_FP_Q20]
	stp	q22, q23, [x0, #CTX_FP_Q22]
	stp	q24, q25, [x0, #CTX_FP_Q24]
	stp	q26, q27, [x0, #CTX_FP_Q26]
	stp	q28, q29, [x0, #CTX_FP_Q28]
	stp	q30, q31, [x0, #CTX_FP_Q30]

	mrs	x9, fpsr
	str	x9, [x0, #CTX_FP_FPSR]

	mrs	x10, fpcr
	str	x10, [x0, #CTX_FP_FPCR]

#if CTX_INCLUDE_AARCH32_REGS
	mrs	x11, fpexc32_el2
	str	x11, [x0, #CTX_FP_FPEXC32_EL2]
#endif /* CTX_INCLUDE_AARCH32_REGS */
	ret
endfunc fpregs_context_save

/* ------------------------------------------------------------------
 * The following function strictly follows the AAPCS64, using x9-x17
 * (temporary caller-saved registers) to restore the floating point
 * register context. It assumes that 'x0' is pointing to an
 * 'fp_regs' structure from where the register context will be
 * restored.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is set.
 * However, Trusted Firmware currently neither uses VFP registers
 * nor sets this trap, so the bit is assumed to be cleared.
 *
 * TODO: Revisit when VFP is used in secure world
 * ------------------------------------------------------------------
 */
func fpregs_context_restore
	ldp	q0, q1, [x0, #CTX_FP_Q0]
	ldp	q2, q3, [x0, #CTX_FP_Q2]
	ldp	q4, q5, [x0, #CTX_FP_Q4]
	ldp	q6, q7, [x0, #CTX_FP_Q6]
	ldp	q8, q9, [x0, #CTX_FP_Q8]
	ldp	q10, q11, [x0, #CTX_FP_Q10]
	ldp	q12, q13, [x0, #CTX_FP_Q12]
	ldp	q14, q15, [x0, #CTX_FP_Q14]
	ldp	q16, q17, [x0, #CTX_FP_Q16]
	ldp	q18, q19, [x0, #CTX_FP_Q18]
	ldp	q20, q21, [x0, #CTX_FP_Q20]
	ldp	q22, q23, [x0, #CTX_FP_Q22]
	ldp	q24, q25, [x0, #CTX_FP_Q24]
	ldp	q26, q27, [x0, #CTX_FP_Q26]
	ldp	q28, q29, [x0, #CTX_FP_Q28]
	ldp	q30, q31, [x0, #CTX_FP_Q30]

	ldr	x9, [x0, #CTX_FP_FPSR]
	msr	fpsr, x9

	ldr	x10, [x0, #CTX_FP_FPCR]
	msr	fpcr, x10

#if CTX_INCLUDE_AARCH32_REGS
	ldr	x11, [x0, #CTX_FP_FPEXC32_EL2]
	msr	fpexc32_el2, x11
#endif /* CTX_INCLUDE_AARCH32_REGS */

	/*
	 * No explicit ISB required here, as the ERET used to switch to
	 * secure EL1 or the non-secure world covers it
	 */

	ret
endfunc fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */

	/*
	 * Set SCR_EL3.EA bit to enable SErrors at EL3
	 */
	.macro enable_serror_at_el3
	mrs     x8, scr_el3
	orr     x8, x8, #SCR_EA_BIT
	msr     scr_el3, x8
	.endm

	/*
	 * Set the PSTATE bits not set when the exception was taken as
	 * described in the AArch64.TakeException() pseudocode function
	 * in ARM DDI 0487F.c page J1-7635 to a default value.
	 */
	.macro set_unset_pstate_bits
	/*
	 * If Data Independent Timing (DIT) functionality is implemented,
	 * always enable DIT in EL3
	 */
#if ENABLE_FEAT_DIT
#if ENABLE_FEAT_DIT == 2
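	/*
	 * ENABLE_FEAT_DIT == 2 selects runtime detection: only set the DIT
	 * bit if ID_AA64PFR0_EL1.DIT reports the feature as implemented.
	 */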
	mrs	x8, id_aa64pfr0_el1
	and	x8, x8, #(ID_AA64PFR0_DIT_MASK << ID_AA64PFR0_DIT_SHIFT)
	cbz	x8, 1f
#endif
	mov     x8, #DIT_BIT
	msr     DIT, x8
1:
#endif /* ENABLE_FEAT_DIT */
	.endm /* set_unset_pstate_bits */

/* ------------------------------------------------------------------
 * The following macro is used to save and restore all the general
 * purpose and ARMv8.3-PAuth (if enabled) registers.
 * It also checks if the Secure Cycle Counter (PMCCNTR_EL0)
 * is disabled in EL3/Secure (ARMv8.5-PMU), in which case PMCCNTR_EL0
 * need not be saved/restored during a world switch.
 *
 * Ideally we would only save and restore the callee-saved registers
 * when a world switch occurs, but that type of implementation is
 * more complex. So currently we always save and restore these
 * registers on entry to and exit from EL3.
 * clobbers: x18
 * ------------------------------------------------------------------
 */
	.macro save_gp_pmcr_pauth_regs
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
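	/*
	 * Save SP_EL0 into the GP register context; x18 is used as the
	 * scratch register and is this macro's documented clobber.
	 */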
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]

	/* PMUv3 is presumed to be always present */
	mrs	x9, pmcr_el0
	str	x9, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
	/* Disable cycle counter when event counting is prohibited */
	orr	x9, x9, #PMCR_EL0_DP_BIT
	msr	pmcr_el0, x9
	isb
#if CTX_INCLUDE_PAUTH_REGS
	/* ----------------------------------------------------------
	 * Save the ARMv8.3-PAuth keys as they are not banked
	 * by exception level
	 * ----------------------------------------------------------
	 */
	add	x19, sp, #CTX_PAUTH_REGS_OFFSET

	mrs	x20, APIAKeyLo_EL1	/* x21:x20 = APIAKey */
	mrs	x21, APIAKeyHi_EL1
	mrs	x22, APIBKeyLo_EL1	/* x23:x22 = APIBKey */
	mrs	x23, APIBKeyHi_EL1
	mrs	x24, APDAKeyLo_EL1	/* x25:x24 = APDAKey */
	mrs	x25, APDAKeyHi_EL1
	mrs	x26, APDBKeyLo_EL1	/* x27:x26 = APDBKey */
	mrs	x27, APDBKeyHi_EL1
	mrs	x28, APGAKeyLo_EL1	/* x29:x28 = APGAKey */
	mrs	x29, APGAKeyHi_EL1

	stp	x20, x21, [x19, #CTX_PACIAKEY_LO]
	stp	x22, x23, [x19, #CTX_PACIBKEY_LO]
	stp	x24, x25, [x19, #CTX_PACDAKEY_LO]
	stp	x26, x27, [x19, #CTX_PACDBKEY_LO]
	stp	x28, x29, [x19, #CTX_PACGAKEY_LO]
#endif /* CTX_INCLUDE_PAUTH_REGS */
	.endm /* save_gp_pmcr_pauth_regs */

/* -----------------------------------------------------------------
 * This function saves the context and sets the PSTATE to a known
 * state, preparing entry to EL3.
 * It saves all the general purpose and ARMv8.3-PAuth (if enabled)
 * registers.
 * Then it sets to a default value for EL3 any PSTATE bits that are
 * not set by hardware according to the AArch64.TakeException
 * pseudocode in the Arm Architecture Reference Manual.
 * clobbers: x17
 * -----------------------------------------------------------------
 */
func prepare_el3_entry
	save_gp_pmcr_pauth_regs
	enable_serror_at_el3
	/*
	 * Set the PSTATE bits not set by the AArch64.TakeException
	 * pseudocode to their default values.
	 */
	set_unset_pstate_bits
	ret
endfunc prepare_el3_entry

/* ------------------------------------------------------------------
 * This function restores ARMv8.3-PAuth (if enabled) and all general
 * purpose registers except x30 from the CPU context.
 * x30 register must be explicitly restored by the caller.
 * ------------------------------------------------------------------
 */
func restore_gp_pmcr_pauth_regs
#if CTX_INCLUDE_PAUTH_REGS
	/* Restore the ARMv8.3 PAuth keys */
	add	x10, sp, #CTX_PAUTH_REGS_OFFSET

	ldp	x0, x1, [x10, #CTX_PACIAKEY_LO]	/* x1:x0 = APIAKey */
	ldp	x2, x3, [x10, #CTX_PACIBKEY_LO]	/* x3:x2 = APIBKey */
	ldp	x4, x5, [x10, #CTX_PACDAKEY_LO]	/* x5:x4 = APDAKey */
	ldp	x6, x7, [x10, #CTX_PACDBKEY_LO]	/* x7:x6 = APDBKey */
	ldp	x8, x9, [x10, #CTX_PACGAKEY_LO]	/* x9:x8 = APGAKey */

	msr	APIAKeyLo_EL1, x0
	msr	APIAKeyHi_EL1, x1
	msr	APIBKeyLo_EL1, x2
	msr	APIBKeyHi_EL1, x3
	msr	APDAKeyLo_EL1, x4
	msr	APDAKeyHi_EL1, x5
	msr	APDBKeyLo_EL1, x6
	msr	APDBKeyHi_EL1, x7
	msr	APGAKeyLo_EL1, x8
	msr	APGAKeyHi_EL1, x9
#endif /* CTX_INCLUDE_PAUTH_REGS */

	/* PMUv3 is presumed to be always present */
	ldr	x0, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
	msr	pmcr_el0, x0
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
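	/*
	 * Restore SP_EL0 via x28 first, then reload x28 and x29 so that no
	 * general purpose register is left clobbered on return.
	 */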
	ldr	x28, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
	msr	sp_el0, x28
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ret
endfunc restore_gp_pmcr_pauth_regs

/*
 * In case of ERRATA_SPECULATIVE_AT, save SCTLR_EL1 and TCR_EL1
 * registers and update EL1 registers to disable stage1 and stage2
 * page table walk
 */
func save_and_update_ptw_el1_sys_regs
	/* ----------------------------------------------------------
	 * Save only sctlr_el1 and tcr_el1 registers
	 * ----------------------------------------------------------
	 */
	mrs	x29, sctlr_el1
	str	x29, [sp, #(CTX_EL1_SYSREGS_OFFSET + CTX_SCTLR_EL1)]
	mrs	x29, tcr_el1
	str	x29, [sp, #(CTX_EL1_SYSREGS_OFFSET + CTX_TCR_EL1)]

	/* ------------------------------------------------------------
	 * The steps below must be performed in this order to disable
	 * the page table walk for lower ELs (EL1 and EL0). The first
	 * step disables the stage 1 page table walk and the second
	 * forces the page table walker to honour the TCR_EL1.EPDx
	 * bits when performing address translation. The ISB ensures
	 * the CPU carries out these two steps in order.
	 *
	 * 1. Set the TCR_EL1.EPDx bits to disable the stage 1 page
	 *    table walk.
	 * 2. Set the MMU enable bit to avoid an identity mapping via
	 *    stage 2 and force the page table walker to use the
	 *    TCR_EL1.EPDx bits.
	 * ------------------------------------------------------------
	 */
	orr	x29, x29, #(TCR_EPD0_BIT)
	orr	x29, x29, #(TCR_EPD1_BIT)
	msr	tcr_el1, x29
	isb
	mrs	x29, sctlr_el1
	orr	x29, x29, #SCTLR_M_BIT
	msr	sctlr_el1, x29
	isb

	ret
endfunc save_and_update_ptw_el1_sys_regs

/* ------------------------------------------------------------------
 * This routine assumes that the SP_EL3 is pointing to a valid
 * context structure from where the gp regs and other special
 * registers can be retrieved.
 * ------------------------------------------------------------------
 */
func el3_exit
#if ENABLE_ASSERTIONS
	/* el3_exit assumes SP_EL0 on entry */
	mrs	x17, spsel
	cmp	x17, #MODE_SP_EL0
	ASM_ASSERT(eq)
#endif /* ENABLE_ASSERTIONS */

	/* ----------------------------------------------------------
	 * Save the current SP_EL0 i.e. the EL3 runtime stack which
	 * will be used for handling the next SMC.
	 * Then switch to SP_EL3.
	 * ----------------------------------------------------------
	 */
	mov	x17, sp
	msr	spsel, #MODE_SP_ELX
	str	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

#if IMAGE_BL31
	/* ----------------------------------------------------------
	 * Restore CPTR_EL3.
	 * ZCR is only restored if SVE is supported and enabled.
	 * Synchronization is required before zcr_el3 is addressed.
	 * ----------------------------------------------------------
	 */
	ldp	x19, x20, [sp, #CTX_EL3STATE_OFFSET + CTX_CPTR_EL3]
	msr	cptr_el3, x19

	ands	x19, x19, #CPTR_EZ_BIT
	beq	sve_not_enabled

	isb
	msr	S3_6_C1_C2_0, x20 /* zcr_el3 */
sve_not_enabled:
#endif /* IMAGE_BL31 */

#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639
	/* ----------------------------------------------------------
	 * Restore mitigation state as it was on entry to EL3
	 * ----------------------------------------------------------
	 */
	ldr	x17, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
	cbz	x17, 1f
	blr	x17
1:
#endif /* IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639 */

/*
 * This is a hot path, so we don't want to do any actual FEAT_RAS runtime
 * detection here. The "esb" is a cheaper variant, so using "dsb" in the
 * ENABLE_FEAT_RAS==2 case is not ideal, but won't hurt.
 */
#if IMAGE_BL31 && ENABLE_FEAT_RAS == 1
	/* ----------------------------------------------------------
	 * Issue Error Synchronization Barrier to synchronize SErrors
	 * before exiting EL3. We're running with EAs unmasked, so
	 * any synchronized errors would be taken immediately;
	 * therefore no need to inspect DISR_EL1 register.
	 * ----------------------------------------------------------
	 */
	esb
#else
	dsb	sy
#endif /* IMAGE_BL31 && ENABLE_FEAT_RAS == 1 */

	/* ----------------------------------------------------------
	 * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
	 * ----------------------------------------------------------
	 */
	ldr	x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	ldp	x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	msr	scr_el3, x18
	msr	spsr_el3, x16
	msr	elr_el3, x17

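	/*
	 * When ERRATA_SPECULATIVE_AT is enabled, this macro is expected to
	 * restore the SCTLR_EL1 and TCR_EL1 values stashed by
	 * save_and_update_ptw_el1_sys_regs; otherwise it is a no-op.
	 */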
	restore_ptw_el1_sys_regs

	/* ----------------------------------------------------------
	 * Restore general purpose (including x30), PMCR_EL0 and
	 * ARMv8.3-PAuth registers.
	 * Exit EL3 via ERET to a lower exception level.
	 * ----------------------------------------------------------
	 */
	bl	restore_gp_pmcr_pauth_regs
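	/* restore_gp_pmcr_pauth_regs deliberately leaves x30 to the caller */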
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

#ifdef IMAGE_BL31
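	/* Clear the 'is in EL3' flag in the context before leaving EL3 */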
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
#endif /* IMAGE_BL31 */

	exception_return

endfunc el3_exit
