/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <context.h>
#include <el3_common_macros.S>

#if CTX_INCLUDE_EL2_REGS
	.global	el2_sysregs_context_save_common
	.global	el2_sysregs_context_restore_common
#if CTX_INCLUDE_MTE_REGS
	.global	el2_sysregs_context_save_mte
	.global	el2_sysregs_context_restore_mte
#endif /* CTX_INCLUDE_MTE_REGS */
#if ENABLE_FEAT_ECV
	.global	el2_sysregs_context_save_ecv
	.global	el2_sysregs_context_restore_ecv
#endif /* ENABLE_FEAT_ECV */
#if RAS_EXTENSION
	.global	el2_sysregs_context_save_ras
	.global	el2_sysregs_context_restore_ras
#endif /* RAS_EXTENSION */
#if CTX_INCLUDE_NEVE_REGS
	.global	el2_sysregs_context_save_nv2
	.global	el2_sysregs_context_restore_nv2
#endif /* CTX_INCLUDE_NEVE_REGS */
#if ENABLE_FEAT_CSV2_2
	.global	el2_sysregs_context_save_csv2
	.global	el2_sysregs_context_restore_csv2
#endif /* ENABLE_FEAT_CSV2_2 */
#endif /* CTX_INCLUDE_EL2_REGS */

	.global	el1_sysregs_context_save
	.global	el1_sysregs_context_restore
#if CTX_INCLUDE_FPREGS
	.global	fpregs_context_save
	.global	fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */
	.global	prepare_el3_entry
	.global	restore_gp_pmcr_pauth_regs
	.global	save_and_update_ptw_el1_sys_regs
	.global	el3_exit

#if CTX_INCLUDE_EL2_REGS

/* -----------------------------------------------------
 * The following functions strictly follow the AArch64
 * PCS, using x9-x16 (temporary caller-saved registers)
 * to save/restore the EL2 system register context.
 * The el2_sysregs_context_save/restore_common functions
 * save and restore registers that are common to all
 * configurations. The rest of the functions save and
 * restore the EL2 system registers that are present
 * only when a particular feature is enabled. All
 * functions assume that 'x0' points to an
 * 'el2_sys_regs' structure where the register context
 * will be saved/restored.
 *
 * The following registers are not covered:
 * AMEVCNTVOFF0<n>_EL2
 * AMEVCNTVOFF1<n>_EL2
 * ICH_AP0R<n>_EL2
 * ICH_AP1R<n>_EL2
 * ICH_LR<n>_EL2
 * -----------------------------------------------------
 */
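/* -----------------------------------------------------
 * Illustrative call sequence (a sketch for documentation
 * only, not a verbatim call site from this codebase; it
 * assumes x1 holds the base of the CPU context and that
 * CTX_EL2_SYSREGS_OFFSET from context.h locates the
 * el2_sys_regs area within it):
 *	add	x0, x1, #CTX_EL2_SYSREGS_OFFSET
 *	bl	el2_sysregs_context_save_common
 * -----------------------------------------------------
 */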
func el2_sysregs_context_save_common
	mrs	x9, actlr_el2
	mrs	x10, afsr0_el2
	stp	x9, x10, [x0, #CTX_ACTLR_EL2]

	mrs	x11, afsr1_el2
	mrs	x12, amair_el2
	stp	x11, x12, [x0, #CTX_AFSR1_EL2]

	mrs	x13, cnthctl_el2
	mrs	x14, cntvoff_el2
	stp	x13, x14, [x0, #CTX_CNTHCTL_EL2]

	mrs	x15, cptr_el2
	str	x15, [x0, #CTX_CPTR_EL2]

#if CTX_INCLUDE_AARCH32_REGS
	mrs	x16, dbgvcr32_el2
	str	x16, [x0, #CTX_DBGVCR32_EL2]
#endif /* CTX_INCLUDE_AARCH32_REGS */

	mrs	x9, elr_el2
	mrs	x10, esr_el2
	stp	x9, x10, [x0, #CTX_ELR_EL2]

	mrs	x11, far_el2
	mrs	x12, hacr_el2
	stp	x11, x12, [x0, #CTX_FAR_EL2]

	mrs	x13, hcr_el2
	mrs	x14, hpfar_el2
	stp	x13, x14, [x0, #CTX_HCR_EL2]

	mrs	x15, hstr_el2
	mrs	x16, ICC_SRE_EL2
	stp	x15, x16, [x0, #CTX_HSTR_EL2]

	mrs	x9, ICH_HCR_EL2
	mrs	x10, ICH_VMCR_EL2
	stp	x9, x10, [x0, #CTX_ICH_HCR_EL2]

	mrs	x11, mair_el2
	mrs	x12, mdcr_el2
	stp	x11, x12, [x0, #CTX_MAIR_EL2]

	mrs	x14, sctlr_el2
	str	x14, [x0, #CTX_SCTLR_EL2]

	mrs	x15, spsr_el2
	mrs	x16, sp_el2
	stp	x15, x16, [x0, #CTX_SPSR_EL2]

	mrs	x9, tcr_el2
	mrs	x10, tpidr_el2
	stp	x9, x10, [x0, #CTX_TCR_EL2]

	mrs	x11, ttbr0_el2
	mrs	x12, vbar_el2
	stp	x11, x12, [x0, #CTX_TTBR0_EL2]

	mrs	x13, vmpidr_el2
	mrs	x14, vpidr_el2
	stp	x13, x14, [x0, #CTX_VMPIDR_EL2]

	mrs	x15, vtcr_el2
	mrs	x16, vttbr_el2
	stp	x15, x16, [x0, #CTX_VTCR_EL2]
	ret
endfunc el2_sysregs_context_save_common

func el2_sysregs_context_restore_common
	ldp	x9, x10, [x0, #CTX_ACTLR_EL2]
	msr	actlr_el2, x9
	msr	afsr0_el2, x10

	ldp	x11, x12, [x0, #CTX_AFSR1_EL2]
	msr	afsr1_el2, x11
	msr	amair_el2, x12

	ldp	x13, x14, [x0, #CTX_CNTHCTL_EL2]
	msr	cnthctl_el2, x13
	msr	cntvoff_el2, x14

	ldr	x15, [x0, #CTX_CPTR_EL2]
	msr	cptr_el2, x15

#if CTX_INCLUDE_AARCH32_REGS
	ldr	x16, [x0, #CTX_DBGVCR32_EL2]
	msr	dbgvcr32_el2, x16
#endif /* CTX_INCLUDE_AARCH32_REGS */

	ldp	x9, x10, [x0, #CTX_ELR_EL2]
	msr	elr_el2, x9
	msr	esr_el2, x10

	ldp	x11, x12, [x0, #CTX_FAR_EL2]
	msr	far_el2, x11
	msr	hacr_el2, x12

	ldp	x13, x14, [x0, #CTX_HCR_EL2]
	msr	hcr_el2, x13
	msr	hpfar_el2, x14

	ldp	x15, x16, [x0, #CTX_HSTR_EL2]
	msr	hstr_el2, x15
	msr	ICC_SRE_EL2, x16

	ldp	x9, x10, [x0, #CTX_ICH_HCR_EL2]
	msr	ICH_HCR_EL2, x9
	msr	ICH_VMCR_EL2, x10

	ldp	x11, x12, [x0, #CTX_MAIR_EL2]
	msr	mair_el2, x11
	msr	mdcr_el2, x12

	ldr	x14, [x0, #CTX_SCTLR_EL2]
	msr	sctlr_el2, x14

	ldp	x15, x16, [x0, #CTX_SPSR_EL2]
	msr	spsr_el2, x15
	msr	sp_el2, x16

	ldp	x9, x10, [x0, #CTX_TCR_EL2]
	msr	tcr_el2, x9
	msr	tpidr_el2, x10

	ldp	x11, x12, [x0, #CTX_TTBR0_EL2]
	msr	ttbr0_el2, x11
	msr	vbar_el2, x12

	ldp	x13, x14, [x0, #CTX_VMPIDR_EL2]
	msr	vmpidr_el2, x13
	msr	vpidr_el2, x14

	ldp	x15, x16, [x0, #CTX_VTCR_EL2]
	msr	vtcr_el2, x15
	msr	vttbr_el2, x16
	ret
endfunc el2_sysregs_context_restore_common

#if CTX_INCLUDE_MTE_REGS
func el2_sysregs_context_save_mte
	mrs	x9, TFSR_EL2
	str	x9, [x0, #CTX_TFSR_EL2]
	ret
endfunc el2_sysregs_context_save_mte

func el2_sysregs_context_restore_mte
	ldr	x9, [x0, #CTX_TFSR_EL2]
	msr	TFSR_EL2, x9
	ret
endfunc el2_sysregs_context_restore_mte
#endif /* CTX_INCLUDE_MTE_REGS */

#if ENABLE_FEAT_ECV
func el2_sysregs_context_save_ecv
	mrs	x11, CNTPOFF_EL2
	str	x11, [x0, #CTX_CNTPOFF_EL2]
	ret
endfunc el2_sysregs_context_save_ecv

func el2_sysregs_context_restore_ecv
	ldr	x11, [x0, #CTX_CNTPOFF_EL2]
	msr	CNTPOFF_EL2, x11
	ret
endfunc el2_sysregs_context_restore_ecv
#endif /* ENABLE_FEAT_ECV */

#if RAS_EXTENSION
func el2_sysregs_context_save_ras
	/*
	 * VDISR_EL2 and VSESR_EL2 registers are saved only when
	 * FEAT_RAS is supported.
	 */
	mrs	x11, vdisr_el2
	mrs	x12, vsesr_el2
	stp	x11, x12, [x0, #CTX_VDISR_EL2]
	ret
endfunc el2_sysregs_context_save_ras

func el2_sysregs_context_restore_ras
	/*
	 * VDISR_EL2 and VSESR_EL2 registers are restored only when FEAT_RAS
	 * is supported.
	 */
	ldp	x11, x12, [x0, #CTX_VDISR_EL2]
	msr	vdisr_el2, x11
	msr	vsesr_el2, x12
	ret
endfunc el2_sysregs_context_restore_ras
#endif /* RAS_EXTENSION */

#if CTX_INCLUDE_NEVE_REGS
func el2_sysregs_context_save_nv2
	/*
	 * VNCR_EL2 register is saved only when FEAT_NV2 is supported.
	 */
	mrs	x16, vncr_el2
	str	x16, [x0, #CTX_VNCR_EL2]
	ret
endfunc el2_sysregs_context_save_nv2

func el2_sysregs_context_restore_nv2
	/*
	 * VNCR_EL2 register is restored only when FEAT_NV2 is supported.
	 */
	ldr	x16, [x0, #CTX_VNCR_EL2]
	msr	vncr_el2, x16
	ret
endfunc el2_sysregs_context_restore_nv2
#endif /* CTX_INCLUDE_NEVE_REGS */

#if ENABLE_FEAT_CSV2_2
func el2_sysregs_context_save_csv2
	/*
	 * SCXTNUM_EL2 register is saved only when FEAT_CSV2_2 is supported.
	 */
	mrs	x13, scxtnum_el2
	str	x13, [x0, #CTX_SCXTNUM_EL2]
	ret
endfunc el2_sysregs_context_save_csv2

func el2_sysregs_context_restore_csv2
	/*
	 * SCXTNUM_EL2 register is restored only when FEAT_CSV2_2 is supported.
	 */
	ldr	x13, [x0, #CTX_SCXTNUM_EL2]
	msr	scxtnum_el2, x13
	ret
endfunc el2_sysregs_context_restore_csv2
#endif /* ENABLE_FEAT_CSV2_2 */

#endif /* CTX_INCLUDE_EL2_REGS */

/* ------------------------------------------------------------------
 * The following function strictly follows the AArch64 PCS, using
 * x9-x17 (temporary caller-saved registers) to save the EL1 system
 * register context. It assumes that 'x0' points to an
 * 'el1_sys_regs' structure where the register context will be
 * saved.
 * ------------------------------------------------------------------
 */
func el1_sysregs_context_save

	mrs	x9, spsr_el1
	mrs	x10, elr_el1
	stp	x9, x10, [x0, #CTX_SPSR_EL1]

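	/*
	 * Note: when ERRATA_SPECULATIVE_AT is enabled, SCTLR_EL1 and
	 * TCR_EL1 are deliberately not handled here; they are saved by
	 * save_and_update_ptw_el1_sys_regs() further down in this file.
	 */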
#if !ERRATA_SPECULATIVE_AT
	mrs	x15, sctlr_el1
	mrs	x16, tcr_el1
	stp	x15, x16, [x0, #CTX_SCTLR_EL1]
#endif /* ERRATA_SPECULATIVE_AT */
	mrs	x17, cpacr_el1
	mrs	x9, csselr_el1
	stp	x17, x9, [x0, #CTX_CPACR_EL1]

	mrs	x10, sp_el1
	mrs	x11, esr_el1
	stp	x10, x11, [x0, #CTX_SP_EL1]

	mrs	x12, ttbr0_el1
	mrs	x13, ttbr1_el1
	stp	x12, x13, [x0, #CTX_TTBR0_EL1]

	mrs	x14, mair_el1
	mrs	x15, amair_el1
	stp	x14, x15, [x0, #CTX_MAIR_EL1]

	mrs	x16, actlr_el1
	mrs	x17, tpidr_el1
	stp	x16, x17, [x0, #CTX_ACTLR_EL1]

	mrs	x9, tpidr_el0
	mrs	x10, tpidrro_el0
	stp	x9, x10, [x0, #CTX_TPIDR_EL0]

	mrs	x13, par_el1
	mrs	x14, far_el1
	stp	x13, x14, [x0, #CTX_PAR_EL1]

	mrs	x15, afsr0_el1
	mrs	x16, afsr1_el1
	stp	x15, x16, [x0, #CTX_AFSR0_EL1]

	mrs	x17, contextidr_el1
	mrs	x9, vbar_el1
	stp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]

	/* Save AArch32 system registers if the build has instructed so */
#if CTX_INCLUDE_AARCH32_REGS
	mrs	x11, spsr_abt
	mrs	x12, spsr_und
	stp	x11, x12, [x0, #CTX_SPSR_ABT]

	mrs	x13, spsr_irq
	mrs	x14, spsr_fiq
	stp	x13, x14, [x0, #CTX_SPSR_IRQ]

	mrs	x15, dacr32_el2
	mrs	x16, ifsr32_el2
	stp	x15, x16, [x0, #CTX_DACR32_EL2]
#endif /* CTX_INCLUDE_AARCH32_REGS */

	/* Save NS timer registers if the build has instructed so */
#if NS_TIMER_SWITCH
	mrs	x10, cntp_ctl_el0
	mrs	x11, cntp_cval_el0
	stp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]

	mrs	x12, cntv_ctl_el0
	mrs	x13, cntv_cval_el0
	stp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]

	mrs	x14, cntkctl_el1
	str	x14, [x0, #CTX_CNTKCTL_EL1]
#endif /* NS_TIMER_SWITCH */

	/* Save MTE system registers if the build has instructed so */
#if CTX_INCLUDE_MTE_REGS
	mrs	x15, TFSRE0_EL1
	mrs	x16, TFSR_EL1
	stp	x15, x16, [x0, #CTX_TFSRE0_EL1]

	mrs	x9, RGSR_EL1
	mrs	x10, GCR_EL1
	stp	x9, x10, [x0, #CTX_RGSR_EL1]
#endif /* CTX_INCLUDE_MTE_REGS */

	ret
endfunc el1_sysregs_context_save

/* ------------------------------------------------------------------
 * The following function strictly follows the AArch64 PCS, using
 * x9-x17 (temporary caller-saved registers) to restore the EL1
 * system register context. It assumes that 'x0' points to an
 * 'el1_sys_regs' structure from where the register context will be
 * restored.
 * ------------------------------------------------------------------
 */
func el1_sysregs_context_restore

	ldp	x9, x10, [x0, #CTX_SPSR_EL1]
	msr	spsr_el1, x9
	msr	elr_el1, x10

#if !ERRATA_SPECULATIVE_AT
	ldp	x15, x16, [x0, #CTX_SCTLR_EL1]
	msr	sctlr_el1, x15
	msr	tcr_el1, x16
#endif /* ERRATA_SPECULATIVE_AT */

	ldp	x17, x9, [x0, #CTX_CPACR_EL1]
	msr	cpacr_el1, x17
	msr	csselr_el1, x9

	ldp	x10, x11, [x0, #CTX_SP_EL1]
	msr	sp_el1, x10
	msr	esr_el1, x11

	ldp	x12, x13, [x0, #CTX_TTBR0_EL1]
	msr	ttbr0_el1, x12
	msr	ttbr1_el1, x13

	ldp	x14, x15, [x0, #CTX_MAIR_EL1]
	msr	mair_el1, x14
	msr	amair_el1, x15

	ldp	x16, x17, [x0, #CTX_ACTLR_EL1]
	msr	actlr_el1, x16
	msr	tpidr_el1, x17

	ldp	x9, x10, [x0, #CTX_TPIDR_EL0]
	msr	tpidr_el0, x9
	msr	tpidrro_el0, x10

	ldp	x13, x14, [x0, #CTX_PAR_EL1]
	msr	par_el1, x13
	msr	far_el1, x14

	ldp	x15, x16, [x0, #CTX_AFSR0_EL1]
	msr	afsr0_el1, x15
	msr	afsr1_el1, x16

	ldp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
	msr	contextidr_el1, x17
	msr	vbar_el1, x9

	/* Restore AArch32 system registers if the build has instructed so */
#if CTX_INCLUDE_AARCH32_REGS
	ldp	x11, x12, [x0, #CTX_SPSR_ABT]
	msr	spsr_abt, x11
	msr	spsr_und, x12

	ldp	x13, x14, [x0, #CTX_SPSR_IRQ]
	msr	spsr_irq, x13
	msr	spsr_fiq, x14

	ldp	x15, x16, [x0, #CTX_DACR32_EL2]
	msr	dacr32_el2, x15
	msr	ifsr32_el2, x16
#endif /* CTX_INCLUDE_AARCH32_REGS */

	/* Restore NS timer registers if the build has instructed so */
#if NS_TIMER_SWITCH
	ldp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]
	msr	cntp_ctl_el0, x10
	msr	cntp_cval_el0, x11

	ldp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]
	msr	cntv_ctl_el0, x12
	msr	cntv_cval_el0, x13

	ldr	x14, [x0, #CTX_CNTKCTL_EL1]
	msr	cntkctl_el1, x14
#endif /* NS_TIMER_SWITCH */

	/* Restore MTE system registers if the build has instructed so */
#if CTX_INCLUDE_MTE_REGS
	ldp	x11, x12, [x0, #CTX_TFSRE0_EL1]
	msr	TFSRE0_EL1, x11
	msr	TFSR_EL1, x12

	ldp	x13, x14, [x0, #CTX_RGSR_EL1]
	msr	RGSR_EL1, x13
	msr	GCR_EL1, x14
#endif /* CTX_INCLUDE_MTE_REGS */

	/* No explicit ISB required here as ERET covers it */
	ret
endfunc el1_sysregs_context_restore

/* ------------------------------------------------------------------
 * The following function strictly follows the AAPCS64, using
 * x9-x17 (temporary caller-saved registers per the AArch64 PCS) to
 * save the floating point register context. It assumes that 'x0'
 * points to a 'fp_regs' structure where the register context will
 * be saved.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is set.
 * However, we currently neither use VFP registers nor set traps in
 * Trusted Firmware, so TFP is assumed to be clear.
 *
 * TODO: Revisit when VFP is used in secure world
 * ------------------------------------------------------------------
 */
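/* ------------------------------------------------------------------
 * Illustrative precaution (a sketch only, under the assumption
 * above): a caller that cannot rely on CPTR_EL3.TFP being clear
 * could clear it before the save/restore, e.g.:
 *	mrs	x9, cptr_el3
 *	bic	x9, x9, #TFP_BIT
 *	msr	cptr_el3, x9
 *	isb
 * ------------------------------------------------------------------
 */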
#if CTX_INCLUDE_FPREGS
func fpregs_context_save
	stp	q0, q1, [x0, #CTX_FP_Q0]
	stp	q2, q3, [x0, #CTX_FP_Q2]
	stp	q4, q5, [x0, #CTX_FP_Q4]
	stp	q6, q7, [x0, #CTX_FP_Q6]
	stp	q8, q9, [x0, #CTX_FP_Q8]
	stp	q10, q11, [x0, #CTX_FP_Q10]
	stp	q12, q13, [x0, #CTX_FP_Q12]
	stp	q14, q15, [x0, #CTX_FP_Q14]
	stp	q16, q17, [x0, #CTX_FP_Q16]
	stp	q18, q19, [x0, #CTX_FP_Q18]
	stp	q20, q21, [x0, #CTX_FP_Q20]
	stp	q22, q23, [x0, #CTX_FP_Q22]
	stp	q24, q25, [x0, #CTX_FP_Q24]
	stp	q26, q27, [x0, #CTX_FP_Q26]
	stp	q28, q29, [x0, #CTX_FP_Q28]
	stp	q30, q31, [x0, #CTX_FP_Q30]

	mrs	x9, fpsr
	str	x9, [x0, #CTX_FP_FPSR]

	mrs	x10, fpcr
	str	x10, [x0, #CTX_FP_FPCR]

#if CTX_INCLUDE_AARCH32_REGS
	mrs	x11, fpexc32_el2
	str	x11, [x0, #CTX_FP_FPEXC32_EL2]
#endif /* CTX_INCLUDE_AARCH32_REGS */
	ret
endfunc fpregs_context_save

/* ------------------------------------------------------------------
 * The following function strictly follows the AAPCS64, using
 * x9-x17 (temporary caller-saved registers per the AArch64 PCS) to
 * restore the floating point register context. It assumes that 'x0'
 * points to a 'fp_regs' structure from where the register context
 * will be restored.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is set.
 * However, we currently neither use VFP registers nor set traps in
 * Trusted Firmware, so TFP is assumed to be clear.
 *
 * TODO: Revisit when VFP is used in secure world
 * ------------------------------------------------------------------
 */
func fpregs_context_restore
	ldp	q0, q1, [x0, #CTX_FP_Q0]
	ldp	q2, q3, [x0, #CTX_FP_Q2]
	ldp	q4, q5, [x0, #CTX_FP_Q4]
	ldp	q6, q7, [x0, #CTX_FP_Q6]
	ldp	q8, q9, [x0, #CTX_FP_Q8]
	ldp	q10, q11, [x0, #CTX_FP_Q10]
	ldp	q12, q13, [x0, #CTX_FP_Q12]
	ldp	q14, q15, [x0, #CTX_FP_Q14]
	ldp	q16, q17, [x0, #CTX_FP_Q16]
	ldp	q18, q19, [x0, #CTX_FP_Q18]
	ldp	q20, q21, [x0, #CTX_FP_Q20]
	ldp	q22, q23, [x0, #CTX_FP_Q22]
	ldp	q24, q25, [x0, #CTX_FP_Q24]
	ldp	q26, q27, [x0, #CTX_FP_Q26]
	ldp	q28, q29, [x0, #CTX_FP_Q28]
	ldp	q30, q31, [x0, #CTX_FP_Q30]

	ldr	x9, [x0, #CTX_FP_FPSR]
	msr	fpsr, x9

	ldr	x10, [x0, #CTX_FP_FPCR]
	msr	fpcr, x10

#if CTX_INCLUDE_AARCH32_REGS
	ldr	x11, [x0, #CTX_FP_FPEXC32_EL2]
	msr	fpexc32_el2, x11
#endif /* CTX_INCLUDE_AARCH32_REGS */

	/*
	 * No explicit ISB required here as the ERET used to
	 * switch to secure EL1 or the non-secure world
	 * covers it
	 */

	ret
endfunc fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */

	/*
	 * Set the SCR_EL3.EA bit to enable SErrors at EL3.
	 * Clobbers: x8
	 */
	.macro enable_serror_at_el3
	mrs	x8, scr_el3
	orr	x8, x8, #SCR_EA_BIT
	msr	scr_el3, x8
	.endm

	/*
	 * Set the PSTATE bits not set when the exception was taken as
	 * described in the AArch64.TakeException() pseudocode function
	 * in ARM DDI 0487F.c page J1-7635 to a default value.
	 */
	.macro set_unset_pstate_bits
	/*
	 * If Data Independent Timing (DIT) functionality is implemented,
	 * always enable DIT in EL3
	 */
#if ENABLE_FEAT_DIT
	mov	x8, #DIT_BIT
	msr	DIT, x8
#endif /* ENABLE_FEAT_DIT */
	.endm /* set_unset_pstate_bits */

/* ------------------------------------------------------------------
 * The following macro is used to save and restore all the general
 * purpose and ARMv8.3-PAuth (if enabled) registers.
 * It also checks whether the Secure Cycle Counter (PMCCNTR_EL0) is
 * disabled in EL3/Secure (ARMv8.5-PMU), in which case the cycle
 * counter context (PMCR_EL0) need not be saved/restored during a
 * world switch.
 *
 * Ideally we would only save and restore the callee-saved registers
 * when a world switch occurs, but that type of implementation is
 * more complex. So currently we will always save and restore these
 * registers on entry to and exit from EL3.
 * clobbers: x18
 * ------------------------------------------------------------------
 */
	.macro save_gp_pmcr_pauth_regs
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]

	/* ----------------------------------------------------------
	 * Check if the earlier initialization of MDCR_EL3.SCCD/MCCD
	 * to 1 failed.
	 *
	 * MDCR_EL3:
	 * MCCD bit set: prohibits the Cycle Counter PMCCNTR_EL0 from
	 * counting at EL3.
	 * SCCD bit set: Secure Cycle Counter Disable; prohibits
	 * PMCCNTR_EL0 from counting in Secure state.
	 * If neither bit is set, FEAT_PMUv3p5/p7 is not implemented
	 * and PMCR_EL0 should be saved in the non-secure context.
	 * ----------------------------------------------------------
	 */
	mov_imm	x10, (MDCR_SCCD_BIT | MDCR_MCCD_BIT)
	mrs	x9, mdcr_el3
	tst	x9, x10
	bne	1f
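	/*
	 * At least one of SCCD/MCCD is set: cycle counting is already
	 * prohibited, so the PMCR_EL0 handling below is skipped.
	 */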

	/* ----------------------------------------------------------
	 * If control reaches here, the Secure Cycle Counter
	 * (PMCCNTR_EL0) is not prohibited from counting at EL3 or in
	 * Secure state, so PMCR_EL0 needs to be saved before the
	 * world switch.
	 * ----------------------------------------------------------
	 */
	mrs	x9, pmcr_el0

	/* Check the caller's security state */
	mrs	x10, scr_el3
	tst	x10, #SCR_NS_BIT
	beq	2f

	/* Save PMCR_EL0 if called from Non-secure state */
	str	x9, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]

	/* Disable the cycle counter when event counting is prohibited */
2:	orr	x9, x9, #PMCR_EL0_DP_BIT
	msr	pmcr_el0, x9
	isb
1:
#if CTX_INCLUDE_PAUTH_REGS
	/* ----------------------------------------------------------
	 * Save the ARMv8.3-PAuth keys as they are not banked
	 * by exception level
	 * ----------------------------------------------------------
	 */
	add	x19, sp, #CTX_PAUTH_REGS_OFFSET

	mrs	x20, APIAKeyLo_EL1	/* x21:x20 = APIAKey */
	mrs	x21, APIAKeyHi_EL1
	mrs	x22, APIBKeyLo_EL1	/* x23:x22 = APIBKey */
	mrs	x23, APIBKeyHi_EL1
	mrs	x24, APDAKeyLo_EL1	/* x25:x24 = APDAKey */
	mrs	x25, APDAKeyHi_EL1
	mrs	x26, APDBKeyLo_EL1	/* x27:x26 = APDBKey */
	mrs	x27, APDBKeyHi_EL1
	mrs	x28, APGAKeyLo_EL1	/* x29:x28 = APGAKey */
	mrs	x29, APGAKeyHi_EL1

	stp	x20, x21, [x19, #CTX_PACIAKEY_LO]
	stp	x22, x23, [x19, #CTX_PACIBKEY_LO]
	stp	x24, x25, [x19, #CTX_PACDAKEY_LO]
	stp	x26, x27, [x19, #CTX_PACDBKEY_LO]
	stp	x28, x29, [x19, #CTX_PACGAKEY_LO]
#endif /* CTX_INCLUDE_PAUTH_REGS */
	.endm /* save_gp_pmcr_pauth_regs */

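/* ------------------------------------------------------------------
 * Within this file, save_gp_pmcr_pauth_regs is used only by
 * prepare_el3_entry below; restore_gp_pmcr_pauth_regs further down
 * performs the inverse operation.
 * ------------------------------------------------------------------
 */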
/* -----------------------------------------------------------------
 * This function saves the context and sets the PSTATE to a known
 * state, preparing entry to EL3.
 * Save all the general purpose and ARMv8.3-PAuth (if enabled)
 * registers.
 * Then set any of the PSTATE bits that are not set by hardware
 * according to the AArch64.TakeException pseudocode in the Arm
 * Architecture Reference Manual to a default value for EL3.
 * clobbers: x17
 * -----------------------------------------------------------------
 */
func prepare_el3_entry
	save_gp_pmcr_pauth_regs
	enable_serror_at_el3
	/*
	 * Set the PSTATE bits not described in the AArch64.TakeException
	 * pseudocode to their default values.
	 */
	set_unset_pstate_bits
	ret
endfunc prepare_el3_entry

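/* ------------------------------------------------------------------
 * Usage note (an assumption about the wider EL3 entry path, as the
 * call sites live outside this file): prepare_el3_entry is expected
 * to be invoked from the EL3 exception vector code before anything
 * clobbers the incoming register state.
 * ------------------------------------------------------------------
 */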
/* ------------------------------------------------------------------
 * This function restores ARMv8.3-PAuth (if enabled) and all general
 * purpose registers except x30 from the CPU context.
 * x30 register must be explicitly restored by the caller.
 * ------------------------------------------------------------------
 */
func restore_gp_pmcr_pauth_regs
#if CTX_INCLUDE_PAUTH_REGS
	/* Restore the ARMv8.3 PAuth keys */
	add	x10, sp, #CTX_PAUTH_REGS_OFFSET

	ldp	x0, x1, [x10, #CTX_PACIAKEY_LO]	/* x1:x0 = APIAKey */
	ldp	x2, x3, [x10, #CTX_PACIBKEY_LO]	/* x3:x2 = APIBKey */
	ldp	x4, x5, [x10, #CTX_PACDAKEY_LO]	/* x5:x4 = APDAKey */
	ldp	x6, x7, [x10, #CTX_PACDBKEY_LO]	/* x7:x6 = APDBKey */
	ldp	x8, x9, [x10, #CTX_PACGAKEY_LO]	/* x9:x8 = APGAKey */

	msr	APIAKeyLo_EL1, x0
	msr	APIAKeyHi_EL1, x1
	msr	APIBKeyLo_EL1, x2
	msr	APIBKeyHi_EL1, x3
	msr	APDAKeyLo_EL1, x4
	msr	APDAKeyHi_EL1, x5
	msr	APDBKeyLo_EL1, x6
	msr	APDBKeyHi_EL1, x7
	msr	APGAKeyLo_EL1, x8
	msr	APGAKeyHi_EL1, x9
#endif /* CTX_INCLUDE_PAUTH_REGS */

	/* ----------------------------------------------------------
	 * Restore PMCR_EL0 when returning to the Non-secure state,
	 * unless the Secure Cycle Counter has been disabled in
	 * MDCR_EL3 (possible when ARMv8.5-PMU is implemented).
	 * ----------------------------------------------------------
	 */
	mrs	x0, scr_el3
	tst	x0, #SCR_NS_BIT
	beq	2f

	/* ----------------------------------------------------------
	 * Back to the Non-secure state.
	 * Check if the earlier initialization of MDCR_EL3.SCCD/MCCD
	 * to 1 failed, meaning that FEAT_PMUv3p5/p7 is not
	 * implemented and PMCR_EL0 should be restored from the
	 * non-secure context.
	 * ----------------------------------------------------------
	 */
	mov_imm	x1, (MDCR_SCCD_BIT | MDCR_MCCD_BIT)
	mrs	x0, mdcr_el3
	tst	x0, x1
	bne	2f
	ldr	x0, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
	msr	pmcr_el0, x0
2:
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	ldr	x28, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
	msr	sp_el0, x28
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ret
endfunc restore_gp_pmcr_pauth_regs

/*
 * In case of ERRATA_SPECULATIVE_AT, save the SCTLR_EL1 and TCR_EL1
 * registers and update the EL1 registers so that the stage 1 and
 * stage 2 page table walks are disabled
 */
func save_and_update_ptw_el1_sys_regs
	/* ----------------------------------------------------------
	 * Save only sctlr_el1 and tcr_el1 registers
	 * ----------------------------------------------------------
	 */
	mrs	x29, sctlr_el1
	str	x29, [sp, #(CTX_EL1_SYSREGS_OFFSET + CTX_SCTLR_EL1)]
	mrs	x29, tcr_el1
	str	x29, [sp, #(CTX_EL1_SYSREGS_OFFSET + CTX_TCR_EL1)]

	/* ------------------------------------------------------------
	 * The steps below must be performed in this order to disable
	 * the page table walk for the lower ELs (EL1 and EL0). The
	 * first step disables the stage 1 page table walk, and the
	 * second step ensures that the page table walker uses the
	 * TCR_EL1.EPDx bits to perform address translation. The ISB
	 * ensures that the CPU performs these two steps in order.
	 *
	 * 1. Update the TCR_EL1.EPDx bits to disable the stage 1
	 *    page table walk.
	 * 2. Set the MMU enable bit to avoid an identity mapping via
	 *    stage 2 and force the TCR_EL1.EPDx bits to be used by
	 *    the page table walker.
	 * ------------------------------------------------------------
	 */
	orr	x29, x29, #(TCR_EPD0_BIT)
	orr	x29, x29, #(TCR_EPD1_BIT)
	msr	tcr_el1, x29
	isb
	mrs	x29, sctlr_el1
	orr	x29, x29, #SCTLR_M_BIT
	msr	sctlr_el1, x29
	isb

	ret
endfunc save_and_update_ptw_el1_sys_regs

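/*
 * The inverse operation is performed by the restore_ptw_el1_sys_regs
 * macro, which el3_exit below invokes to restore the saved SCTLR_EL1
 * and TCR_EL1 values when ERRATA_SPECULATIVE_AT is in effect.
 */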
/* ------------------------------------------------------------------
 * This routine assumes that SP_EL3 is pointing to a valid context
 * structure from where the GP regs and other special registers can
 * be retrieved.
 * ------------------------------------------------------------------
 */
func el3_exit
#if ENABLE_ASSERTIONS
	/* el3_exit assumes SP_EL0 on entry */
	mrs	x17, spsel
	cmp	x17, #MODE_SP_EL0
	ASM_ASSERT(eq)
#endif /* ENABLE_ASSERTIONS */

	/* ----------------------------------------------------------
	 * Save the current SP_EL0, i.e. the EL3 runtime stack, which
	 * will be used for handling the next SMC.
	 * Then switch to SP_EL3.
	 * ----------------------------------------------------------
	 */
	mov	x17, sp
	msr	spsel, #MODE_SP_ELX
	str	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

#if IMAGE_BL31
	/* ----------------------------------------------------------
	 * Restore CPTR_EL3.
	 * ZCR is only restored if SVE is supported and enabled.
	 * Synchronization is required before zcr_el3 is addressed.
	 * ----------------------------------------------------------
	 */
	ldp	x19, x20, [sp, #CTX_EL3STATE_OFFSET + CTX_CPTR_EL3]
	msr	cptr_el3, x19

	ands	x19, x19, #CPTR_EZ_BIT
	beq	sve_not_enabled

	isb
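	/*
	 * zcr_el3 is written via its raw S3_6_C1_C2_0 encoding,
	 * presumably so the file assembles even with toolchains that
	 * do not recognize the register by name.
	 */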
	msr	S3_6_C1_C2_0, x20 /* zcr_el3 */
sve_not_enabled:
#endif /* IMAGE_BL31 */

#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639
	/* ----------------------------------------------------------
	 * Restore mitigation state as it was on entry to EL3
	 * ----------------------------------------------------------
	 */
	ldr	x17, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
	cbz	x17, 1f
	blr	x17
1:
#endif /* IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639 */

#if IMAGE_BL31 && RAS_EXTENSION
	/* ----------------------------------------------------------
	 * Issue an Error Synchronization Barrier to synchronize
	 * SErrors before exiting EL3. We're running with EAs
	 * unmasked, so any synchronized errors would be taken
	 * immediately; therefore there is no need to inspect the
	 * DISR_EL1 register.
	 * ----------------------------------------------------------
	 */
	esb
#else
	dsb	sy
#endif /* IMAGE_BL31 && RAS_EXTENSION */

	/* ----------------------------------------------------------
	 * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
	 * ----------------------------------------------------------
	 */
	ldr	x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	ldp	x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	msr	scr_el3, x18
	msr	spsr_el3, x16
	msr	elr_el3, x17

	restore_ptw_el1_sys_regs

	/* ----------------------------------------------------------
	 * Restore general purpose (including x30), PMCR_EL0 and
	 * ARMv8.3-PAuth registers.
	 * Exit EL3 via ERET to a lower exception level.
	 * ----------------------------------------------------------
	 */
	bl	restore_gp_pmcr_pauth_regs
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

#ifdef IMAGE_BL31
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
#endif /* IMAGE_BL31 */

	exception_return

endfunc el3_exit
959