/*
 * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <context.h>
#include <el3_common_macros.S>

#if CTX_INCLUDE_FPREGS
	.global	fpregs_context_save
	.global	fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */

#if ERRATA_SPECULATIVE_AT
	.global save_and_update_ptw_el1_sys_regs
#endif /* ERRATA_SPECULATIVE_AT */

	.global	prepare_el3_entry
	.global	restore_gp_pmcr_pauth_regs
	.global	el3_exit

/* ------------------------------------------------------------------
 * The following function strictly follows AAPCS64, using only
 * x9-x17 (temporary caller-saved registers in the AArch64 PCS) to
 * save the floating point register context. It assumes that 'x0'
 * points to a 'fp_regs' structure where the register context will
 * be saved.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is set.
 * However, Trusted Firmware currently neither uses VFP registers
 * nor sets this trap, so the bit is assumed to be clear.
 *
 * TODO: Revisit when VFP is used in the secure world
 * ------------------------------------------------------------------
 */
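/*
 * Illustrative caller sketch (hypothetical; the real call sites live
 * elsewhere in TF-A), assuming the usual context-management helpers:
 *
 *	fpregs_context_save(get_fpregs_ctx(cm_get_context(NON_SECURE)));
 *
 * i.e. on entry x0 = &ctx->fpregs_ctx of the world being switched out.
 */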
#if CTX_INCLUDE_FPREGS
func fpregs_context_save
	stp	q0, q1, [x0, #CTX_FP_Q0]
	stp	q2, q3, [x0, #CTX_FP_Q2]
	stp	q4, q5, [x0, #CTX_FP_Q4]
	stp	q6, q7, [x0, #CTX_FP_Q6]
	stp	q8, q9, [x0, #CTX_FP_Q8]
	stp	q10, q11, [x0, #CTX_FP_Q10]
	stp	q12, q13, [x0, #CTX_FP_Q12]
	stp	q14, q15, [x0, #CTX_FP_Q14]
	stp	q16, q17, [x0, #CTX_FP_Q16]
	stp	q18, q19, [x0, #CTX_FP_Q18]
	stp	q20, q21, [x0, #CTX_FP_Q20]
	stp	q22, q23, [x0, #CTX_FP_Q22]
	stp	q24, q25, [x0, #CTX_FP_Q24]
	stp	q26, q27, [x0, #CTX_FP_Q26]
	stp	q28, q29, [x0, #CTX_FP_Q28]
	stp	q30, q31, [x0, #CTX_FP_Q30]

	mrs	x9, fpsr
	str	x9, [x0, #CTX_FP_FPSR]

	mrs	x10, fpcr
	str	x10, [x0, #CTX_FP_FPCR]

#if CTX_INCLUDE_AARCH32_REGS
	mrs	x11, fpexc32_el2
	str	x11, [x0, #CTX_FP_FPEXC32_EL2]
#endif /* CTX_INCLUDE_AARCH32_REGS */
	ret
endfunc fpregs_context_save

/* ------------------------------------------------------------------
 * The following function strictly follows AAPCS64, using only
 * x9-x17 (temporary caller-saved registers in the AArch64 PCS) to
 * restore the floating point register context. It assumes that 'x0'
 * points to a 'fp_regs' structure from where the register context
 * will be restored.
 *
 * Access to VFP registers will trap if CPTR_EL3.TFP is set.
 * However, Trusted Firmware currently neither uses VFP registers
 * nor sets this trap, so the bit is assumed to be clear.
 *
 * TODO: Revisit when VFP is used in the secure world
 * ------------------------------------------------------------------
 */
func fpregs_context_restore
	ldp	q0, q1, [x0, #CTX_FP_Q0]
	ldp	q2, q3, [x0, #CTX_FP_Q2]
	ldp	q4, q5, [x0, #CTX_FP_Q4]
	ldp	q6, q7, [x0, #CTX_FP_Q6]
	ldp	q8, q9, [x0, #CTX_FP_Q8]
	ldp	q10, q11, [x0, #CTX_FP_Q10]
	ldp	q12, q13, [x0, #CTX_FP_Q12]
	ldp	q14, q15, [x0, #CTX_FP_Q14]
	ldp	q16, q17, [x0, #CTX_FP_Q16]
	ldp	q18, q19, [x0, #CTX_FP_Q18]
	ldp	q20, q21, [x0, #CTX_FP_Q20]
	ldp	q22, q23, [x0, #CTX_FP_Q22]
	ldp	q24, q25, [x0, #CTX_FP_Q24]
	ldp	q26, q27, [x0, #CTX_FP_Q26]
	ldp	q28, q29, [x0, #CTX_FP_Q28]
	ldp	q30, q31, [x0, #CTX_FP_Q30]

	ldr	x9, [x0, #CTX_FP_FPSR]
	msr	fpsr, x9

	ldr	x10, [x0, #CTX_FP_FPCR]
	msr	fpcr, x10

#if CTX_INCLUDE_AARCH32_REGS
	ldr	x11, [x0, #CTX_FP_FPEXC32_EL2]
	msr	fpexc32_el2, x11
#endif /* CTX_INCLUDE_AARCH32_REGS */

	/*
	 * No explicit ISB is required here, as the ERET used to
	 * switch to secure EL1 or the non-secure world covers it.
	 */

	ret
endfunc fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */

	/*
	 * Set the SCR_EL3.EA bit so that External Aborts and SError
	 * interrupts are taken to EL3.
	 */
	.macro enable_serror_at_el3
	mrs	x8, scr_el3
	orr	x8, x8, #SCR_EA_BIT
	msr	scr_el3, x8
	.endm

	/*
	 * Set to a default value the PSTATE bits that are left unset
	 * when an exception is taken, as described in the
	 * AArch64.TakeException() pseudocode function in Arm DDI
	 * 0487F.c, page J1-7635.
	 */
	.macro set_unset_pstate_bits
	/*
	 * If Data Independent Timing (DIT) functionality is implemented,
	 * always enable DIT in EL3.
	 */
#if ENABLE_FEAT_DIT
#if ENABLE_FEAT_DIT == 2
	mrs	x8, id_aa64pfr0_el1
	and	x8, x8, #(ID_AA64PFR0_DIT_MASK << ID_AA64PFR0_DIT_SHIFT)
	cbz	x8, 1f
#endif
	mov	x8, #DIT_BIT
	msr	DIT, x8
1:
#endif /* ENABLE_FEAT_DIT */
	.endm /* set_unset_pstate_bits */
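/*
 * C-level sketch of the ENABLE_FEAT_DIT == 2 runtime check above
 * (illustrative only; helper names are assumptions, not the actual API):
 *
 *	if ((read_id_aa64pfr0_el1() &
 *	     (ID_AA64PFR0_DIT_MASK << ID_AA64PFR0_DIT_SHIFT)) != 0U) {
 *		write_dit(DIT_BIT);	// DIT implemented, enable it in EL3
 *	}
 */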

/*-------------------------------------------------------------------------
 * This macro checks the ENABLE_FEAT_MPAM state, performs an ID register
 * check to see whether the platform supports the MPAM extension, and
 * restores the MPAM3_EL3 register value if it is
 * FEAT_STATE_ENABLED/FEAT_STATE_CHECKED.
 *
 * This is more involved than for other extensions because MPAM support
 * cannot be determined by looking at the status of a particular bit in
 * the MDCR_EL3 or CPTR_EL3 register.
 * ------------------------------------------------------------------------
 */

	.macro	restore_mpam3_el3
#if ENABLE_FEAT_MPAM
#if ENABLE_FEAT_MPAM == 2

	mrs x8, id_aa64pfr0_el1
	lsr x8, x8, #(ID_AA64PFR0_MPAM_SHIFT)
	and x8, x8, #(ID_AA64PFR0_MPAM_MASK)
	mrs x7, id_aa64pfr1_el1
	lsr x7, x7, #(ID_AA64PFR1_MPAM_FRAC_SHIFT)
	and x7, x7, #(ID_AA64PFR1_MPAM_FRAC_MASK)
	orr x7, x7, x8
	cbz x7, no_mpam
#endif
	/* -----------------------------------------------------------
	 * Restore the MPAM3_EL3 register as per the context state.
	 * Currently we only enable MPAM for the NS world and trap to
	 * EL3 for MPAM accesses from lower ELs of the Secure and
	 * Realm worlds. x9 holds the address of the per_world context.
	 * -----------------------------------------------------------
	 */

	ldr	x17, [x9, #CTX_MPAM3_EL3]
	msr	S3_6_C10_C5_0, x17 /* mpam3_el3 */

no_mpam:
#endif
	.endm /* restore_mpam3_el3 */
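/*
 * The ID-register check above, as a C sketch (illustrative only; helper
 * names are assumptions). MPAM is present if either ID_AA64PFR0_EL1.MPAM
 * (major version) or ID_AA64PFR1_EL1.MPAM_frac (minor version) is non-zero:
 *
 *	mpam  = (read_id_aa64pfr0_el1() >> ID_AA64PFR0_MPAM_SHIFT) &
 *		ID_AA64PFR0_MPAM_MASK;
 *	mpam |= (read_id_aa64pfr1_el1() >> ID_AA64PFR1_MPAM_FRAC_SHIFT) &
 *		ID_AA64PFR1_MPAM_FRAC_MASK;
 *	if (mpam != 0U)
 *		write_mpam3_el3(per_world_ctx->mpam3_el3);
 */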

/* ------------------------------------------------------------------
 * The following macro is used to save and restore all the general
 * purpose and ARMv8.3-PAuth (if enabled) registers.
 * It also checks whether the Secure Cycle Counter (PMCCNTR_EL0) is
 * disabled in EL3/Secure (ARMv8.5-PMU), in which case PMCCNTR_EL0
 * need not be saved/restored during a world switch.
 *
 * Ideally we would only save and restore the callee-saved registers
 * when a world switch occurs, but that type of implementation is
 * more complex. So currently we always save and restore these
 * registers on entry to and exit from EL3.
 * clobbers: x18
 * ------------------------------------------------------------------
 */
	.macro save_gp_pmcr_pauth_regs
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]

	/* PMUv3 is presumed to be always present */
	mrs	x9, pmcr_el0
	str	x9, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
	/* Disable cycle counter when event counting is prohibited */
	orr	x9, x9, #PMCR_EL0_DP_BIT
	msr	pmcr_el0, x9
	isb
#if CTX_INCLUDE_PAUTH_REGS
	/* ----------------------------------------------------------
	 * Save the ARMv8.3-PAuth keys as they are not banked
	 * by exception level
	 * ----------------------------------------------------------
	 */
	add	x19, sp, #CTX_PAUTH_REGS_OFFSET

	mrs	x20, APIAKeyLo_EL1	/* x21:x20 = APIAKey */
	mrs	x21, APIAKeyHi_EL1
	mrs	x22, APIBKeyLo_EL1	/* x23:x22 = APIBKey */
	mrs	x23, APIBKeyHi_EL1
	mrs	x24, APDAKeyLo_EL1	/* x25:x24 = APDAKey */
	mrs	x25, APDAKeyHi_EL1
	mrs	x26, APDBKeyLo_EL1	/* x27:x26 = APDBKey */
	mrs	x27, APDBKeyHi_EL1
	mrs	x28, APGAKeyLo_EL1	/* x29:x28 = APGAKey */
	mrs	x29, APGAKeyHi_EL1

	stp	x20, x21, [x19, #CTX_PACIAKEY_LO]
	stp	x22, x23, [x19, #CTX_PACIBKEY_LO]
	stp	x24, x25, [x19, #CTX_PACDAKEY_LO]
	stp	x26, x27, [x19, #CTX_PACDBKEY_LO]
	stp	x28, x29, [x19, #CTX_PACGAKEY_LO]
#endif /* CTX_INCLUDE_PAUTH_REGS */
	.endm /* save_gp_pmcr_pauth_regs */
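/*
 * Worked example for the PMCR_EL0 handling above (value illustrative):
 * if PMCR_EL0 reads as 0x41013000 on entry, that value is saved to the
 * context, while the live register is written back as 0x41013020, i.e.
 * with PMCR_EL0.DP (bit 5) set, so the cycle counter PMCCNTR_EL0 is
 * disabled while event counting is prohibited.
 */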

/* -----------------------------------------------------------------
 * This function saves the context and sets the PSTATE to a known
 * state, preparing entry to EL3.
 * Save all the general purpose and ARMv8.3-PAuth (if enabled)
 * registers.
 * Then set any of the PSTATE bits that are not set by hardware
 * according to the AArch64.TakeException pseudocode in the Arm
 * Architecture Reference Manual to a default value for EL3.
 * clobbers: x17
 * -----------------------------------------------------------------
 */
func prepare_el3_entry
	save_gp_pmcr_pauth_regs
	enable_serror_at_el3
	/*
	 * Set the PSTATE bits left unset by the AArch64.TakeException
	 * pseudocode to their default values.
	 */
	set_unset_pstate_bits
	ret
endfunc prepare_el3_entry
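/*
 * prepare_el3_entry is intended to be invoked from the EL3 exception
 * vectors (e.g. runtime_exceptions.S) immediately on entry to EL3,
 * before the saved registers are clobbered by the handler.
 */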

/* ------------------------------------------------------------------
 * This function restores ARMv8.3-PAuth (if enabled) and all general
 * purpose registers except x30 from the CPU context.
 * x30 register must be explicitly restored by the caller.
 * ------------------------------------------------------------------
 */
func restore_gp_pmcr_pauth_regs
#if CTX_INCLUDE_PAUTH_REGS
	/* Restore the ARMv8.3 PAuth keys */
	add	x10, sp, #CTX_PAUTH_REGS_OFFSET

	ldp	x0, x1, [x10, #CTX_PACIAKEY_LO]	/* x1:x0 = APIAKey */
	ldp	x2, x3, [x10, #CTX_PACIBKEY_LO]	/* x3:x2 = APIBKey */
	ldp	x4, x5, [x10, #CTX_PACDAKEY_LO]	/* x5:x4 = APDAKey */
	ldp	x6, x7, [x10, #CTX_PACDBKEY_LO]	/* x7:x6 = APDBKey */
	ldp	x8, x9, [x10, #CTX_PACGAKEY_LO]	/* x9:x8 = APGAKey */

	msr	APIAKeyLo_EL1, x0
	msr	APIAKeyHi_EL1, x1
	msr	APIBKeyLo_EL1, x2
	msr	APIBKeyHi_EL1, x3
	msr	APDAKeyLo_EL1, x4
	msr	APDAKeyHi_EL1, x5
	msr	APDBKeyLo_EL1, x6
	msr	APDBKeyHi_EL1, x7
	msr	APGAKeyLo_EL1, x8
	msr	APGAKeyHi_EL1, x9
#endif /* CTX_INCLUDE_PAUTH_REGS */

	/* PMUv3 is presumed to be always present */
	ldr	x0, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
	msr	pmcr_el0, x0
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	ldr	x28, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
	msr	sp_el0, x28
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ret
endfunc restore_gp_pmcr_pauth_regs
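/*
 * Typical call sequence, as used by el3_exit further below; the caller
 * restores x30 itself, since this function returns through it:
 *
 *	bl	restore_gp_pmcr_pauth_regs
 *	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
 */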

#if ERRATA_SPECULATIVE_AT
/* --------------------------------------------------------------------
 * In case of ERRATA_SPECULATIVE_AT, save the SCTLR_EL1 and TCR_EL1
 * registers and update the EL1 registers to disable the stage 1 and
 * stage 2 page table walk.
 * --------------------------------------------------------------------
 */
func save_and_update_ptw_el1_sys_regs
	/* ----------------------------------------------------------
	 * Save only the sctlr_el1 and tcr_el1 registers
	 * ----------------------------------------------------------
	 */
	mrs	x29, sctlr_el1
	str	x29, [sp, #(CTX_ERRATA_SPEC_AT_OFFSET + CTX_ERRATA_SPEC_AT_SCTLR_EL1)]
	mrs	x29, tcr_el1
	str	x29, [sp, #(CTX_ERRATA_SPEC_AT_OFFSET + CTX_ERRATA_SPEC_AT_TCR_EL1)]

	/* ------------------------------------------------------------
	 * The steps below must be performed in this order to disable
	 * the page table walk for lower ELs (EL1 and EL0). The first
	 * step disables the stage 1 page table walk, and the second
	 * ensures that the page table walker uses the TCR_EL1.EPDx
	 * bits for address translation. The ISB ensures that the CPU
	 * performs these two steps in order.
	 *
	 * 1. Update the TCR_EL1.EPDx bits to disable the stage 1
	 *    page table walk.
	 * 2. Enable the MMU bit to avoid identity mapping via stage 2
	 *    and force the TCR_EL1.EPDx bits to be used by the page
	 *    table walker.
	 * ------------------------------------------------------------
	 */
	orr	x29, x29, #(TCR_EPD0_BIT)
	orr	x29, x29, #(TCR_EPD1_BIT)
	msr	tcr_el1, x29
	isb
	mrs	x29, sctlr_el1
	orr	x29, x29, #SCTLR_M_BIT
	msr	sctlr_el1, x29
	isb
	ret
endfunc save_and_update_ptw_el1_sys_regs

#endif /* ERRATA_SPECULATIVE_AT */
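/*
 * The matching restore is performed by the restore_ptw_el1_sys_regs
 * macro (invoked from el3_exit below), which writes the saved
 * SCTLR_EL1 and TCR_EL1 values back before the ERET.
 */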

/* -----------------------------------------------------------------
 * The following macro returns the address of the per_world context
 * for the security state, retrieved through the "get_security_state"
 * macro. The per_world context address is returned in the register
 * argument.
 * Clobbers: x9, x10
 * ------------------------------------------------------------------
 */

.macro get_per_world_context _reg:req
	ldr	x10, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	get_security_state x9, x10
	mov_imm	x10, (CTX_PERWORLD_EL3STATE_END - CTX_CPTR_EL3)
	mul	x9, x9, x10
	adrp	x10, per_world_context
	add	x10, x10, :lo12:per_world_context
	add	x9, x9, x10
	mov	\_reg, x9
.endm
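/*
 * Address computation performed above, as a C sketch (illustrative):
 *
 *	size = CTX_PERWORLD_EL3STATE_END - CTX_CPTR_EL3;   // bytes per world
 *	reg  = (uintptr_t)per_world_context + (security_state * size);
 */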

/* ------------------------------------------------------------------
 * This routine assumes that SP_EL3 is pointing to a valid context
 * structure from which the gp regs and other special registers can
 * be retrieved.
 * ------------------------------------------------------------------
 */
func el3_exit
#if ENABLE_ASSERTIONS
	/* el3_exit assumes SP_EL0 on entry */
	mrs	x17, spsel
	cmp	x17, #MODE_SP_EL0
	ASM_ASSERT(eq)
#endif /* ENABLE_ASSERTIONS */

	/* ----------------------------------------------------------
	 * Save the current SP_EL0, i.e. the EL3 runtime stack, which
	 * will be used for handling the next SMC.
	 * Then switch to SP_EL3.
	 * ----------------------------------------------------------
	 */
	mov	x17, sp
	msr	spsel, #MODE_SP_ELX
	str	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
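	/*
	 * From here on, sp is SP_EL3, which points at the base of the
	 * CPU context; the EL3 runtime stack pointer has been stashed
	 * in CTX_RUNTIME_SP for the next entry into EL3.
	 */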

	/* ----------------------------------------------------------
	 * Restore CPTR_EL3.
	 * ZCR is only restored if SVE is supported and enabled.
	 * Synchronization is required before zcr_el3 is addressed.
	 * ----------------------------------------------------------
	 */

	/* The address of the per_world context is stored in x9 */
	get_per_world_context x9

	ldp	x19, x20, [x9, #CTX_CPTR_EL3]
	msr	cptr_el3, x19

#if IMAGE_BL31
	ands	x19, x19, #CPTR_EZ_BIT
	beq	sve_not_enabled

	isb
	msr	S3_6_C1_C2_0, x20 /* zcr_el3 */
sve_not_enabled:

	restore_mpam3_el3

#endif /* IMAGE_BL31 */

#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639
	/* ----------------------------------------------------------
	 * Restore mitigation state as it was on entry to EL3
	 * ----------------------------------------------------------
	 */
	ldr	x17, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
	cbz	x17, 1f
	blr	x17
1:
#endif /* IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639 */

#if IMAGE_BL31
	synchronize_errors
#endif /* IMAGE_BL31 */

	/* --------------------------------------------------------------
	 * Restore MDCR_EL3, SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
	 * --------------------------------------------------------------
	 */
	ldp	x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	ldr	x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	ldr	x19, [sp, #CTX_EL3STATE_OFFSET + CTX_MDCR_EL3]
	msr	spsr_el3, x16
	msr	elr_el3, x17
	msr	scr_el3, x18
	msr	mdcr_el3, x19

	restore_ptw_el1_sys_regs

	/* ----------------------------------------------------------
	 * Restore general purpose (including x30), PMCR_EL0 and
	 * ARMv8.3-PAuth registers.
	 * Exit EL3 via ERET to a lower exception level.
	 * ----------------------------------------------------------
	 */
	bl	restore_gp_pmcr_pauth_regs
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

#ifdef IMAGE_BL31
	/* Clear the EL3 flag as we are exiting EL3 */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
#endif /* IMAGE_BL31 */

	exception_return

endfunc el3_exit