/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2022, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <drivers/arm/gicv3.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/brbe.h>
#include <lib/extensions/mpam.h>
#include <lib/extensions/sme.h>
#include <lib/extensions/spe.h>
#include <lib/extensions/sve.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/extensions/trbe.h>
#include <lib/extensions/trf.h>
#include <lib/utils.h>

#if ENABLE_FEAT_TWED
/* Make sure the delay value fits within the range (0-15) */
CASSERT(((TWED_DELAY & ~SCR_TWEDEL_MASK) == 0U), assert_twed_delay_value_check);
#endif /* ENABLE_FEAT_TWED */
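
/*
 * Illustrative note: TWED_DELAY is a build-time option, so a platform that
 * wants, for example, a delay value of 8 would typically be built with
 * something like the following (assuming the standard TF-A build system):
 *
 *	make PLAT=<platform> ENABLE_FEAT_TWED=1 TWED_DELAY=8 ...
 *
 * The CASSERT above then checks at compile time that the chosen value fits
 * in the 4-bit SCR_EL3.TWEDEL field.
 */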

static void manage_extensions_nonsecure(cpu_context_t *ctx);
static void manage_extensions_secure(cpu_context_t *ctx);

static void setup_el1_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t sctlr_elx, actlr_elx;

	/*
	 * Initialise SCTLR_EL1 to the reset value corresponding to the target
	 * execution state, setting all fields rather than relying on the hw.
	 * Some fields have architecturally UNKNOWN reset values and these are
	 * set to zero.
	 *
	 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
	 *
	 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
	 * required by the PSCI specification).
	 */
	sctlr_elx = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0UL;
	if (GET_RW(ep->spsr) == MODE_RW_64) {
		sctlr_elx |= SCTLR_EL1_RES1;
	} else {
		/*
		 * If the target execution state is AArch32 then the following
		 * fields need to be set.
		 *
		 * SCTLR_EL1.nTWE: Set to one so that EL0 execution of WFE
		 *  instructions is not trapped to EL1.
		 *
		 * SCTLR_EL1.nTWI: Set to one so that EL0 execution of WFI
		 *  instructions is not trapped to EL1.
		 *
		 * SCTLR_EL1.CP15BEN: Set to one to enable EL0 execution of the
		 *  CP15DMB, CP15DSB, and CP15ISB instructions.
		 */
		sctlr_elx |= SCTLR_AARCH32_EL1_RES1 | SCTLR_CP15BEN_BIT
					| SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
	}

#if ERRATA_A75_764081
	/*
	 * If the workaround for Cortex-A75 erratum 764081 is used then set
	 * SCTLR_EL1.IESB to enable the Implicit Error Synchronization Barrier.
	 */
	sctlr_elx |= SCTLR_IESB_BIT;
#endif
	/* Store the initialised SCTLR_EL1 value in the cpu_context */
	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);

	/*
	 * Base the context ACTLR_EL1 on the current value, as it is
	 * implementation defined. The context restore process will write
	 * the value from the context to the actual register and can cause
	 * problems for processor cores that don't expect certain bits to
	 * be zero.
	 */
	actlr_elx = read_actlr_el1();
	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_ACTLR_EL1, actlr_elx);
}

/******************************************************************************
 * This function performs initializations that are specific to SECURE state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_secure_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

#if defined(IMAGE_BL31) && !defined(SPD_spmd)
	/*
	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
	 * indicated by the interrupt routing model for BL31.
	 */
	scr_el3 |= get_scr_el3_from_routing_model(SECURE);
#endif

#if !CTX_INCLUDE_MTE_REGS || ENABLE_ASSERTIONS
	/* Get the Memory Tagging Extension support level */
	unsigned int mte = get_armv8_5_mte_support();
#endif
	/*
	 * Allow access to Allocation Tags when CTX_INCLUDE_MTE_REGS
	 * is set, or when MTE is only implemented at EL0.
	 */
#if CTX_INCLUDE_MTE_REGS
	assert((mte == MTE_IMPLEMENTED_ELX) || (mte == MTE_IMPLEMENTED_ASY));
	scr_el3 |= SCR_ATA_BIT;
#else
	if (mte == MTE_IMPLEMENTED_EL0) {
		scr_el3 |= SCR_ATA_BIT;
	}
#endif /* CTX_INCLUDE_MTE_REGS */

	/* Enable S-EL2 if the next EL is EL2 and S-EL2 is present */
	if ((GET_EL(ep->spsr) == MODE_EL2) && is_feat_sel2_supported()) {
		if (GET_RW(ep->spsr) != MODE_RW_64) {
			ERROR("S-EL2 can not be used in AArch32.\n");
			panic();
		}

		scr_el3 |= SCR_EEL2_BIT;
	}

	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);

	/*
	 * Initialize the EL1 context registers unless the SPMC is running
	 * at S-EL2.
	 */
#if !SPMD_SPM_AT_SEL2
	setup_el1_context(ctx, ep);
#endif

	manage_extensions_secure(ctx);
}

#if ENABLE_RME
/******************************************************************************
 * This function performs initializations that are specific to REALM state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_realm_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

	scr_el3 |= SCR_NS_BIT | SCR_NSE_BIT;

	if (is_feat_csv2_2_supported()) {
		/* Enable access to the SCXTNUM_ELx registers. */
		scr_el3 |= SCR_EnSCXT_BIT;
	}

	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
}
#endif /* ENABLE_RME */

/******************************************************************************
 * This function performs initializations that are specific to NON-SECURE state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_ns_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

	/* SCR_EL3.NS: Set the NS bit */
	scr_el3 |= SCR_NS_BIT;

#if !CTX_INCLUDE_PAUTH_REGS
	/*
	 * If the pointer authentication registers aren't saved during world
	 * switches, the value of the registers can be leaked from the Secure
	 * to the Non-secure world. To prevent this, rather than enabling
	 * pointer authentication everywhere, we only enable it in the
	 * Non-secure world.
	 *
	 * If the Secure world wants to use pointer authentication,
	 * CTX_INCLUDE_PAUTH_REGS must be set to 1.
	 */
	scr_el3 |= SCR_API_BIT | SCR_APK_BIT;
#endif /* !CTX_INCLUDE_PAUTH_REGS */

	/* Allow access to Allocation Tags when MTE is implemented. */
	scr_el3 |= SCR_ATA_BIT;

#if HANDLE_EA_EL3_FIRST_NS
	/* SCR_EL3.EA: Route External Aborts and SError interrupts to EL3. */
	scr_el3 |= SCR_EA_BIT;
#endif

#if RAS_TRAP_NS_ERR_REC_ACCESS
	/*
	 * SCR_EL3.TERR: Trap Error record accesses. Accesses to the RAS ERR
	 * and RAS ERX registers from EL1 and EL2 (from any security state)
	 * are trapped to EL3. Set here to trap only for NS EL1/EL2.
	 */
	scr_el3 |= SCR_TERR_BIT;
#endif

	if (is_feat_csv2_2_supported()) {
		/* Enable access to the SCXTNUM_ELx registers. */
		scr_el3 |= SCR_EnSCXT_BIT;
	}

#ifdef IMAGE_BL31
	/*
	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
	 *  indicated by the interrupt routing model for BL31.
	 */
	scr_el3 |= get_scr_el3_from_routing_model(NON_SECURE);
#endif
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);

	/* Initialize the EL1 context registers */
	setup_el1_context(ctx, ep);

	/* Initialize the EL2 context registers */
#if CTX_INCLUDE_EL2_REGS

	/*
	 * Initialize the SCTLR_EL2 context register using the Endianness
	 * value taken from the entrypoint attribute.
	 */
	u_register_t sctlr_el2 = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0UL;
	sctlr_el2 |= SCTLR_EL2_RES1;
	write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_SCTLR_EL2,
			sctlr_el2);

	/*
	 * Program ICC_SRE_EL2 to make sure the correct bits are set
	 * when restoring the NS context.
	 */
	u_register_t icc_sre_el2 = ICC_SRE_DIB_BIT | ICC_SRE_DFB_BIT |
				   ICC_SRE_EN_BIT | ICC_SRE_SRE_BIT;
	write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_ICC_SRE_EL2,
			icc_sre_el2);

	/*
	 * Initialize MDCR_EL2.HPMN to its hardware reset value so we don't
	 * throw anyone off who expects this to be sensible.
	 * TODO: A similar thing happens in cm_prepare_el3_exit. They should be
	 * unified with the proper PMU implementation.
	 */
	u_register_t mdcr_el2 = ((read_pmcr_el0() >> PMCR_EL0_N_SHIFT) &
			PMCR_EL0_N_MASK);
	write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_MDCR_EL2, mdcr_el2);

	if (is_feat_hcx_supported()) {
		/*
		 * Initialize register HCRX_EL2 with its init value.
		 * As the value of HCRX_EL2 is UNKNOWN on reset, there is a
		 * chance that this can lead to unexpected behavior in lower
		 * ELs that have not been updated since the introduction of
		 * this feature if not properly initialized, especially when
		 * it comes to those bits that enable/disable traps.
		 */
		write_ctx_reg(get_el2_sysregs_ctx(ctx), CTX_HCRX_EL2,
			HCRX_EL2_INIT_VAL);
	}
#endif /* CTX_INCLUDE_EL2_REGS */

	manage_extensions_nonsecure(ctx);
}

/*******************************************************************************
 * The following function performs initialization of the cpu_context 'ctx'
 * for first use that is common to all security states, and sets the
 * initial entrypoint state as specified by the entry_point_info structure.
 *
 * The EE and ST attributes are used to configure the endianness and secure
 * timer availability for the new execution context.
 ******************************************************************************/
static void setup_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;
	gp_regs_t *gp_regs;

	/* Clear any residual register values from the context */
	zeromem(ctx, sizeof(*ctx));

	/*
	 * SCR_EL3 was initialised during the reset sequence in the macro
	 * el3_arch_init_common. This code modifies the SCR_EL3 fields that
	 * affect the next EL.
	 *
	 * The following fields are initially set to zero and then updated to
	 * the required value depending on the state of the SPSR_EL3 and the
	 * Security state and entrypoint attributes of the next EL.
	 */
	scr_el3 = read_scr();
	scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_EA_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT |
			SCR_ST_BIT | SCR_HCE_BIT | SCR_NSE_BIT);

	/*
	 * SCR_EL3.RW: Set the execution state, AArch32 or AArch64, for the
	 *  next Exception level as specified by the SPSR.
	 */
	if (GET_RW(ep->spsr) == MODE_RW_64) {
		scr_el3 |= SCR_RW_BIT;
	}

	/*
	 * SCR_EL3.ST: Traps Secure EL1 accesses to the Counter-timer Physical
	 * Secure timer registers to EL3, from AArch64 state only, if specified
	 * by the entrypoint attributes. If S-EL2 is present and enabled, the
	 * ST bit always behaves as 1 (i.e. secure physical timer register
	 * access is not trapped).
	 */
	if (EP_GET_ST(ep->h.attr) != 0U) {
		scr_el3 |= SCR_ST_BIT;
	}

	/*
	 * If FEAT_HCX is enabled, enable access to HCRX_EL2 by setting
	 * SCR_EL3.HXEn.
	 */
	if (is_feat_hcx_supported()) {
		scr_el3 |= SCR_HXEn_BIT;
	}

	/*
	 * If FEAT_RNG_TRAP is enabled, all reads of the RNDR and RNDRRS
	 * registers are trapped to EL3.
	 */
#if ENABLE_FEAT_RNG_TRAP
	scr_el3 |= SCR_TRNDR_BIT;
#endif

#if FAULT_INJECTION_SUPPORT
	/* Enable fault injection from lower ELs */
	scr_el3 |= SCR_FIEN_BIT;
#endif

	/*
	 * SCR_EL3.TCR2EN: Enable access to TCR2_ELx for AArch64 if present.
	 */
	if (is_feat_tcr2_supported() && (GET_RW(ep->spsr) == MODE_RW_64)) {
		scr_el3 |= SCR_TCR2EN_BIT;
	}

	/*
	 * SCR_EL3.PIEN: Enable the permission indirection and overlay
	 * registers for AArch64 if present.
	 */
	if (is_feat_sxpie_supported() || is_feat_sxpoe_supported()) {
		scr_el3 |= SCR_PIEN_BIT;
	}

	/*
	 * SCR_EL3.GCSEn: Enable the GCS registers for AArch64 if present.
	 */
	if (is_feat_gcs_supported() && (GET_RW(ep->spsr) == MODE_RW_64)) {
		scr_el3 |= SCR_GCSEn_BIT;
	}

	/*
	 * CPTR_EL3 was initialized out of reset; copy that value to the
	 * context register.
	 */
	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, read_cptr_el3());

	/*
	 * SCR_EL3.HCE: Enable HVC instructions if the next execution state is
	 * AArch64 and the next EL is EL2, or if the next execution state is
	 * AArch32 and the next mode is Hyp.
	 * SCR_EL3.FGTEn: Enable Fine Grained Virtualization Traps under the
	 * same conditions as HVC instructions and when the processor supports
	 * ARMv8.6-FGT.
	 * SCR_EL3.ECVEn: Enable the Enhanced Counter Virtualization (ECV)
	 * CNTPOFF_EL2 register under the same conditions as HVC instructions
	 * and when the processor supports ECV.
	 */
	if (((GET_RW(ep->spsr) == MODE_RW_64) && (GET_EL(ep->spsr) == MODE_EL2))
	    || ((GET_RW(ep->spsr) != MODE_RW_64)
		&& (GET_M32(ep->spsr) == MODE32_hyp))) {
		scr_el3 |= SCR_HCE_BIT;

		if (is_feat_fgt_supported()) {
			scr_el3 |= SCR_FGTEN_BIT;
		}

		if (is_feat_ecv_supported()) {
			scr_el3 |= SCR_ECVEN_BIT;
		}
	}

	/* Enable the WFE trap delay in SCR_EL3 if supported and configured */
	if (is_feat_twed_supported()) {
		/* Set the delay in SCR_EL3 */
		scr_el3 &= ~(SCR_TWEDEL_MASK << SCR_TWEDEL_SHIFT);
		scr_el3 |= ((TWED_DELAY & SCR_TWEDEL_MASK)
				<< SCR_TWEDEL_SHIFT);

		/* Enable the WFE delay */
		scr_el3 |= SCR_TWEDEn_BIT;
	}

	/*
	 * Populate the EL3 state so that we have the right context
	 * before doing ERET.
	 */
	state = get_el3state_ctx(ctx);
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
	write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
	write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);

	/*
	 * Store the X0-X7 values from the entrypoint into the context.
	 * Use memcpy as we are in control of the layout of the structures.
	 */
	gp_regs = get_gpregs_ctx(ctx);
	memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
}

/*******************************************************************************
 * Context management library initialization routine. This library is used by
 * runtime services to share pointers to 'cpu_context' structures for the
 * secure, non-secure and realm states. Management of the structures and their
 * associated memory is not done by the context management library, e.g. the
 * PSCI service manages the cpu context used for entry from and exit to the
 * non-secure state. The Secure payload dispatcher service manages the
 * context(s) corresponding to the secure state. It also uses this library to
 * get access to the non-secure state cpu context pointers.
 * Lastly, this library provides the API to make SP_EL3 point to the cpu
 * context which will be used for programming an entry into a lower EL. The
 * same context will be used to save state upon exception entry from that EL.
 ******************************************************************************/
void __init cm_init(void)
{
	/*
	 * The context management library has only global data to initialize,
	 * but that will be done when the BSS is zeroed out.
	 */
}

/*******************************************************************************
 * This is the high-level function used to initialize the cpu_context 'ctx'
 * for first use. It performs initializations that are common to all security
 * states and initializations specific to the security state specified in
 * 'ep'.
 ******************************************************************************/
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	unsigned int security_state;

	assert(ctx != NULL);

	/*
	 * Perform initializations that are common
	 * to all security states.
	 */
	setup_context_common(ctx, ep);

	security_state = GET_SECURITY_STATE(ep->h.attr);

	/* Perform security state specific initializations */
	switch (security_state) {
	case SECURE:
		setup_secure_context(ctx, ep);
		break;
#if ENABLE_RME
	case REALM:
		setup_realm_context(ctx, ep);
		break;
#endif
	case NON_SECURE:
		setup_ns_context(ctx, ep);
		break;
	default:
		ERROR("Invalid security state\n");
		panic();
		break;
	}
}
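
/*******************************************************************************
 * Usage sketch (illustrative, simplified): a runtime service typically fills
 * in an entry_point_info structure and hands it to cm_setup_context() via one
 * of the wrappers defined later in this file. 'payload_entrypoint' and
 * 'boot_arg' below are hypothetical placeholders.
 *
 *	entry_point_info_t ep;
 *
 *	zeromem(&ep, sizeof(ep));
 *	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);
 *	ep.pc = payload_entrypoint;
 *	ep.spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
 *	ep.args.arg0 = boot_arg;
 *
 *	cm_init_my_context(&ep);
 ******************************************************************************/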

/*******************************************************************************
 * Enable architecture extensions on first entry to the Non-secure world.
 * When EL2 is implemented but unused, `el2_unused` is true; otherwise it is
 * false. This function updates some registers in-place and its contents are
 * in the process of being moved to cm_manage_extensions_el3 and
 * cm_manage_extensions_nonsecure.
 ******************************************************************************/
static void manage_extensions_nonsecure_mixed(bool el2_unused, cpu_context_t *ctx)
{
#if IMAGE_BL31
	if (is_feat_spe_supported()) {
		spe_enable(el2_unused);
	}

	if (is_feat_amu_supported()) {
		amu_enable(el2_unused, ctx);
	}

	/* Enable SVE and FPU/SIMD */
	if (is_feat_sve_supported()) {
		sve_enable(ctx);
	}

	if (is_feat_sme_supported()) {
		sme_enable(ctx);
	}

	if (is_feat_mpam_supported()) {
		mpam_enable(el2_unused);
	}

	if (is_feat_trbe_supported()) {
		trbe_enable();
	}

	if (is_feat_brbe_supported()) {
		brbe_enable();
	}

	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_enable(ctx);
	}

	if (is_feat_trf_supported()) {
		trf_enable();
	}
#endif
}

/*******************************************************************************
 * Enable architecture extensions for EL3 execution. This function only updates
 * registers in-place which are expected to either never change or be
 * overwritten by el3_exit.
 ******************************************************************************/
#if IMAGE_BL31
void cm_manage_extensions_el3(void)
{
}
#endif /* IMAGE_BL31 */

/*******************************************************************************
 * Enable architecture extensions on first entry to the Non-secure world.
 ******************************************************************************/
static void manage_extensions_nonsecure(cpu_context_t *ctx)
{
#if IMAGE_BL31
#endif /* IMAGE_BL31 */
}

/*******************************************************************************
 * Enable architecture extensions in-place at EL2 on first entry to the
 * Non-secure world when EL2 is empty and unused.
 ******************************************************************************/
static void manage_extensions_nonsecure_el2_unused(void)
{
#if IMAGE_BL31
#endif /* IMAGE_BL31 */
}

/*******************************************************************************
 * Enable architecture extensions on first entry to the Secure world.
 ******************************************************************************/
static void manage_extensions_secure(cpu_context_t *ctx)
{
#if IMAGE_BL31
	if (is_feat_sve_supported()) {
		if (ENABLE_SVE_FOR_SWD) {
			/*
			 * Enable SVE and FPU in the secure context; the secure
			 * manager must ensure that the SVE and FPU register
			 * contexts are properly managed.
			 */
			sve_enable(ctx);
		} else {
			/*
			 * Disable SVE and FPU in the secure context so the
			 * non-secure world can safely use them.
			 */
			sve_disable(ctx);
		}
	}

	if (is_feat_sme_supported()) {
		if (ENABLE_SME_FOR_SWD) {
			/*
			 * Enable SME, SVE and FPU/SIMD in the secure context;
			 * the secure manager must ensure that the SME, SVE and
			 * FPU/SIMD contexts are properly managed.
			 */
			sme_enable(ctx);
		} else {
			/*
			 * Disable SME, SVE and FPU/SIMD in the secure context
			 * so the non-secure world can safely use the
			 * associated registers.
			 */
			sme_disable(ctx);
		}
	}
#endif /* IMAGE_BL31 */
}

/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure.
 ******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
			      const entry_point_info_t *ep)
{
	cpu_context_t *ctx;

	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}

/*******************************************************************************
 * The following function initializes the cpu_context for the current CPU
 * for first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 ******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}

/*******************************************************************************
 * Prepare the CPU system registers for first entry into the realm, secure, or
 * normal world.
 *
 * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized.
 * If execution is requested to non-secure EL1 or svc mode, and the CPU
 * supports EL2, then EL2 is disabled by configuring all necessary EL2
 * registers. For all entries, the EL1 registers are initialized from the
 * cpu_context.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	u_register_t sctlr_elx, scr_el3, mdcr_el2;
	cpu_context_t *ctx = cm_get_context(security_state);
	bool el2_unused = false;
	uint64_t hcr_el2 = 0U;

	assert(ctx != NULL);

	if (security_state == NON_SECURE) {
		uint64_t el2_implemented = el_implemented(2);

		scr_el3 = read_ctx_reg(get_el3state_ctx(ctx),
						 CTX_SCR_EL3);

		if (((scr_el3 & SCR_HCE_BIT) != 0U)
			|| (el2_implemented != EL_IMPL_NONE)) {
			/*
			 * If the context is not being used for EL2, initialize
			 * HCRX_EL2 with its init value here.
			 */
			if (is_feat_hcx_supported()) {
				write_hcrx_el2(HCRX_EL2_INIT_VAL);
			}
		}

		if ((scr_el3 & SCR_HCE_BIT) != 0U) {
			/* Use the SCTLR_EL1.EE value to initialise sctlr_el2 */
			sctlr_elx = read_ctx_reg(get_el1_sysregs_ctx(ctx),
							   CTX_SCTLR_EL1);
			sctlr_elx &= SCTLR_EE_BIT;
			sctlr_elx |= SCTLR_EL2_RES1;
#if ERRATA_A75_764081
			/*
			 * If the workaround for Cortex-A75 erratum 764081 is
			 * used then set SCTLR_EL2.IESB to enable the Implicit
			 * Error Synchronization Barrier.
			 */
			sctlr_elx |= SCTLR_IESB_BIT;
#endif
			write_sctlr_el2(sctlr_elx);
		} else if (el2_implemented != EL_IMPL_NONE) {
			el2_unused = true;

			/*
			 * EL2 present but unused, need to disable safely.
			 * SCTLR_EL2 can be ignored in this case.
			 *
			 * Set the EL2 register width appropriately: set the
			 * HCR_EL2.RW field to match SCR_EL3.RW.
			 */
			if ((scr_el3 & SCR_RW_BIT) != 0U)
				hcr_el2 |= HCR_RW_BIT;

			/*
			 * For the Armv8.3 pointer authentication feature,
			 * disable traps to EL2 when accessing key registers
			 * or using pointer authentication instructions from
			 * lower ELs.
			 */
			hcr_el2 |= (HCR_API_BIT | HCR_APK_BIT);

			write_hcr_el2(hcr_el2);

			/*
			 * Initialise CPTR_EL2 setting all fields rather than
			 * relying on the hw. All fields have architecturally
			 * UNKNOWN reset values.
			 *
			 * CPTR_EL2.TCPAC: Set to zero so that Non-secure EL1
			 *  accesses to the CPACR_EL1 or CPACR from both
			 *  Execution states do not trap to EL2.
			 *
			 * CPTR_EL2.TTA: Set to zero so that Non-secure System
			 *  register accesses to the trace registers from both
			 *  Execution states do not trap to EL2.
			 *  If PE trace unit System registers are not
			 *  implemented then this bit is reserved, and must be
			 *  set to zero.
			 *
			 * CPTR_EL2.TFP: Set to zero so that Non-secure accesses
			 *  to SIMD and floating-point functionality from both
			 *  Execution states do not trap to EL2.
			 */
			write_cptr_el2(CPTR_EL2_RESET_VAL &
					~(CPTR_EL2_TCPAC_BIT | CPTR_EL2_TTA_BIT
					| CPTR_EL2_TFP_BIT));

			/*
			 * Initialise CNTHCTL_EL2. All fields are
			 * architecturally UNKNOWN on reset and are set to zero
			 * except for the field(s) listed below.
			 *
			 * CNTHCTL_EL2.EL1PTEN: Set to one to disable traps to
			 *  Hyp mode of Non-secure EL0 and EL1 accesses to the
			 *  physical timer registers.
			 *
			 * CNTHCTL_EL2.EL1PCTEN: Set to one to disable traps to
			 *  Hyp mode of Non-secure EL0 and EL1 accesses to the
			 *  physical counter registers.
			 */
			write_cnthctl_el2(CNTHCTL_RESET_VAL |
						EL1PCEN_BIT | EL1PCTEN_BIT);

			/*
			 * Initialise CNTVOFF_EL2 to zero as it resets to an
			 * architecturally UNKNOWN value.
			 */
			write_cntvoff_el2(0);

			/*
			 * Set VPIDR_EL2 and VMPIDR_EL2 to match MIDR_EL1 and
			 * MPIDR_EL1 respectively.
			 */
			write_vpidr_el2(read_midr_el1());
			write_vmpidr_el2(read_mpidr_el1());

			/*
			 * Initialise VTTBR_EL2. All fields are architecturally
			 * UNKNOWN on reset.
			 *
			 * VTTBR_EL2.VMID: Set to zero. Even though EL1&0 stage
			 *  2 address translation is disabled, cache maintenance
			 *  operations depend on the VMID.
			 *
			 * VTTBR_EL2.BADDR: Set to zero as EL1&0 stage 2 address
			 *  translation is disabled.
			 */
			write_vttbr_el2(VTTBR_RESET_VAL &
				~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT)
				| (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));

			/*
			 * Initialise MDCR_EL2, setting all fields rather than
			 * relying on hw. Some fields are architecturally
			 * UNKNOWN on reset.
			 *
			 * MDCR_EL2.HLP: Set to one so that event counter
			 *  overflow, that is recorded in PMOVSCLR_EL0[0-30],
			 *  occurs on the increment that changes
			 *  PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU is
			 *  implemented. This bit is RES0 in versions of the
			 *  architecture earlier than ARMv8.5; setting it to 1
			 *  doesn't have any effect on them.
			 *
			 * MDCR_EL2.TTRF: Set to zero so that access to the
			 *  Trace Filter Control register TRFCR_EL1 at EL1 is
			 *  not trapped to EL2. This bit is RES0 in versions of
			 *  the architecture earlier than ARMv8.4.
			 *
			 * MDCR_EL2.HPMD: Set to one so that event counting is
			 *  prohibited at EL2. This bit is RES0 in versions of
			 *  the architecture earlier than ARMv8.1; setting it
			 *  to 1 doesn't have any effect on them.
			 *
			 * MDCR_EL2.TPMS: Set to zero so that accesses to
			 *  Statistical Profiling control registers from EL1
			 *  do not trap to EL2. This bit is RES0 when SPE is
			 *  not implemented.
			 *
			 * MDCR_EL2.TDRA: Set to zero so that Non-secure EL0 and
			 *  EL1 System register accesses to the Debug ROM
			 *  registers are not trapped to EL2.
			 *
			 * MDCR_EL2.TDOSA: Set to zero so that Non-secure EL1
			 *  System register accesses to the powerdown debug
			 *  registers are not trapped to EL2.
			 *
			 * MDCR_EL2.TDA: Set to zero so that System register
			 *  accesses to the debug registers do not trap to EL2.
			 *
			 * MDCR_EL2.TDE: Set to zero so that debug exceptions
			 *  are not routed to EL2.
			 *
			 * MDCR_EL2.HPME: Set to zero to disable the EL2
			 *  Performance Monitors.
			 *
			 * MDCR_EL2.TPM: Set to zero so that Non-secure EL0 and
			 *  EL1 accesses to all Performance Monitors registers
			 *  are not trapped to EL2.
			 *
			 * MDCR_EL2.TPMCR: Set to zero so that Non-secure EL0
			 *  and EL1 accesses to the PMCR_EL0 or PMCR are not
			 *  trapped to EL2.
			 *
			 * MDCR_EL2.HPMN: Set to the value of PMCR_EL0.N, which
			 *  is the architecturally-defined reset value.
			 *
			 * MDCR_EL2.E2TB: Set to zero so that the trace Buffer
			 *  owning exception level is NS-EL1 and tracing is
			 *  prohibited at NS-EL2. These bits are RES0 when
			 *  FEAT_TRBE is not implemented.
			 */
			mdcr_el2 = ((MDCR_EL2_RESET_VAL | MDCR_EL2_HLP |
				     MDCR_EL2_HPMD) |
				   ((read_pmcr_el0() & PMCR_EL0_N_BITS)
				   >> PMCR_EL0_N_SHIFT)) &
				   ~(MDCR_EL2_TTRF | MDCR_EL2_TPMS |
				     MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT |
				     MDCR_EL2_TDA_BIT | MDCR_EL2_TDE_BIT |
				     MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT |
				     MDCR_EL2_TPMCR_BIT |
				     MDCR_EL2_E2TB(MDCR_EL2_E2TB_EL1));

			write_mdcr_el2(mdcr_el2);

			/*
			 * Initialise HSTR_EL2. All fields are architecturally
			 * UNKNOWN on reset.
			 *
			 * HSTR_EL2.T<n>: Set all these fields to zero so that
			 *  Non-secure EL0 or EL1 accesses to System registers
			 *  do not trap to EL2.
			 */
			write_hstr_el2(HSTR_EL2_RESET_VAL & ~(HSTR_EL2_T_MASK));
			/*
			 * Initialise CNTHP_CTL_EL2. All fields are
			 * architecturally UNKNOWN on reset.
			 *
			 * CNTHP_CTL_EL2.ENABLE: Set to zero to disable the EL2
			 *  physical timer and prevent timer interrupts.
			 */
			write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL &
						~(CNTHP_CTL_ENABLE_BIT));

			manage_extensions_nonsecure_el2_unused();
		}
		manage_extensions_nonsecure_mixed(el2_unused, ctx);
	}

	cm_el1_sysregs_context_restore(security_state);
	cm_set_next_eret_context(security_state);
}
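
/*******************************************************************************
 * Usage sketch (illustrative, simplified): a BL31-style boot path might
 * initialize the next image's context and then prepare the exit roughly as
 * follows, with 'ep' assumed to be an already-populated entry_point_info
 * structure:
 *
 *	cm_init_my_context(&ep);
 *	cm_prepare_el3_exit(NON_SECURE);
 *	// ...followed by the ERET performed on the el3_exit path.
 ******************************************************************************/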

#if CTX_INCLUDE_EL2_REGS

static void el2_sysregs_context_save_fgt(el2_sysregs_t *ctx)
{
	write_ctx_reg(ctx, CTX_HDFGRTR_EL2, read_hdfgrtr_el2());
	if (is_feat_amu_supported()) {
		write_ctx_reg(ctx, CTX_HAFGRTR_EL2, read_hafgrtr_el2());
	}
	write_ctx_reg(ctx, CTX_HDFGWTR_EL2, read_hdfgwtr_el2());
	write_ctx_reg(ctx, CTX_HFGITR_EL2, read_hfgitr_el2());
	write_ctx_reg(ctx, CTX_HFGRTR_EL2, read_hfgrtr_el2());
	write_ctx_reg(ctx, CTX_HFGWTR_EL2, read_hfgwtr_el2());
}

static void el2_sysregs_context_restore_fgt(el2_sysregs_t *ctx)
{
	write_hdfgrtr_el2(read_ctx_reg(ctx, CTX_HDFGRTR_EL2));
	if (is_feat_amu_supported()) {
		write_hafgrtr_el2(read_ctx_reg(ctx, CTX_HAFGRTR_EL2));
	}
	write_hdfgwtr_el2(read_ctx_reg(ctx, CTX_HDFGWTR_EL2));
	write_hfgitr_el2(read_ctx_reg(ctx, CTX_HFGITR_EL2));
	write_hfgrtr_el2(read_ctx_reg(ctx, CTX_HFGRTR_EL2));
	write_hfgwtr_el2(read_ctx_reg(ctx, CTX_HFGWTR_EL2));
}

static void el2_sysregs_context_save_mpam(el2_sysregs_t *ctx)
{
	u_register_t mpam_idr = read_mpamidr_el1();

	write_ctx_reg(ctx, CTX_MPAM2_EL2, read_mpam2_el2());

	/*
	 * The context registers that we intend to save would be part of the
	 * PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
	 */
	if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) {
		return;
	}

	/*
	 * MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 are always present if
	 * MPAMIDR_EL1.HAS_HCR == 1.
	 */
	write_ctx_reg(ctx, CTX_MPAMHCR_EL2, read_mpamhcr_el2());
	write_ctx_reg(ctx, CTX_MPAMVPM0_EL2, read_mpamvpm0_el2());
	write_ctx_reg(ctx, CTX_MPAMVPMV_EL2, read_mpamvpmv_el2());

	/*
	 * The number of MPAMVPM registers is implementation defined; their
	 * number is stored in the MPAMIDR_EL1 register.
	 */
	switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) {
	case 7:
		write_ctx_reg(ctx, CTX_MPAMVPM7_EL2, read_mpamvpm7_el2());
		__fallthrough;
	case 6:
		write_ctx_reg(ctx, CTX_MPAMVPM6_EL2, read_mpamvpm6_el2());
		__fallthrough;
	case 5:
		write_ctx_reg(ctx, CTX_MPAMVPM5_EL2, read_mpamvpm5_el2());
		__fallthrough;
	case 4:
		write_ctx_reg(ctx, CTX_MPAMVPM4_EL2, read_mpamvpm4_el2());
		__fallthrough;
	case 3:
		write_ctx_reg(ctx, CTX_MPAMVPM3_EL2, read_mpamvpm3_el2());
		__fallthrough;
	case 2:
		write_ctx_reg(ctx, CTX_MPAMVPM2_EL2, read_mpamvpm2_el2());
		__fallthrough;
	case 1:
		write_ctx_reg(ctx, CTX_MPAMVPM1_EL2, read_mpamvpm1_el2());
		break;
	}
}

static void el2_sysregs_context_restore_mpam(el2_sysregs_t *ctx)
{
	u_register_t mpam_idr = read_mpamidr_el1();

	write_mpam2_el2(read_ctx_reg(ctx, CTX_MPAM2_EL2));

	if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) {
		return;
	}

	write_mpamhcr_el2(read_ctx_reg(ctx, CTX_MPAMHCR_EL2));
	write_mpamvpm0_el2(read_ctx_reg(ctx, CTX_MPAMVPM0_EL2));
	write_mpamvpmv_el2(read_ctx_reg(ctx, CTX_MPAMVPMV_EL2));

	switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) {
	case 7:
		write_mpamvpm7_el2(read_ctx_reg(ctx, CTX_MPAMVPM7_EL2));
		__fallthrough;
	case 6:
		write_mpamvpm6_el2(read_ctx_reg(ctx, CTX_MPAMVPM6_EL2));
		__fallthrough;
	case 5:
		write_mpamvpm5_el2(read_ctx_reg(ctx, CTX_MPAMVPM5_EL2));
		__fallthrough;
	case 4:
		write_mpamvpm4_el2(read_ctx_reg(ctx, CTX_MPAMVPM4_EL2));
		__fallthrough;
	case 3:
		write_mpamvpm3_el2(read_ctx_reg(ctx, CTX_MPAMVPM3_EL2));
		__fallthrough;
	case 2:
		write_mpamvpm2_el2(read_ctx_reg(ctx, CTX_MPAMVPM2_EL2));
		__fallthrough;
	case 1:
		write_mpamvpm1_el2(read_ctx_reg(ctx, CTX_MPAMVPM1_EL2));
		break;
	}
}

/*******************************************************************************
 * Save the EL2 sysreg context
 ******************************************************************************/
void cm_el2_sysregs_context_save(uint32_t security_state)
{
	u_register_t scr_el3 = read_scr();

	/*
	 * Always save the non-secure and realm EL2 context; only save the
	 * S-EL2 context if S-EL2 is enabled.
	 */
	if ((security_state != SECURE) ||
	    ((security_state == SECURE) && ((scr_el3 & SCR_EEL2_BIT) != 0U))) {
		cpu_context_t *ctx;
		el2_sysregs_t *el2_sysregs_ctx;

		ctx = cm_get_context(security_state);
		assert(ctx != NULL);

		el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);

		el2_sysregs_context_save_common(el2_sysregs_ctx);
#if CTX_INCLUDE_MTE_REGS
		el2_sysregs_context_save_mte(el2_sysregs_ctx);
#endif
		if (is_feat_mpam_supported()) {
			el2_sysregs_context_save_mpam(el2_sysregs_ctx);
		}

		if (is_feat_fgt_supported()) {
			el2_sysregs_context_save_fgt(el2_sysregs_ctx);
		}

		if (is_feat_ecv_v2_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_CNTPOFF_EL2,
				      read_cntpoff_el2());
		}

		if (is_feat_vhe_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_CONTEXTIDR_EL2,
				      read_contextidr_el2());
			write_ctx_reg(el2_sysregs_ctx, CTX_TTBR1_EL2,
				      read_ttbr1_el2());
		}

		if (is_feat_ras_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_VDISR_EL2,
				      read_vdisr_el2());
			write_ctx_reg(el2_sysregs_ctx, CTX_VSESR_EL2,
				      read_vsesr_el2());
		}

		if (is_feat_nv2_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_VNCR_EL2,
				      read_vncr_el2());
		}

		if (is_feat_trf_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_TRFCR_EL2, read_trfcr_el2());
		}

		if (is_feat_csv2_2_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_SCXTNUM_EL2,
				      read_scxtnum_el2());
		}

		if (is_feat_hcx_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_HCRX_EL2, read_hcrx_el2());
		}
		if (is_feat_tcr2_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_TCR2_EL2, read_tcr2_el2());
		}
		if (is_feat_sxpie_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_PIRE0_EL2, read_pire0_el2());
			write_ctx_reg(el2_sysregs_ctx, CTX_PIR_EL2, read_pir_el2());
		}
		if (is_feat_s2pie_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_S2PIR_EL2, read_s2pir_el2());
		}
		if (is_feat_sxpoe_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_POR_EL2, read_por_el2());
		}
		if (is_feat_gcs_supported()) {
			write_ctx_reg(el2_sysregs_ctx, CTX_GCSPR_EL2, read_gcspr_el2());
			write_ctx_reg(el2_sysregs_ctx, CTX_GCSCR_EL2, read_gcscr_el2());
		}
	}
}

/*******************************************************************************
 * Restore the EL2 sysreg context
 ******************************************************************************/
void cm_el2_sysregs_context_restore(uint32_t security_state)
{
	u_register_t scr_el3 = read_scr();

	/*
	 * Always restore the non-secure and realm EL2 context; only restore
	 * the S-EL2 context if S-EL2 is enabled.
	 */
	if ((security_state != SECURE) ||
	    ((security_state == SECURE) && ((scr_el3 & SCR_EEL2_BIT) != 0U))) {
		cpu_context_t *ctx;
		el2_sysregs_t *el2_sysregs_ctx;

		ctx = cm_get_context(security_state);
		assert(ctx != NULL);

		el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);

		el2_sysregs_context_restore_common(el2_sysregs_ctx);
#if CTX_INCLUDE_MTE_REGS
		el2_sysregs_context_restore_mte(el2_sysregs_ctx);
#endif
		if (is_feat_mpam_supported()) {
			el2_sysregs_context_restore_mpam(el2_sysregs_ctx);
		}

		if (is_feat_fgt_supported()) {
			el2_sysregs_context_restore_fgt(el2_sysregs_ctx);
		}

		if (is_feat_ecv_v2_supported()) {
			write_cntpoff_el2(read_ctx_reg(el2_sysregs_ctx,
						       CTX_CNTPOFF_EL2));
		}

		if (is_feat_vhe_supported()) {
			write_contextidr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_CONTEXTIDR_EL2));
			write_ttbr1_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TTBR1_EL2));
		}

		if (is_feat_ras_supported()) {
			write_vdisr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VDISR_EL2));
			write_vsesr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VSESR_EL2));
		}

		if (is_feat_nv2_supported()) {
			write_vncr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_VNCR_EL2));
		}

		if (is_feat_trf_supported()) {
			write_trfcr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TRFCR_EL2));
		}

		if (is_feat_csv2_2_supported()) {
			write_scxtnum_el2(read_ctx_reg(el2_sysregs_ctx,
						       CTX_SCXTNUM_EL2));
		}

		if (is_feat_hcx_supported()) {
			write_hcrx_el2(read_ctx_reg(el2_sysregs_ctx, CTX_HCRX_EL2));
		}
		if (is_feat_tcr2_supported()) {
			write_tcr2_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TCR2_EL2));
		}
		if (is_feat_sxpie_supported()) {
			write_pire0_el2(read_ctx_reg(el2_sysregs_ctx, CTX_PIRE0_EL2));
			write_pir_el2(read_ctx_reg(el2_sysregs_ctx, CTX_PIR_EL2));
		}
		if (is_feat_s2pie_supported()) {
			write_s2pir_el2(read_ctx_reg(el2_sysregs_ctx, CTX_S2PIR_EL2));
		}
		if (is_feat_sxpoe_supported()) {
			write_por_el2(read_ctx_reg(el2_sysregs_ctx, CTX_POR_EL2));
		}
		if (is_feat_gcs_supported()) {
			write_gcscr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_GCSCR_EL2));
			write_gcspr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_GCSPR_EL2));
		}
	}
}
#endif /* CTX_INCLUDE_EL2_REGS */

/*******************************************************************************
 * This function is used to exit to the Non-secure world. If
 * CTX_INCLUDE_EL2_REGS is enabled, it restores the EL1 and EL2 sysreg contexts
 * instead of directly updating the EL1 and EL2 registers. Otherwise, it calls
 * the generic cm_prepare_el3_exit function.
 ******************************************************************************/
void cm_prepare_el3_exit_ns(void)
{
#if CTX_INCLUDE_EL2_REGS
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	assert(ctx != NULL);

	/* Assert that EL2 is used. */
#if ENABLE_ASSERTIONS
	el3_state_t *state = get_el3state_ctx(ctx);
	u_register_t scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
#endif
	assert(((scr_el3 & SCR_HCE_BIT) != 0UL) &&
			(el_implemented(2U) != EL_IMPL_NONE));

	/*
	 * Currently some extensions are configured using
	 * direct register updates. Therefore, do this here
	 * instead of when setting up the context.
	 */
	manage_extensions_nonsecure_mixed(false, ctx);

	/*
	 * Set the NS bit to be able to access the ICC_SRE_EL2
	 * register when restoring the context.
	 */
	write_scr_el3(read_scr_el3() | SCR_NS_BIT);

	/*
	 * Ensure the NS bit change is committed before the EL2/EL1
	 * state restoration.
	 */
	isb();

	/* Restore the EL2 and EL1 sysreg contexts */
	cm_el2_sysregs_context_restore(NON_SECURE);
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);
#else
	cm_prepare_el3_exit(NON_SECURE);
#endif /* CTX_INCLUDE_EL2_REGS */
}
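
/*******************************************************************************
 * Usage sketch (illustrative, simplified): a dispatcher returning from the
 * Secure world to the Non-secure world would typically pair the save calls
 * with the exit preparation like this (with CTX_INCLUDE_EL2_REGS enabled):
 *
 *	cm_el1_sysregs_context_save(SECURE);
 *	cm_el2_sysregs_context_save(SECURE);
 *
 *	cm_prepare_el3_exit_ns();
 ******************************************************************************/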

/*******************************************************************************
 * The following functions are used by runtime services to save and restore
 * the EL1 context on the 'cpu_context' structure for the specified security
 * state.
 ******************************************************************************/
void cm_el1_sysregs_context_save(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el1_sysregs_context_save(get_el1_sysregs_ctx(ctx));

#if IMAGE_BL31
	if (security_state == SECURE)
		PUBLISH_EVENT(cm_exited_secure_world);
	else
		PUBLISH_EVENT(cm_exited_normal_world);
#endif
}

void cm_el1_sysregs_context_restore(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el1_sysregs_context_restore(get_el1_sysregs_ctx(ctx));

#if IMAGE_BL31
	if (security_state == SECURE)
		PUBLISH_EVENT(cm_entering_secure_world);
	else
		PUBLISH_EVENT(cm_entering_normal_world);
#endif
}

/*******************************************************************************
 * This function populates the ELR_EL3 member of the 'cpu_context' pertaining
 * to the given security state with the given entrypoint.
 ******************************************************************************/
void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint)
{
	cpu_context_t *ctx;
	el3_state_t *state;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Populate the EL3 state so that ERET jumps to the correct entry */
	state = get_el3state_ctx(ctx);
	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
}

/*******************************************************************************
 * This function populates the ELR_EL3 and SPSR_EL3 members of the
 * 'cpu_context' pertaining to the given security state.
 ******************************************************************************/
void cm_set_elr_spsr_el3(uint32_t security_state,
			uintptr_t entrypoint, uint32_t spsr)
{
	cpu_context_t *ctx;
	el3_state_t *state;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Populate the EL3 state so that ERET jumps to the correct entry */
	state = get_el3state_ctx(ctx);
	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
	write_ctx_reg(state, CTX_SPSR_EL3, spsr);
}
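
/*
 * Usage sketch (illustrative, simplified): redirecting the next Non-secure
 * ERET to a hypothetical AArch64 EL1 handler could look like:
 *
 *	cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t)ns_handler,
 *			    SPSR_64(MODE_EL1, MODE_SP_ELX,
 *				    DISABLE_ALL_EXCEPTIONS));
 */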

/*******************************************************************************
 * This function updates a single bit in the SCR_EL3 member of the
 * 'cpu_context' pertaining to the given security state using the value and
 * bit position specified in the parameters. It preserves all other bits.
 ******************************************************************************/
void cm_write_scr_el3_bit(uint32_t security_state,
			  uint32_t bit_pos,
			  uint32_t value)
{
	cpu_context_t *ctx;
	el3_state_t *state;
	u_register_t scr_el3;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Ensure that the bit position is a valid one */
	assert(((1UL << bit_pos) & SCR_VALID_BIT_MASK) != 0U);

	/* Ensure that 'value' is only one bit wide */
	assert(value <= 1U);

	/*
	 * Get the SCR_EL3 value from the cpu context, clear the desired bit
	 * and set it to its new value.
	 */
	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
	scr_el3 &= ~(1UL << bit_pos);
	scr_el3 |= (u_register_t)value << bit_pos;
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
}
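
/*
 * Usage sketch (illustrative, simplified): the interrupt management framework
 * uses this API to flip the SCR_EL3 interrupt routing bits. For example,
 * routing Non-secure physical IRQs to EL3 could look like the following,
 * deriving the bit position from the SCR_EL3.IRQ mask:
 *
 *	cm_write_scr_el3_bit(NON_SECURE, __builtin_ctz(SCR_IRQ_BIT), 1U);
 */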

/*******************************************************************************
 * This function retrieves the SCR_EL3 member of the 'cpu_context' pertaining
 * to the given security state.
 ******************************************************************************/
u_register_t cm_get_scr_el3(uint32_t security_state)
{
	cpu_context_t *ctx;
	el3_state_t *state;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Read the SCR_EL3 value from the cpu context */
	state = get_el3state_ctx(ctx);
	return read_ctx_reg(state, CTX_SCR_EL3);
}

/*******************************************************************************
 * This function is used to program the context that's used for exception
 * return. It initializes SP_EL3 to point to the 'cpu_context' set up for the
 * required security state.
 ******************************************************************************/
void cm_set_next_eret_context(uint32_t security_state)
{
	cpu_context_t *ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	cm_set_next_context(ctx);
}
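
/*******************************************************************************
 * Usage sketch (illustrative, simplified): a minimal EL1-only world switch
 * built from the APIs above, as an SPD-style SMC handler might do when
 * forwarding execution from the Secure to the Non-secure world:
 *
 *	cm_el1_sysregs_context_save(SECURE);
 *	cm_el1_sysregs_context_restore(NON_SECURE);
 *	cm_set_next_eret_context(NON_SECURE);
 *	// The handler then returns through el3_exit, which performs the ERET.
 ******************************************************************************/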
1369