xref: /rk3399_ARM-atf/lib/el3_runtime/aarch64/context_mgmt.c (revision 9890eab5743629c10a3d7432cdb89b65e11c83b8)
/*
 * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2022, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <drivers/arm/gicv3.h>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/brbe.h>
#include <lib/extensions/debug_v8p9.h>
#include <lib/extensions/fgt2.h>
#include <lib/extensions/mpam.h>
#include <lib/extensions/pmuv3.h>
#include <lib/extensions/sme.h>
#include <lib/extensions/spe.h>
#include <lib/extensions/sve.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/extensions/tcr2.h>
#include <lib/extensions/trbe.h>
#include <lib/extensions/trf.h>
#include <lib/utils.h>

#if ENABLE_FEAT_TWED
/* Make sure the delay value fits within the range (0-15) */
CASSERT(((TWED_DELAY & ~SCR_TWEDEL_MASK) == 0U), assert_twed_delay_value_check);
#endif /* ENABLE_FEAT_TWED */
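
/*
 * Worked example (illustrative): with a hypothetical TWED_DELAY of 4,
 * (4 & ~0xf) == 0 and the build-time assert passes; a hypothetical value of
 * 16 (0x10) would leave bit 4 set and fail the CASSERT, since SCR_EL3.TWEDEL
 * is only a 4-bit field.
 */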

per_world_context_t per_world_context[CPU_DATA_CONTEXT_NUM];
static bool has_secure_perworld_init;

static void manage_extensions_common(cpu_context_t *ctx);
static void manage_extensions_nonsecure(cpu_context_t *ctx);
static void manage_extensions_secure(cpu_context_t *ctx);
static void manage_extensions_secure_per_world(void);

#if ((IMAGE_BL1) || (IMAGE_BL31 && (!CTX_INCLUDE_EL2_REGS)))
static void setup_el1_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t sctlr_elx, actlr_elx;

	/*
	 * Initialise SCTLR_EL1 to the reset value corresponding to the target
	 * execution state, setting all fields rather than relying on the hw.
	 * Some fields have architecturally UNKNOWN reset values and these are
	 * set to zero.
	 *
	 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
	 *
	 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
	 * required by the PSCI specification).
	 */
	sctlr_elx = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0UL;
	if (GET_RW(ep->spsr) == MODE_RW_64) {
		sctlr_elx |= SCTLR_EL1_RES1;
	} else {
		/*
		 * If the target execution state is AArch32 then the following
		 * fields need to be set.
		 *
		 * SCTLR_EL1.nTWE: Set to one so that EL0 execution of WFE
		 *  instructions is not trapped to EL1.
		 *
		 * SCTLR_EL1.nTWI: Set to one so that EL0 execution of WFI
		 *  instructions is not trapped to EL1.
		 *
		 * SCTLR_EL1.CP15BEN: Set to one to enable EL0 execution of the
		 *  CP15DMB, CP15DSB, and CP15ISB instructions.
		 */
		sctlr_elx |= SCTLR_AARCH32_EL1_RES1 | SCTLR_CP15BEN_BIT
					| SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
	}

	/*
	 * If the workaround for Cortex-A75 erratum 764081 is applied, set
	 * SCTLR_EL1.IESB to enable the Implicit Error Synchronization Barrier.
	 */
	if (errata_a75_764081_applies()) {
		sctlr_elx |= SCTLR_IESB_BIT;
	}

	/* Store the initialised SCTLR_EL1 value in the cpu_context */
	write_ctx_sctlr_el1_reg_errata(ctx, sctlr_elx);

	/*
	 * Base the context ACTLR_EL1 on the current value, as it is
	 * implementation defined. The context restore process will write
	 * the value from the context to the actual register and can cause
	 * problems for processor cores that don't expect certain bits to
	 * be zero.
	 */
	actlr_elx = read_actlr_el1();
	write_el1_ctx_common(get_el1_sysregs_ctx(ctx), actlr_el1, actlr_elx);
}
#endif /* (IMAGE_BL1) || (IMAGE_BL31 && (!CTX_INCLUDE_EL2_REGS)) */
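
/*
 * Worked example (illustrative): for a little-endian AArch64 payload the
 * function above reduces to sctlr_elx = SCTLR_EL1_RES1 (plus SCTLR_IESB_BIT
 * when the Cortex-A75 erratum workaround applies); the MMU, data cache and
 * instruction cache enables (SCTLR.M/C/I) all start off as zero, as PSCI
 * requires.
 */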

/******************************************************************************
 * This function performs initializations that are specific to SECURE state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_secure_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

#if defined(IMAGE_BL31) && !defined(SPD_spmd)
	/*
	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
	 * indicated by the interrupt routing model for BL31.
	 */
	scr_el3 |= get_scr_el3_from_routing_model(SECURE);
#endif

	/* Allow access to Allocation Tags when FEAT_MTE2 is implemented and enabled. */
	if (is_feat_mte2_supported()) {
		scr_el3 |= SCR_ATA_BIT;
	}

	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);

	/*
	 * Initialize EL1 context registers unless SPMC is running
	 * at S-EL2.
	 */
#if (!SPMD_SPM_AT_SEL2)
	setup_el1_context(ctx, ep);
#endif

	manage_extensions_secure(ctx);

	/*
	 * manage_extensions_secure_per_world() has to be executed only once,
	 * as the registers it initialises hold a constant value across all
	 * the CPUs for the secure world. This check therefore ensures that
	 * the registers are initialised once and avoids re-initialization
	 * from multiple cores.
	 */
	if (!has_secure_perworld_init) {
		manage_extensions_secure_per_world();
	}
}

#if ENABLE_RME
/******************************************************************************
 * This function performs initializations that are specific to REALM state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_realm_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

	scr_el3 |= SCR_NS_BIT | SCR_NSE_BIT;

	/* CSV2 version 2 and above */
	if (is_feat_csv2_2_supported()) {
		/* Enable access to the SCXTNUM_ELx registers. */
		scr_el3 |= SCR_EnSCXT_BIT;
	}

	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
}
#endif /* ENABLE_RME */

/******************************************************************************
 * This function performs initializations that are specific to NON-SECURE state
 * and updates the cpu context specified by 'ctx'.
 *****************************************************************************/
static void setup_ns_context(cpu_context_t *ctx, const struct entry_point_info *ep)
{
	u_register_t scr_el3;
	el3_state_t *state;

	state = get_el3state_ctx(ctx);
	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);

	/* SCR_NS: Set the NS bit */
	scr_el3 |= SCR_NS_BIT;

	/* Allow access to Allocation Tags when FEAT_MTE2 is implemented and enabled. */
	if (is_feat_mte2_supported()) {
		scr_el3 |= SCR_ATA_BIT;
	}

#if !CTX_INCLUDE_PAUTH_REGS
	/*
	 * The Pointer Authentication feature, if present, is always enabled
	 * by default for the Non-secure lower exception levels; there is no
	 * explicit flag to set it.
	 * The CTX_INCLUDE_PAUTH_REGS flag is explicitly used to enable it for
	 * the lower exception levels of the secure and realm worlds.
	 *
	 * To prevent leakage between the worlds during a world switch,
	 * we enable it only for the non-secure world.
	 *
	 * If the Secure/Realm world wants to use pointer authentication,
	 * CTX_INCLUDE_PAUTH_REGS must be explicitly set to 1, in which case
	 * it will be enabled globally for all the contexts.
	 *
	 * SCR_EL3.API: Set to one to not trap any PAuth instructions at ELs
	 *  other than EL3
	 *
	 * SCR_EL3.APK: Set to one to not trap any PAuth key values at ELs other
	 *  than EL3
	 */
	scr_el3 |= SCR_API_BIT | SCR_APK_BIT;

#endif /* CTX_INCLUDE_PAUTH_REGS */

#if HANDLE_EA_EL3_FIRST_NS
	/* SCR_EL3.EA: Route External Abort and SError Interrupt to EL3. */
	scr_el3 |= SCR_EA_BIT;
#endif

#if RAS_TRAP_NS_ERR_REC_ACCESS
	/*
	 * SCR_EL3.TERR: Trap Error record accesses. Accesses to the RAS ERR
	 * and RAS ERX registers from EL1 and EL2 (from any security state)
	 * are trapped to EL3.
	 * Set here to trap only for NS EL1/EL2.
	 */
	scr_el3 |= SCR_TERR_BIT;
#endif

	/* CSV2 version 2 and above */
	if (is_feat_csv2_2_supported()) {
		/* Enable access to the SCXTNUM_ELx registers. */
		scr_el3 |= SCR_EnSCXT_BIT;
	}

#ifdef IMAGE_BL31
	/*
	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
	 *  indicated by the interrupt routing model for BL31.
	 */
	scr_el3 |= get_scr_el3_from_routing_model(NON_SECURE);
#endif
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);

	/* Initialize EL2 context registers */
#if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31)

	/*
	 * Initialize SCTLR_EL2 context register with reset value.
	 */
	write_el2_ctx_common(get_el2_sysregs_ctx(ctx), sctlr_el2, SCTLR_EL2_RES1);

	if (is_feat_hcx_supported()) {
		/*
		 * Initialize register HCRX_EL2 with its init value.
		 * As the value of HCRX_EL2 is UNKNOWN on reset, there is a
		 * chance that this can lead to unexpected behavior in lower
		 * ELs that have not been updated since the introduction of
		 * this feature if not properly initialized, especially when
		 * it comes to those bits that enable/disable traps.
		 */
		write_el2_ctx_hcx(get_el2_sysregs_ctx(ctx), hcrx_el2,
			HCRX_EL2_INIT_VAL);
	}

	if (is_feat_fgt_supported()) {
		/*
		 * Initialize HFG*_EL2 registers with a default value so legacy
		 * systems unaware of FEAT_FGT do not get trapped due to their lack
		 * of initialization for this feature.
		 */
		write_el2_ctx_fgt(get_el2_sysregs_ctx(ctx), hfgitr_el2,
			HFGITR_EL2_INIT_VAL);
		write_el2_ctx_fgt(get_el2_sysregs_ctx(ctx), hfgrtr_el2,
			HFGRTR_EL2_INIT_VAL);
		write_el2_ctx_fgt(get_el2_sysregs_ctx(ctx), hfgwtr_el2,
			HFGWTR_EL2_INIT_VAL);
	}
#else
	/* Initialize EL1 context registers */
	setup_el1_context(ctx, ep);
#endif /* (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) */

	manage_extensions_nonsecure(ctx);
}

/*******************************************************************************
 * The following function performs initialization of the cpu_context 'ctx'
 * for first use that is common to all security states, and sets the
 * initial entrypoint state as specified by the entry_point_info structure.
 *
 * The EE and ST attributes are used to configure the endianness and secure
 * timer availability for the new execution context.
 ******************************************************************************/
static void setup_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	u_register_t scr_el3;
	u_register_t mdcr_el3;
	el3_state_t *state;
	gp_regs_t *gp_regs;

	state = get_el3state_ctx(ctx);

	/* Clear any residual register values from the context */
	zeromem(ctx, sizeof(*ctx));

	/*
	 * The lower-EL context is zeroed so that no stale values leak to a world.
	 * It is assumed that an all-zero lower-EL context is good enough for it
	 * to boot correctly. However, there are a few registers where this is
	 * not true and some values need to be recreated.
	 */
#if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31)
	el2_sysregs_t *el2_ctx = get_el2_sysregs_ctx(ctx);

	/*
	 * These bits are set in the gicv3 driver. Losing them (especially the
	 * SRE bit) is problematic for all worlds. Hence, recreate them.
	 */
	u_register_t icc_sre_el2_val = ICC_SRE_DIB_BIT | ICC_SRE_DFB_BIT |
				   ICC_SRE_EN_BIT | ICC_SRE_SRE_BIT;
	write_el2_ctx_common(el2_ctx, icc_sre_el2, icc_sre_el2_val);

	/*
	 * The actlr_el2 register can be initialized in the platform's reset
	 * handler and it may contain access control bits (e.g. the
	 * CLUSTERPMUEN bit).
	 */
	write_el2_ctx_common(el2_ctx, actlr_el2, read_actlr_el2());
#endif /* (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) */

	/* Start with a clean SCR_EL3 copy as all relevant values are set */
	scr_el3 = SCR_RESET_VAL;

	/*
	 * SCR_EL3.TWE: Set to zero so that execution of WFE instructions at
	 *  EL2, EL1 and EL0 is not trapped to EL3.
	 *
	 * SCR_EL3.TWI: Set to zero so that execution of WFI instructions at
	 *  EL2, EL1 and EL0 is not trapped to EL3.
	 *
	 * SCR_EL3.SMD: Set to zero to enable SMC calls at EL1 and above, from
	 *  both Security states and both Execution states.
	 *
	 * SCR_EL3.SIF: Set to one to disable secure instruction execution from
	 *  Non-secure memory.
	 */
	scr_el3 &= ~(SCR_TWE_BIT | SCR_TWI_BIT | SCR_SMD_BIT);

	scr_el3 |= SCR_SIF_BIT;

	/*
	 * SCR_EL3.RW: Set the execution state, AArch32 or AArch64, for the
	 *  next Exception level as specified by the SPSR.
	 */
	if (GET_RW(ep->spsr) == MODE_RW_64) {
		scr_el3 |= SCR_RW_BIT;
	}

	/*
	 * SCR_EL3.ST: Traps Secure EL1 accesses to the Counter-timer Physical
	 * Secure timer registers to EL3, from AArch64 state only, if specified
	 * by the entrypoint attributes. If S-EL2 is present and enabled, the ST
	 * bit always behaves as 1 (i.e. secure physical timer register access
	 * is not trapped).
	 */
	if (EP_GET_ST(ep->h.attr) != 0U) {
		scr_el3 |= SCR_ST_BIT;
	}

	/*
	 * If FEAT_HCX is enabled, enable access to HCRX_EL2 by setting
	 * SCR_EL3.HXEn.
	 */
	if (is_feat_hcx_supported()) {
		scr_el3 |= SCR_HXEn_BIT;
	}

	/*
	 * If FEAT_RNG_TRAP is enabled, all reads of the RNDR and RNDRRS
	 * registers are trapped to EL3.
	 */
#if ENABLE_FEAT_RNG_TRAP
	scr_el3 |= SCR_TRNDR_BIT;
#endif

#if FAULT_INJECTION_SUPPORT
	/* Enable fault injection from lower ELs */
	scr_el3 |= SCR_FIEN_BIT;
#endif

#if CTX_INCLUDE_PAUTH_REGS
	/*
	 * Enable Pointer Authentication globally for all the worlds.
	 *
	 * SCR_EL3.API: Set to one to not trap any PAuth instructions at ELs
	 *  other than EL3
	 *
	 * SCR_EL3.APK: Set to one to not trap any PAuth key values at ELs other
	 *  than EL3
	 */
	scr_el3 |= SCR_API_BIT | SCR_APK_BIT;
#endif /* CTX_INCLUDE_PAUTH_REGS */

	/*
	 * SCR_EL3.TCR2EN: Enable access to TCR2_ELx for AArch64 if present.
	 */
	if (is_feat_tcr2_supported() && (GET_RW(ep->spsr) == MODE_RW_64)) {
		scr_el3 |= SCR_TCR2EN_BIT;
	}

	/*
	 * SCR_EL3.PIEN: Enable permission indirection and overlay
	 * registers for AArch64 if present.
	 */
	if (is_feat_sxpie_supported() || is_feat_sxpoe_supported()) {
		scr_el3 |= SCR_PIEN_BIT;
	}

	/*
	 * SCR_EL3.GCSEn: Enable GCS registers for AArch64 if present.
	 */
	if ((is_feat_gcs_supported()) && (GET_RW(ep->spsr) == MODE_RW_64)) {
		scr_el3 |= SCR_GCSEn_BIT;
	}

	/*
	 * SCR_EL3.HCE: Enable HVC instructions if the next execution state is
	 * AArch64 and the next EL is EL2, or if the next execution state is
	 * AArch32 and the next mode is Hyp.
	 * SCR_EL3.FGTEn: Enable Fine Grained Virtualization Traps under the
	 * same conditions as HVC instructions and when the processor supports
	 * ARMv8.6-FGT.
	 * SCR_EL3.ECVEn: Enable Enhanced Counter Virtualization (ECV)
	 * CNTPOFF_EL2 register under the same conditions as HVC instructions
	 * and when the processor supports ECV.
	 */
	if (((GET_RW(ep->spsr) == MODE_RW_64) && (GET_EL(ep->spsr) == MODE_EL2))
	    || ((GET_RW(ep->spsr) != MODE_RW_64)
		&& (GET_M32(ep->spsr) == MODE32_hyp))) {
		scr_el3 |= SCR_HCE_BIT;

		if (is_feat_fgt_supported()) {
			scr_el3 |= SCR_FGTEN_BIT;
		}

		if (is_feat_ecv_supported()) {
			scr_el3 |= SCR_ECVEN_BIT;
		}
	}

	/* Enable WFE trap delay in SCR_EL3 if supported and configured */
	if (is_feat_twed_supported()) {
		/* Set delay in SCR_EL3 */
		scr_el3 &= ~(SCR_TWEDEL_MASK << SCR_TWEDEL_SHIFT);
		scr_el3 |= ((TWED_DELAY & SCR_TWEDEL_MASK)
				<< SCR_TWEDEL_SHIFT);

		/* Enable WFE delay */
		scr_el3 |= SCR_TWEDEn_BIT;
	}

#if IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2
	/* Enable S-EL2 if FEAT_SEL2 is implemented for all the contexts. */
	if (is_feat_sel2_supported()) {
		scr_el3 |= SCR_EEL2_BIT;
	}
#endif /* (IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2) */

	/*
	 * Populate EL3 state so that we have the right context
	 * before doing ERET.
	 */
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
	write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
	write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);

	/* Start with a clean MDCR_EL3 copy as all relevant values are set */
	mdcr_el3 = MDCR_EL3_RESET_VAL;

	/* ---------------------------------------------------------------------
	 * Initialise MDCR_EL3, setting all fields rather than relying on hw.
	 * Some fields are architecturally UNKNOWN on reset.
	 *
	 * MDCR_EL3.SDD: Set to one to disable AArch64 Secure self-hosted debug.
	 *  Debug exceptions, other than Breakpoint Instruction exceptions, are
	 *  disabled from all ELs in Secure state.
	 *
	 * MDCR_EL3.SPD32: Set to 0b10 to disable AArch32 Secure self-hosted
	 *  privileged debug from S-EL1.
	 *
	 * MDCR_EL3.TDOSA: Set to zero so that EL1 and EL2 System register
	 *  accesses to the powerdown debug registers do not trap to EL3.
	 *
	 * MDCR_EL3.TDA: Set to zero to allow EL0, EL1 and EL2 access to the
	 *  debug registers, other than those registers that are controlled by
	 *  MDCR_EL3.TDOSA.
	 */
	mdcr_el3 |= ((MDCR_SDD_BIT | MDCR_SPD32(MDCR_SPD32_DISABLE))
			& ~(MDCR_TDA_BIT | MDCR_TDOSA_BIT));
	write_ctx_reg(state, CTX_MDCR_EL3, mdcr_el3);

	/*
	 * Configure MDCR_EL3 register as applicable for each world
	 * (NS/Secure/Realm) context.
	 */
	manage_extensions_common(ctx);

	/*
	 * Store the X0-X7 values from the entrypoint into the context.
	 * Use memcpy as we are in control of the layout of the structures.
	 */
	gp_regs = get_gpregs_ctx(ctx);
	memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
}

/*******************************************************************************
 * Context management library initialization routine. This library is used by
 * runtime services to share pointers to 'cpu_context' structures for the
 * secure, non-secure and realm states. Management of the structures and their
 * associated memory is not done by the context management library, e.g. the
 * PSCI service manages the cpu context used for entry from and exit to the
 * non-secure state. The Secure payload dispatcher service manages the
 * context(s) corresponding to the secure state. It also uses this library to
 * get access to the non-secure state cpu context pointers.
 * Lastly, this library provides the API to make SP_EL3 point to the cpu context
 * which will be used for programming an entry into a lower EL. The same context
 * will be used to save state upon exception entry from that EL.
 ******************************************************************************/
void __init cm_init(void)
{
	/*
	 * The context management library has only global data to initialize, but
	 * that will be done when the BSS is zeroed out.
	 */
}

/*******************************************************************************
 * This is the high-level function used to initialize the cpu_context 'ctx' for
 * first use. It performs initializations that are common to all security states
 * and initializations specific to the security state specified in 'ep'.
 ******************************************************************************/
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	unsigned int security_state;

	assert(ctx != NULL);

	/*
	 * Perform initializations that are common
	 * to all security states.
	 */
	setup_context_common(ctx, ep);

	security_state = GET_SECURITY_STATE(ep->h.attr);

	/* Perform security state specific initializations */
	switch (security_state) {
	case SECURE:
		setup_secure_context(ctx, ep);
		break;
#if ENABLE_RME
	case REALM:
		setup_realm_context(ctx, ep);
		break;
#endif
	case NON_SECURE:
		setup_ns_context(ctx, ep);
		break;
	default:
		ERROR("Invalid security state\n");
		panic();
		break;
	}
}
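
/*
 * Usage sketch (illustrative): a runtime service initialising a context for
 * a hypothetical AArch64 NS EL1 payload at `ns_image_base` might do:
 *
 *	entry_point_info_t ep;
 *
 *	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, NON_SECURE);
 *	ep.pc = ns_image_base;
 *	ep.spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
 *	zeromem(&ep.args, sizeof(ep.args));
 *	cm_setup_context(cm_get_context(NON_SECURE), &ep);
 */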

/*******************************************************************************
 * Enable architecture extensions for EL3 execution. This function only updates
 * registers in-place which are expected to either never change or be
 * overwritten by el3_exit.
 ******************************************************************************/
#if IMAGE_BL31
void cm_manage_extensions_el3(void)
{
	if (is_feat_amu_supported()) {
		amu_init_el3();
	}

	if (is_feat_sme_supported()) {
		sme_init_el3();
	}

	pmuv3_init_el3();
}
#endif /* IMAGE_BL31 */

/******************************************************************************
 * Function to initialise the registers with the RESET values in the context
 * memory, which are maintained per world.
 ******************************************************************************/
#if IMAGE_BL31
void cm_el3_arch_init_per_world(per_world_context_t *per_world_ctx)
{
	/*
	 * Initialise CPTR_EL3, setting all fields rather than relying on hw.
	 *
	 * CPTR_EL3.TFP: Set to zero so that accesses to the V- or Z- registers
	 *  by Advanced SIMD, floating-point or SVE instructions (if
	 *  implemented) do not trap to EL3.
	 *
	 * CPTR_EL3.TCPAC: Set to zero so that accesses to CPACR_EL1,
	 *  CPTR_EL2, CPACR, or HCPTR do not trap to EL3.
	 */
	uint64_t cptr_el3 = CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TFP_BIT);

	per_world_ctx->ctx_cptr_el3 = cptr_el3;

	/*
	 * Initialize MPAM3_EL3 to its default reset value.
	 *
	 * MPAM3_EL3_RESET_VAL sets the MPAM3_EL3.TRAPLOWER bit, which forces
	 * all lower-EL MPAM register accesses to trap to EL3.
	 */
	per_world_ctx->ctx_mpam3_el3 = MPAM3_EL3_RESET_VAL;
}
#endif /* IMAGE_BL31 */
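
/*
 * Note: unlike the per-CPU contexts, the per-world values initialised here
 * are shared by every core. They are expected to be programmed into
 * CPTR_EL3/MPAM3_EL3 on each exit from EL3 for whichever world is being
 * entered (see the el3_exit note above), so they only need to be set up
 * once per world rather than once per CPU.
 */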

/*******************************************************************************
 * Initialise per_world_context for the Non-Secure world.
 * This function enables the architecture extensions which have the same value
 * across the cores for the non-secure world.
 ******************************************************************************/
#if IMAGE_BL31
void manage_extensions_nonsecure_per_world(void)
{
	cm_el3_arch_init_per_world(&per_world_context[CPU_CONTEXT_NS]);

	if (is_feat_sme_supported()) {
		sme_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}

	if (is_feat_sve_supported()) {
		sve_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}

	if (is_feat_amu_supported()) {
		amu_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}

	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}

	if (is_feat_mpam_supported()) {
		mpam_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
	}
}
#endif /* IMAGE_BL31 */

/*******************************************************************************
 * Initialise per_world_context for the Secure world.
 * This function enables the architecture extensions which have the same value
 * across the cores for the secure world.
 ******************************************************************************/
static void manage_extensions_secure_per_world(void)
{
#if IMAGE_BL31
	cm_el3_arch_init_per_world(&per_world_context[CPU_CONTEXT_SECURE]);

	if (is_feat_sme_supported()) {
		if (ENABLE_SME_FOR_SWD) {
			/*
			 * Enable SME, SVE, FPU/SIMD in the secure context; the
			 * SPM must ensure that the SME, SVE, and FPU/SIMD
			 * contexts are properly managed.
			 */
			sme_enable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		} else {
			/*
			 * Disable SME, SVE, FPU/SIMD in the secure context so
			 * the non-secure world can safely use the associated
			 * registers.
			 */
			sme_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		}
	}
	if (is_feat_sve_supported()) {
		if (ENABLE_SVE_FOR_SWD) {
			/*
			 * Enable SVE and FPU in the secure context; the SPM
			 * must ensure that the SVE and FPU register contexts
			 * are properly managed.
			 */
			sve_enable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		} else {
			/*
			 * Disable SVE and FPU in the secure context so the
			 * non-secure world can safely use them.
			 */
			sve_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
		}
	}

	/* NS can access this but Secure shouldn't */
	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
	}

	has_secure_perworld_init = true;
#endif /* IMAGE_BL31 */
}
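
/*
 * Note the asymmetry with the non-secure path: the NS per-world state is set
 * up explicitly by the boot flow via manage_extensions_nonsecure_per_world(),
 * while the secure per-world state is initialised lazily from
 * setup_secure_context(), guarded by has_secure_perworld_init so that only
 * the first core to set up a secure context performs it.
 */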

/*******************************************************************************
 * Enable architecture extensions on first entry to the Non-secure world only,
 * and disable them for the Secure world.
 *
 * NOTE: Arch features which can be enabled only for the non-secure world while
 * being disabled for the secure world are grouped here, as the MDCR_EL3
 * context value remains the same across the worlds.
 ******************************************************************************/
static void manage_extensions_common(cpu_context_t *ctx)
{
#if IMAGE_BL31
	if (is_feat_spe_supported()) {
		/*
		 * Enable FEAT_SPE for Non-secure and prohibit it for Secure state.
		 */
		spe_enable(ctx);
	}

	if (is_feat_trbe_supported()) {
		/*
		 * Enable FEAT_TRBE for Non-secure and prohibit it for Secure
		 * and Realm state.
		 */
		trbe_enable(ctx);
	}

	if (is_feat_trf_supported()) {
		/*
		 * Enable FEAT_TRF for Non-secure and prohibit it for Secure state.
		 */
		trf_enable(ctx);
	}
#endif /* IMAGE_BL31 */
}

/*******************************************************************************
 * Enable architecture extensions on first entry to the Non-secure world.
 ******************************************************************************/
static void manage_extensions_nonsecure(cpu_context_t *ctx)
{
#if IMAGE_BL31
	if (is_feat_amu_supported()) {
		amu_enable(ctx);
	}

	if (is_feat_sme_supported()) {
		sme_enable(ctx);
	}

	if (is_feat_fgt2_supported()) {
		fgt2_enable(ctx);
	}

	if (is_feat_debugv8p9_supported()) {
		debugv8p9_extended_bp_wp_enable(ctx);
	}

	if (is_feat_brbe_supported()) {
		brbe_enable(ctx);
	}

	pmuv3_enable(ctx);
#endif /* IMAGE_BL31 */
}

/* TODO: move to lib/extensions/pauth when it has been ported to FEAT_STATE */
static __unused void enable_pauth_el2(void)
{
	u_register_t hcr_el2 = read_hcr_el2();

	/*
	 * For the Armv8.3 pointer authentication feature, disable traps to EL2
	 *  when accessing key registers or using pointer authentication
	 *  instructions from lower ELs.
	 */
	hcr_el2 |= (HCR_API_BIT | HCR_APK_BIT);

	write_hcr_el2(hcr_el2);
}
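
/*
 * This mirrors, at EL2, what setup_context_common() does at EL3 with
 * SCR_EL3.API/APK: both levels of trapping must be disabled for lower ELs to
 * use PAuth freely. It is only needed when EL2 is implemented but unused,
 * since a resident hypervisor would otherwise own HCR_EL2.
 */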

#if INIT_UNUSED_NS_EL2
/*******************************************************************************
 * Enable architecture extensions in-place at EL2 on first entry to the
 * Non-secure world when EL2 is empty and unused.
 ******************************************************************************/
static void manage_extensions_nonsecure_el2_unused(void)
{
#if IMAGE_BL31
	if (is_feat_spe_supported()) {
		spe_init_el2_unused();
	}

	if (is_feat_amu_supported()) {
		amu_init_el2_unused();
	}

	if (is_feat_mpam_supported()) {
		mpam_init_el2_unused();
	}

	if (is_feat_trbe_supported()) {
		trbe_init_el2_unused();
	}

	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_init_el2_unused();
	}

	if (is_feat_trf_supported()) {
		trf_init_el2_unused();
	}

	pmuv3_init_el2_unused();

	if (is_feat_sve_supported()) {
		sve_init_el2_unused();
	}

	if (is_feat_sme_supported()) {
		sme_init_el2_unused();
	}

#if ENABLE_PAUTH
	enable_pauth_el2();
#endif /* ENABLE_PAUTH */
#endif /* IMAGE_BL31 */
}
#endif /* INIT_UNUSED_NS_EL2 */

/*******************************************************************************
 * Enable architecture extensions on first entry to the Secure world.
 ******************************************************************************/
static void manage_extensions_secure(cpu_context_t *ctx)
{
#if IMAGE_BL31
	if (is_feat_sme_supported()) {
		if (ENABLE_SME_FOR_SWD) {
			/*
			 * Enable SME, SVE, FPU/SIMD in the secure context; the
			 * secure manager must ensure that the SME, SVE, and
			 * FPU/SIMD contexts are properly managed.
			 */
			sme_init_el3();
			sme_enable(ctx);
		} else {
			/*
			 * Disable SME, SVE, FPU/SIMD in the secure context so
			 * the non-secure world can safely use the associated
			 * registers.
			 */
			sme_disable(ctx);
		}
	}
#endif /* IMAGE_BL31 */
}

#if !IMAGE_BL1
/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure.
 ******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
			      const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}
#endif /* !IMAGE_BL1 */

/*******************************************************************************
 * The following function initializes the cpu_context for the current CPU
 * for first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 ******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}
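
/*
 * Usage sketch (illustrative): on a PSCI-style warm-boot path, the core that
 * is powering on typically re-initialises its own non-secure context from a
 * previously stashed entrypoint and then drops back out of EL3, e.g.:
 *
 *	cm_init_my_context(&warmboot_ep);	// hypothetical saved ep
 *	cm_prepare_el3_exit_ns();
 *	// ...followed by el3_exit in the caller's assembly path
 */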

/* EL2 present but unused: it needs to be disabled safely. SCTLR_EL2 can be ignored. */
static void init_nonsecure_el2_unused(cpu_context_t *ctx)
{
#if INIT_UNUSED_NS_EL2
	u_register_t hcr_el2 = HCR_RESET_VAL;
	u_register_t mdcr_el2;
	u_register_t scr_el3;

	scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);

	/* Set EL2 register width: Set HCR_EL2.RW to match SCR_EL3.RW */
	if ((scr_el3 & SCR_RW_BIT) != 0U) {
		hcr_el2 |= HCR_RW_BIT;
	}

	write_hcr_el2(hcr_el2);

	/*
	 * Initialise CPTR_EL2, setting all fields rather than relying on the hw.
	 * All fields have architecturally UNKNOWN reset values.
	 */
	write_cptr_el2(CPTR_EL2_RESET_VAL);

	/*
	 * Initialise CNTHCTL_EL2. All fields are architecturally UNKNOWN on
	 * reset and are set to zero except for the field(s) listed below.
	 *
	 * CNTHCTL_EL2.EL1PCEN: Set to one to disable traps to Hyp mode of
	 * Non-secure EL0 and EL1 accesses to the physical timer registers.
	 *
	 * CNTHCTL_EL2.EL1PCTEN: Set to one to disable traps to Hyp mode of
	 * Non-secure EL0 and EL1 accesses to the physical counter registers.
	 */
	write_cnthctl_el2(CNTHCTL_RESET_VAL | EL1PCEN_BIT | EL1PCTEN_BIT);

	/*
	 * Initialise CNTVOFF_EL2 to zero as it resets to an architecturally
	 * UNKNOWN value.
	 */
	write_cntvoff_el2(0);

	/*
	 * Set VPIDR_EL2 and VMPIDR_EL2 to match MIDR_EL1 and MPIDR_EL1
	 * respectively.
	 */
	write_vpidr_el2(read_midr_el1());
	write_vmpidr_el2(read_mpidr_el1());

	/*
	 * Initialise VTTBR_EL2. All fields are architecturally UNKNOWN on reset.
	 *
	 * VTTBR_EL2.VMID: Set to zero. Even though EL1&0 stage 2 address
	 * translation is disabled, cache maintenance operations depend on the
	 * VMID.
	 *
	 * VTTBR_EL2.BADDR: Set to zero as EL1&0 stage 2 address translation is
	 * disabled.
	 */
	write_vttbr_el2(VTTBR_RESET_VAL &
		     ~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT) |
		       (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));

	/*
	 * Initialise MDCR_EL2, setting all fields rather than relying on hw.
	 * Some fields are architecturally UNKNOWN on reset.
	 *
	 * MDCR_EL2.TDRA: Set to zero so that Non-secure EL0 and EL1 System
	 * register accesses to the Debug ROM registers are not trapped to EL2.
	 *
	 * MDCR_EL2.TDOSA: Set to zero so that Non-secure EL1 System register
	 * accesses to the powerdown debug registers are not trapped to EL2.
	 *
	 * MDCR_EL2.TDA: Set to zero so that System register accesses to the
	 * debug registers do not trap to EL2.
	 *
	 * MDCR_EL2.TDE: Set to zero so that debug exceptions are not routed to
	 * EL2.
	 */
	mdcr_el2 = MDCR_EL2_RESET_VAL &
		 ~(MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT | MDCR_EL2_TDA_BIT |
		   MDCR_EL2_TDE_BIT);

	write_mdcr_el2(mdcr_el2);

	/*
	 * Initialise HSTR_EL2. All fields are architecturally UNKNOWN on reset.
	 *
	 * HSTR_EL2.T<n>: Set all these fields to zero so that Non-secure EL0 or
	 * EL1 accesses to System registers do not trap to EL2.
	 */
	write_hstr_el2(HSTR_EL2_RESET_VAL & ~(HSTR_EL2_T_MASK));

	/*
	 * Initialise CNTHP_CTL_EL2. All fields are architecturally UNKNOWN on
	 * reset.
	 *
	 * CNTHP_CTL_EL2.ENABLE: Set to zero to disable the EL2 physical timer
	 * and prevent timer interrupts.
	 */
	write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL & ~(CNTHP_CTL_ENABLE_BIT));

	manage_extensions_nonsecure_el2_unused();
#endif /* INIT_UNUSED_NS_EL2 */
}
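
/*
 * After this function runs, EL2 is effectively transparent: no EL2 traps,
 * debug routing or EL2 timer interrupts fire, stage 2 translation is off,
 * and VPIDR/VMPIDR forward the real MIDR/MPIDR values, so non-secure EL1
 * behaves as if EL2 were not implemented.
 */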

/*******************************************************************************
 * Prepare the CPU system registers for first entry into realm, secure, or
 * normal world.
 *
 * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized.
 * If execution is requested to non-secure EL1 or svc mode, and the CPU supports
 * EL2, then EL2 is disabled by configuring all necessary EL2 registers.
 * For all entries, the EL1 registers are initialized from the cpu_context.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	u_register_t sctlr_el2, scr_el3;
	cpu_context_t *ctx = cm_get_context(security_state);

	assert(ctx != NULL);

	if (security_state == NON_SECURE) {
		uint64_t el2_implemented = el_implemented(2);

		scr_el3 = read_ctx_reg(get_el3state_ctx(ctx),
						 CTX_SCR_EL3);

		if (el2_implemented != EL_IMPL_NONE) {

			/*
			 * If the context is not being used for EL2, initialize
			 * HCRX_EL2 with its init value here.
			 */
			if (is_feat_hcx_supported()) {
				write_hcrx_el2(HCRX_EL2_INIT_VAL);
			}

			/*
			 * Initialize the Fine-grained trap registers introduced
			 * by FEAT_FGT so all traps are initially disabled when
			 * switching to EL2 or a lower EL, preventing undesired
			 * behavior.
			 */
			if (is_feat_fgt_supported()) {
				/*
				 * Initialize HFG*_EL2 registers with a default
				 * value so legacy systems unaware of FEAT_FGT
				 * do not get trapped due to their lack of
				 * initialization for this feature.
				 */
				write_hfgitr_el2(HFGITR_EL2_INIT_VAL);
				write_hfgrtr_el2(HFGRTR_EL2_INIT_VAL);
				write_hfgwtr_el2(HFGWTR_EL2_INIT_VAL);
			}

			/* Condition to ensure EL2 is being used. */
			if ((scr_el3 & SCR_HCE_BIT) != 0U) {
				/* Initialize SCTLR_EL2 register with reset value. */
				sctlr_el2 = SCTLR_EL2_RES1;

				/*
				 * If the workaround for Cortex-A75 erratum
				 * 764081 is applied, set SCTLR_EL2.IESB to
				 * enable the Implicit Error Synchronization
				 * Barrier.
				 */
				if (errata_a75_764081_applies()) {
					sctlr_el2 |= SCTLR_IESB_BIT;
				}

				write_sctlr_el2(sctlr_el2);
			} else {
				/*
				 * (scr_el3 & SCR_HCE_BIT) == 0:
				 * EL2 implemented but unused.
				 */
				init_nonsecure_el2_unused(ctx);
			}
		}
	}
#if (!CTX_INCLUDE_EL2_REGS)
	/* Restore EL1 system registers, only when CTX_INCLUDE_EL2_REGS=0 */
	cm_el1_sysregs_context_restore(security_state);
#endif
	cm_set_next_eret_context(security_state);
}

#if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31)

static void el2_sysregs_context_save_fgt(el2_sysregs_t *ctx)
{
	write_el2_ctx_fgt(ctx, hdfgrtr_el2, read_hdfgrtr_el2());
	if (is_feat_amu_supported()) {
		write_el2_ctx_fgt(ctx, hafgrtr_el2, read_hafgrtr_el2());
	}
	write_el2_ctx_fgt(ctx, hdfgwtr_el2, read_hdfgwtr_el2());
	write_el2_ctx_fgt(ctx, hfgitr_el2, read_hfgitr_el2());
	write_el2_ctx_fgt(ctx, hfgrtr_el2, read_hfgrtr_el2());
	write_el2_ctx_fgt(ctx, hfgwtr_el2, read_hfgwtr_el2());
}

static void el2_sysregs_context_restore_fgt(el2_sysregs_t *ctx)
{
	write_hdfgrtr_el2(read_el2_ctx_fgt(ctx, hdfgrtr_el2));
	if (is_feat_amu_supported()) {
		write_hafgrtr_el2(read_el2_ctx_fgt(ctx, hafgrtr_el2));
	}
	write_hdfgwtr_el2(read_el2_ctx_fgt(ctx, hdfgwtr_el2));
	write_hfgitr_el2(read_el2_ctx_fgt(ctx, hfgitr_el2));
	write_hfgrtr_el2(read_el2_ctx_fgt(ctx, hfgrtr_el2));
	write_hfgwtr_el2(read_el2_ctx_fgt(ctx, hfgwtr_el2));
}

static void el2_sysregs_context_save_fgt2(el2_sysregs_t *ctx)
{
	write_el2_ctx_fgt2(ctx, hdfgrtr2_el2, read_hdfgrtr2_el2());
	write_el2_ctx_fgt2(ctx, hdfgwtr2_el2, read_hdfgwtr2_el2());
	write_el2_ctx_fgt2(ctx, hfgitr2_el2, read_hfgitr2_el2());
	write_el2_ctx_fgt2(ctx, hfgrtr2_el2, read_hfgrtr2_el2());
	write_el2_ctx_fgt2(ctx, hfgwtr2_el2, read_hfgwtr2_el2());
}

static void el2_sysregs_context_restore_fgt2(el2_sysregs_t *ctx)
{
	write_hdfgrtr2_el2(read_el2_ctx_fgt2(ctx, hdfgrtr2_el2));
	write_hdfgwtr2_el2(read_el2_ctx_fgt2(ctx, hdfgwtr2_el2));
	write_hfgitr2_el2(read_el2_ctx_fgt2(ctx, hfgitr2_el2));
	write_hfgrtr2_el2(read_el2_ctx_fgt2(ctx, hfgrtr2_el2));
	write_hfgwtr2_el2(read_el2_ctx_fgt2(ctx, hfgwtr2_el2));
}

static void el2_sysregs_context_save_mpam(el2_sysregs_t *ctx)
{
	u_register_t mpam_idr = read_mpamidr_el1();

	write_el2_ctx_mpam(ctx, mpam2_el2, read_mpam2_el2());

	/*
	 * The context registers that we intend to save would be part of the
	 * PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
	 */
	if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) {
		return;
	}

	/*
	 * MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 are always present if
	 * MPAMIDR_EL1.HAS_HCR == 1.
	 */
	write_el2_ctx_mpam(ctx, mpamhcr_el2, read_mpamhcr_el2());
	write_el2_ctx_mpam(ctx, mpamvpm0_el2, read_mpamvpm0_el2());
	write_el2_ctx_mpam(ctx, mpamvpmv_el2, read_mpamvpmv_el2());

	/*
	 * The number of MPAMVPM registers is implementation defined; their
	 * number is stored in the MPAMIDR_EL1 register.
	 */
	switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) {
	case 7:
		write_el2_ctx_mpam(ctx, mpamvpm7_el2, read_mpamvpm7_el2());
		__fallthrough;
	case 6:
		write_el2_ctx_mpam(ctx, mpamvpm6_el2, read_mpamvpm6_el2());
		__fallthrough;
	case 5:
		write_el2_ctx_mpam(ctx, mpamvpm5_el2, read_mpamvpm5_el2());
		__fallthrough;
	case 4:
		write_el2_ctx_mpam(ctx, mpamvpm4_el2, read_mpamvpm4_el2());
		__fallthrough;
	case 3:
		write_el2_ctx_mpam(ctx, mpamvpm3_el2, read_mpamvpm3_el2());
		__fallthrough;
	case 2:
		write_el2_ctx_mpam(ctx, mpamvpm2_el2, read_mpamvpm2_el2());
		__fallthrough;
	case 1:
		write_el2_ctx_mpam(ctx, mpamvpm1_el2, read_mpamvpm1_el2());
		break;
	}
}
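
/*
 * Worked example (illustrative): if MPAMIDR_EL1.VPMR_MAX reads as 3, the
 * switch above enters at case 3 and falls through, saving MPAMVPM3_EL2,
 * MPAMVPM2_EL2 and MPAMVPM1_EL2 (MPAMVPM0_EL2 is always saved earlier).
 */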

static void el2_sysregs_context_restore_mpam(el2_sysregs_t *ctx)
{
	u_register_t mpam_idr = read_mpamidr_el1();

	write_mpam2_el2(read_el2_ctx_mpam(ctx, mpam2_el2));

	if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) {
		return;
	}

	write_mpamhcr_el2(read_el2_ctx_mpam(ctx, mpamhcr_el2));
	write_mpamvpm0_el2(read_el2_ctx_mpam(ctx, mpamvpm0_el2));
	write_mpamvpmv_el2(read_el2_ctx_mpam(ctx, mpamvpmv_el2));

	switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) {
	case 7:
		write_mpamvpm7_el2(read_el2_ctx_mpam(ctx, mpamvpm7_el2));
		__fallthrough;
	case 6:
		write_mpamvpm6_el2(read_el2_ctx_mpam(ctx, mpamvpm6_el2));
		__fallthrough;
	case 5:
		write_mpamvpm5_el2(read_el2_ctx_mpam(ctx, mpamvpm5_el2));
		__fallthrough;
	case 4:
		write_mpamvpm4_el2(read_el2_ctx_mpam(ctx, mpamvpm4_el2));
		__fallthrough;
	case 3:
		write_mpamvpm3_el2(read_el2_ctx_mpam(ctx, mpamvpm3_el2));
		__fallthrough;
	case 2:
		write_mpamvpm2_el2(read_el2_ctx_mpam(ctx, mpamvpm2_el2));
		__fallthrough;
	case 1:
		write_mpamvpm1_el2(read_el2_ctx_mpam(ctx, mpamvpm1_el2));
		break;
	}
}

/* ---------------------------------------------------------------------------
 * The following registers are not added:
 * ICH_AP0R<n>_EL2
 * ICH_AP1R<n>_EL2
 * ICH_LR<n>_EL2
 *
 * NOTE: For a system with S-EL2 present but not enabled, accessing
 * ICC_SRE_EL2 is UNDEFINED from EL3. To work around this, set
 * SCR_EL3.NS = 1 before accessing this register.
 * ---------------------------------------------------------------------------
 */
static void el2_sysregs_context_save_gic(el2_sysregs_t *ctx)
{
#if defined(SPD_spmd) && SPMD_SPM_AT_SEL2
	write_el2_ctx_common(ctx, icc_sre_el2, read_icc_sre_el2());
#else
	u_register_t scr_el3 = read_scr_el3();
	write_scr_el3(scr_el3 | SCR_NS_BIT);
	isb();

	write_el2_ctx_common(ctx, icc_sre_el2, read_icc_sre_el2());

	write_scr_el3(scr_el3);
	isb();
#endif
	write_el2_ctx_common(ctx, ich_hcr_el2, read_ich_hcr_el2());
	write_el2_ctx_common(ctx, ich_vmcr_el2, read_ich_vmcr_el2());
}

static void el2_sysregs_context_restore_gic(el2_sysregs_t *ctx)
{
#if defined(SPD_spmd) && SPMD_SPM_AT_SEL2
	write_icc_sre_el2(read_el2_ctx_common(ctx, icc_sre_el2));
#else
	u_register_t scr_el3 = read_scr_el3();
	write_scr_el3(scr_el3 | SCR_NS_BIT);
	isb();

	write_icc_sre_el2(read_el2_ctx_common(ctx, icc_sre_el2));

	write_scr_el3(scr_el3);
	isb();
#endif
	write_ich_hcr_el2(read_el2_ctx_common(ctx, ich_hcr_el2));
	write_ich_vmcr_el2(read_el2_ctx_common(ctx, ich_vmcr_el2));
}

/* -----------------------------------------------------
 * The following registers are not added:
 * AMEVCNTVOFF0<n>_EL2
 * AMEVCNTVOFF1<n>_EL2
 * -----------------------------------------------------
 */
static void el2_sysregs_context_save_common(el2_sysregs_t *ctx)
{
	write_el2_ctx_common(ctx, actlr_el2, read_actlr_el2());
	write_el2_ctx_common(ctx, afsr0_el2, read_afsr0_el2());
	write_el2_ctx_common(ctx, afsr1_el2, read_afsr1_el2());
	write_el2_ctx_common(ctx, amair_el2, read_amair_el2());
	write_el2_ctx_common(ctx, cnthctl_el2, read_cnthctl_el2());
	write_el2_ctx_common(ctx, cntvoff_el2, read_cntvoff_el2());
	write_el2_ctx_common(ctx, cptr_el2, read_cptr_el2());
	if (CTX_INCLUDE_AARCH32_REGS) {
		write_el2_ctx_common(ctx, dbgvcr32_el2, read_dbgvcr32_el2());
	}
	write_el2_ctx_common(ctx, elr_el2, read_elr_el2());
	write_el2_ctx_common(ctx, esr_el2, read_esr_el2());
	write_el2_ctx_common(ctx, far_el2, read_far_el2());
	write_el2_ctx_common(ctx, hacr_el2, read_hacr_el2());
	write_el2_ctx_common(ctx, hcr_el2, read_hcr_el2());
	write_el2_ctx_common(ctx, hpfar_el2, read_hpfar_el2());
	write_el2_ctx_common(ctx, hstr_el2, read_hstr_el2());
	write_el2_ctx_common(ctx, mair_el2, read_mair_el2());
	write_el2_ctx_common(ctx, mdcr_el2, read_mdcr_el2());
	write_el2_ctx_common(ctx, sctlr_el2, read_sctlr_el2());
	write_el2_ctx_common(ctx, spsr_el2, read_spsr_el2());
	write_el2_ctx_common(ctx, sp_el2, read_sp_el2());
	write_el2_ctx_common(ctx, tcr_el2, read_tcr_el2());
	write_el2_ctx_common(ctx, tpidr_el2, read_tpidr_el2());
	write_el2_ctx_common(ctx, ttbr0_el2, read_ttbr0_el2());
	write_el2_ctx_common(ctx, vbar_el2, read_vbar_el2());
	write_el2_ctx_common(ctx, vmpidr_el2, read_vmpidr_el2());
	write_el2_ctx_common(ctx, vpidr_el2, read_vpidr_el2());
	write_el2_ctx_common(ctx, vtcr_el2, read_vtcr_el2());
	write_el2_ctx_common(ctx, vttbr_el2, read_vttbr_el2());
}

static void el2_sysregs_context_restore_common(el2_sysregs_t *ctx)
{
	write_actlr_el2(read_el2_ctx_common(ctx, actlr_el2));
	write_afsr0_el2(read_el2_ctx_common(ctx, afsr0_el2));
	write_afsr1_el2(read_el2_ctx_common(ctx, afsr1_el2));
	write_amair_el2(read_el2_ctx_common(ctx, amair_el2));
	write_cnthctl_el2(read_el2_ctx_common(ctx, cnthctl_el2));
	write_cntvoff_el2(read_el2_ctx_common(ctx, cntvoff_el2));
	write_cptr_el2(read_el2_ctx_common(ctx, cptr_el2));
	if (CTX_INCLUDE_AARCH32_REGS) {
		write_dbgvcr32_el2(read_el2_ctx_common(ctx, dbgvcr32_el2));
	}
	write_elr_el2(read_el2_ctx_common(ctx, elr_el2));
	write_esr_el2(read_el2_ctx_common(ctx, esr_el2));
	write_far_el2(read_el2_ctx_common(ctx, far_el2));
	write_hacr_el2(read_el2_ctx_common(ctx, hacr_el2));
	write_hcr_el2(read_el2_ctx_common(ctx, hcr_el2));
	write_hpfar_el2(read_el2_ctx_common(ctx, hpfar_el2));
	write_hstr_el2(read_el2_ctx_common(ctx, hstr_el2));
	write_mair_el2(read_el2_ctx_common(ctx, mair_el2));
	write_mdcr_el2(read_el2_ctx_common(ctx, mdcr_el2));
	write_sctlr_el2(read_el2_ctx_common(ctx, sctlr_el2));
	write_spsr_el2(read_el2_ctx_common(ctx, spsr_el2));
	write_sp_el2(read_el2_ctx_common(ctx, sp_el2));
	write_tcr_el2(read_el2_ctx_common(ctx, tcr_el2));
	write_tpidr_el2(read_el2_ctx_common(ctx, tpidr_el2));
	write_ttbr0_el2(read_el2_ctx_common(ctx, ttbr0_el2));
	write_vbar_el2(read_el2_ctx_common(ctx, vbar_el2));
	write_vmpidr_el2(read_el2_ctx_common(ctx, vmpidr_el2));
	write_vpidr_el2(read_el2_ctx_common(ctx, vpidr_el2));
	write_vtcr_el2(read_el2_ctx_common(ctx, vtcr_el2));
	write_vttbr_el2(read_el2_ctx_common(ctx, vttbr_el2));
}

/*******************************************************************************
 * Save EL2 sysreg context
 ******************************************************************************/
void cm_el2_sysregs_context_save(uint32_t security_state)
{
	cpu_context_t *ctx;
	el2_sysregs_t *el2_sysregs_ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);

	el2_sysregs_context_save_common(el2_sysregs_ctx);
	el2_sysregs_context_save_gic(el2_sysregs_ctx);

	if (is_feat_mte2_supported()) {
		write_el2_ctx_mte2(el2_sysregs_ctx, tfsr_el2, read_tfsr_el2());
	}

	if (is_feat_mpam_supported()) {
		el2_sysregs_context_save_mpam(el2_sysregs_ctx);
	}

	if (is_feat_fgt_supported()) {
		el2_sysregs_context_save_fgt(el2_sysregs_ctx);
	}

	if (is_feat_fgt2_supported()) {
		el2_sysregs_context_save_fgt2(el2_sysregs_ctx);
	}

	if (is_feat_ecv_v2_supported()) {
		write_el2_ctx_ecv(el2_sysregs_ctx, cntpoff_el2, read_cntpoff_el2());
	}

	if (is_feat_vhe_supported()) {
		write_el2_ctx_vhe(el2_sysregs_ctx, contextidr_el2,
					read_contextidr_el2());
		write_el2_ctx_vhe(el2_sysregs_ctx, ttbr1_el2, read_ttbr1_el2());
	}

	if (is_feat_ras_supported()) {
		write_el2_ctx_ras(el2_sysregs_ctx, vdisr_el2, read_vdisr_el2());
		write_el2_ctx_ras(el2_sysregs_ctx, vsesr_el2, read_vsesr_el2());
	}

	if (is_feat_nv2_supported()) {
		write_el2_ctx_neve(el2_sysregs_ctx, vncr_el2, read_vncr_el2());
	}

	if (is_feat_trf_supported()) {
		write_el2_ctx_trf(el2_sysregs_ctx, trfcr_el2, read_trfcr_el2());
	}

	if (is_feat_csv2_2_supported()) {
		write_el2_ctx_csv2_2(el2_sysregs_ctx, scxtnum_el2,
					read_scxtnum_el2());
	}

	if (is_feat_hcx_supported()) {
		write_el2_ctx_hcx(el2_sysregs_ctx, hcrx_el2, read_hcrx_el2());
	}

	if (is_feat_tcr2_supported()) {
		write_el2_ctx_tcr2(el2_sysregs_ctx, tcr2_el2, read_tcr2_el2());
	}

	if (is_feat_sxpie_supported()) {
		write_el2_ctx_sxpie(el2_sysregs_ctx, pire0_el2, read_pire0_el2());
		write_el2_ctx_sxpie(el2_sysregs_ctx, pir_el2, read_pir_el2());
	}

	if (is_feat_sxpoe_supported()) {
		write_el2_ctx_sxpoe(el2_sysregs_ctx, por_el2, read_por_el2());
	}

	if (is_feat_s2pie_supported()) {
		write_el2_ctx_s2pie(el2_sysregs_ctx, s2pir_el2, read_s2pir_el2());
	}

	if (is_feat_gcs_supported()) {
		write_el2_ctx_gcs(el2_sysregs_ctx, gcscr_el2, read_gcscr_el2());
		write_el2_ctx_gcs(el2_sysregs_ctx, gcspr_el2, read_gcspr_el2());
	}
}
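
/*
 * Usage sketch (illustrative): a hypothetical dispatcher switching from the
 * secure to the non-secure world would pair the save and restore calls:
 *
 *	cm_el2_sysregs_context_save(SECURE);
 *	cm_el2_sysregs_context_restore(NON_SECURE);
 *	cm_set_next_eret_context(NON_SECURE);
 */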

/*******************************************************************************
 * Restore EL2 sysreg context
 ******************************************************************************/
void cm_el2_sysregs_context_restore(uint32_t security_state)
{
	cpu_context_t *ctx;
	el2_sysregs_t *el2_sysregs_ctx;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);

	el2_sysregs_context_restore_common(el2_sysregs_ctx);
	el2_sysregs_context_restore_gic(el2_sysregs_ctx);

	if (is_feat_mte2_supported()) {
		write_tfsr_el2(read_el2_ctx_mte2(el2_sysregs_ctx, tfsr_el2));
	}

	if (is_feat_mpam_supported()) {
		el2_sysregs_context_restore_mpam(el2_sysregs_ctx);
	}

	if (is_feat_fgt_supported()) {
		el2_sysregs_context_restore_fgt(el2_sysregs_ctx);
	}

	if (is_feat_fgt2_supported()) {
		el2_sysregs_context_restore_fgt2(el2_sysregs_ctx);
	}

	if (is_feat_ecv_v2_supported()) {
		write_cntpoff_el2(read_el2_ctx_ecv(el2_sysregs_ctx, cntpoff_el2));
	}

	if (is_feat_vhe_supported()) {
		write_contextidr_el2(read_el2_ctx_vhe(el2_sysregs_ctx,
					contextidr_el2));
		write_ttbr1_el2(read_el2_ctx_vhe(el2_sysregs_ctx, ttbr1_el2));
	}

	if (is_feat_ras_supported()) {
		write_vdisr_el2(read_el2_ctx_ras(el2_sysregs_ctx, vdisr_el2));
		write_vsesr_el2(read_el2_ctx_ras(el2_sysregs_ctx, vsesr_el2));
	}

	if (is_feat_nv2_supported()) {
		write_vncr_el2(read_el2_ctx_neve(el2_sysregs_ctx, vncr_el2));
	}

	if (is_feat_trf_supported()) {
		write_trfcr_el2(read_el2_ctx_trf(el2_sysregs_ctx, trfcr_el2));
	}

	if (is_feat_csv2_2_supported()) {
		write_scxtnum_el2(read_el2_ctx_csv2_2(el2_sysregs_ctx,
					scxtnum_el2));
	}

	if (is_feat_hcx_supported()) {
		write_hcrx_el2(read_el2_ctx_hcx(el2_sysregs_ctx, hcrx_el2));
	}

	if (is_feat_tcr2_supported()) {
		write_tcr2_el2(read_el2_ctx_tcr2(el2_sysregs_ctx, tcr2_el2));
	}

	if (is_feat_sxpie_supported()) {
		write_pire0_el2(read_el2_ctx_sxpie(el2_sysregs_ctx, pire0_el2));
		write_pir_el2(read_el2_ctx_sxpie(el2_sysregs_ctx, pir_el2));
	}

	if (is_feat_sxpoe_supported()) {
		write_por_el2(read_el2_ctx_sxpoe(el2_sysregs_ctx, por_el2));
	}

	if (is_feat_s2pie_supported()) {
		write_s2pir_el2(read_el2_ctx_s2pie(el2_sysregs_ctx, s2pir_el2));
	}

	if (is_feat_gcs_supported()) {
		write_gcscr_el2(read_el2_ctx_gcs(el2_sysregs_ctx, gcscr_el2));
		write_gcspr_el2(read_el2_ctx_gcs(el2_sysregs_ctx, gcspr_el2));
	}
}
#endif /* (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) */

#if IMAGE_BL31
/*********************************************************************************
 * This function allows asymmetry of architecture features among cores.
 * TF-A assumes that all the cores in the platform have architecture feature
 * parity, and hence a context can be set up on a different core (e.g. the
 * primary sets up the context for secondary cores). This assumption may not
 * hold for systems where cores do not conform to the same Arch version, or
 * where a CPU erratum requires a certain feature to be disabled only on a
 * given core.
 *
 * This function is called on secondary cores to override any disparity in the
 * context set up by the primary; it is called during the warmboot path.
 *********************************************************************************/
void cm_handle_asymmetric_features(void)
{
	cpu_context_t *ctx __maybe_unused = cm_get_context(NON_SECURE);

	assert(ctx != NULL);

#if ENABLE_SPE_FOR_NS == FEAT_STATE_CHECK_ASYMMETRIC
	if (is_feat_spe_supported()) {
		spe_enable(ctx);
	} else {
		spe_disable(ctx);
	}
#endif

#if ERRATA_A520_2938996 || ERRATA_X4_2726228
	if (check_if_affected_core() == ERRATA_APPLIES) {
		if (is_feat_trbe_supported()) {
			trbe_disable(ctx);
		}
	}
#endif

#if ENABLE_FEAT_TCR2 == FEAT_STATE_CHECK_ASYMMETRIC
	el3_state_t *el3_state = get_el3state_ctx(ctx);
	u_register_t spsr = read_ctx_reg(el3_state, CTX_SPSR_EL3);

	if (is_feat_tcr2_supported() && (GET_RW(spsr) == MODE_RW_64)) {
		tcr2_enable(ctx);
	} else {
		tcr2_disable(ctx);
	}
#endif
}
#endif
1574 
1575 /*******************************************************************************
1576  * This function is used to exit to Non-secure world. If CTX_INCLUDE_EL2_REGS
1577  * is enabled, it restores EL1 and EL2 sysreg contexts instead of directly
1578  * updating EL1 and EL2 registers. Otherwise, it calls the generic
1579  * cm_prepare_el3_exit function.
1580  ******************************************************************************/
1581 void cm_prepare_el3_exit_ns(void)
1582 {
1583 #if IMAGE_BL31
1584 	/*
1585 	 * Check for and handle architectural feature asymmetry among cores.
1586 	 *
1587 	 * In the warmboot path, a secondary core's context is initialized on
1588 	 * the core that made the CPU_ON SMC call. If there is feature
1589 	 * asymmetry between these cores, it is handled in this function call.
1590 	 * For symmetric cores this function is a no-op.
1591 	 */
1592 	cm_handle_asymmetric_features();
1593 #endif
1594 
1595 #if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31)
1596 #if ENABLE_ASSERTIONS
1597 	cpu_context_t *ctx = cm_get_context(NON_SECURE);
1598 	assert(ctx != NULL);
1599 
1600 	/* Assert that EL2 is used. */
1601 	u_register_t scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
1602 	assert(((scr_el3 & SCR_HCE_BIT) != 0UL) &&
1603 			(el_implemented(2U) != EL_IMPL_NONE));
1604 #endif /* ENABLE_ASSERTIONS */
1605 
1606 	/* Restore EL2 sysreg contexts */
1607 	cm_el2_sysregs_context_restore(NON_SECURE);
1608 	cm_set_next_eret_context(NON_SECURE);
1609 #else
1610 	cm_prepare_el3_exit(NON_SECURE);
1611 #endif /* (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) */
1612 }
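
/*
 * Illustrative sketch, for documentation only: a hypothetical BL31 code path
 * returning to the normal world. When BL31 owns the EL1 context
 * (!CTX_INCLUDE_EL2_REGS), an EL1 context restore would typically be paired
 * with this exit helper; the function name here is made up.
 */
#if 0
static void example_return_to_normal_world(void)
{
	/* Restore the banked normal-world EL1 state... */
	cm_el1_sysregs_context_restore(NON_SECURE);

	/* ...then program the EL1/EL2 registers and the next ERET context. */
	cm_prepare_el3_exit_ns();
}
#endif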
1613 
1614 #if ((IMAGE_BL1) || (IMAGE_BL31 && (!CTX_INCLUDE_EL2_REGS)))
1615 /*******************************************************************************
1616  * The next two functions save and restore all of the EL1 system registers to
1617  * and from a given 'el1_sysregs_t' context structure.
1618  ******************************************************************************/
1619 static void el1_sysregs_context_save(el1_sysregs_t *ctx)
1620 {
1621 	write_el1_ctx_common(ctx, spsr_el1, read_spsr_el1());
1622 	write_el1_ctx_common(ctx, elr_el1, read_elr_el1());
1623 
1624 #if (!ERRATA_SPECULATIVE_AT)
1625 	write_el1_ctx_common(ctx, sctlr_el1, read_sctlr_el1());
1626 	write_el1_ctx_common(ctx, tcr_el1, read_tcr_el1());
1627 #endif /* (!ERRATA_SPECULATIVE_AT) */
1628 
1629 	write_el1_ctx_common(ctx, cpacr_el1, read_cpacr_el1());
1630 	write_el1_ctx_common(ctx, csselr_el1, read_csselr_el1());
1631 	write_el1_ctx_common(ctx, sp_el1, read_sp_el1());
1632 	write_el1_ctx_common(ctx, esr_el1, read_esr_el1());
1633 	write_el1_ctx_common(ctx, ttbr0_el1, read_ttbr0_el1());
1634 	write_el1_ctx_common(ctx, ttbr1_el1, read_ttbr1_el1());
1635 	write_el1_ctx_common(ctx, mair_el1, read_mair_el1());
1636 	write_el1_ctx_common(ctx, amair_el1, read_amair_el1());
1637 	write_el1_ctx_common(ctx, actlr_el1, read_actlr_el1());
1638 	write_el1_ctx_common(ctx, tpidr_el1, read_tpidr_el1());
1639 	write_el1_ctx_common(ctx, tpidr_el0, read_tpidr_el0());
1640 	write_el1_ctx_common(ctx, tpidrro_el0, read_tpidrro_el0());
1641 	write_el1_ctx_common(ctx, par_el1, read_par_el1());
1642 	write_el1_ctx_common(ctx, far_el1, read_far_el1());
1643 	write_el1_ctx_common(ctx, afsr0_el1, read_afsr0_el1());
1644 	write_el1_ctx_common(ctx, afsr1_el1, read_afsr1_el1());
1645 	write_el1_ctx_common(ctx, contextidr_el1, read_contextidr_el1());
1646 	write_el1_ctx_common(ctx, vbar_el1, read_vbar_el1());
1647 	write_el1_ctx_common(ctx, mdccint_el1, read_mdccint_el1());
1648 	write_el1_ctx_common(ctx, mdscr_el1, read_mdscr_el1());
1649 
1650 	if (CTX_INCLUDE_AARCH32_REGS) {
1651 		/* Save AArch32 registers */
1652 		write_el1_ctx_aarch32(ctx, spsr_abt, read_spsr_abt());
1653 		write_el1_ctx_aarch32(ctx, spsr_und, read_spsr_und());
1654 		write_el1_ctx_aarch32(ctx, spsr_irq, read_spsr_irq());
1655 		write_el1_ctx_aarch32(ctx, spsr_fiq, read_spsr_fiq());
1656 		write_el1_ctx_aarch32(ctx, dacr32_el2, read_dacr32_el2());
1657 		write_el1_ctx_aarch32(ctx, ifsr32_el2, read_ifsr32_el2());
1658 	}
1659 
1660 	if (NS_TIMER_SWITCH) {
1661 		/* Save NS Timer registers */
1662 		write_el1_ctx_arch_timer(ctx, cntp_ctl_el0, read_cntp_ctl_el0());
1663 		write_el1_ctx_arch_timer(ctx, cntp_cval_el0, read_cntp_cval_el0());
1664 		write_el1_ctx_arch_timer(ctx, cntv_ctl_el0, read_cntv_ctl_el0());
1665 		write_el1_ctx_arch_timer(ctx, cntv_cval_el0, read_cntv_cval_el0());
1666 		write_el1_ctx_arch_timer(ctx, cntkctl_el1, read_cntkctl_el1());
1667 	}
1668 
1669 	if (is_feat_mte2_supported()) {
1670 		write_el1_ctx_mte2(ctx, tfsre0_el1, read_tfsre0_el1());
1671 		write_el1_ctx_mte2(ctx, tfsr_el1, read_tfsr_el1());
1672 		write_el1_ctx_mte2(ctx, rgsr_el1, read_rgsr_el1());
1673 		write_el1_ctx_mte2(ctx, gcr_el1, read_gcr_el1());
1674 	}
1675 
1676 	if (is_feat_ras_supported()) {
1677 		write_el1_ctx_ras(ctx, disr_el1, read_disr_el1());
1678 	}
1679 
1680 	if (is_feat_s1pie_supported()) {
1681 		write_el1_ctx_s1pie(ctx, pire0_el1, read_pire0_el1());
1682 		write_el1_ctx_s1pie(ctx, pir_el1, read_pir_el1());
1683 	}
1684 
1685 	if (is_feat_s1poe_supported()) {
1686 		write_el1_ctx_s1poe(ctx, por_el1, read_por_el1());
1687 	}
1688 
1689 	if (is_feat_s2poe_supported()) {
1690 		write_el1_ctx_s2poe(ctx, s2por_el1, read_s2por_el1());
1691 	}
1692 
1693 	if (is_feat_tcr2_supported()) {
1694 		write_el1_ctx_tcr2(ctx, tcr2_el1, read_tcr2_el1());
1695 	}
1696 
1697 	if (is_feat_trf_supported()) {
1698 		write_el1_ctx_trf(ctx, trfcr_el1, read_trfcr_el1());
1699 	}
1700 
1701 	if (is_feat_csv2_2_supported()) {
1702 		write_el1_ctx_csv2_2(ctx, scxtnum_el0, read_scxtnum_el0());
1703 		write_el1_ctx_csv2_2(ctx, scxtnum_el1, read_scxtnum_el1());
1704 	}
1705 
1706 	if (is_feat_gcs_supported()) {
1707 		write_el1_ctx_gcs(ctx, gcscr_el1, read_gcscr_el1());
1708 		write_el1_ctx_gcs(ctx, gcscre0_el1, read_gcscre0_el1());
1709 		write_el1_ctx_gcs(ctx, gcspr_el1, read_gcspr_el1());
1710 		write_el1_ctx_gcs(ctx, gcspr_el0, read_gcspr_el0());
1711 	}
1712 }
1713 
1714 static void el1_sysregs_context_restore(el1_sysregs_t *ctx)
1715 {
1716 	write_spsr_el1(read_el1_ctx_common(ctx, spsr_el1));
1717 	write_elr_el1(read_el1_ctx_common(ctx, elr_el1));
1718 
1719 #if (!ERRATA_SPECULATIVE_AT)
1720 	write_sctlr_el1(read_el1_ctx_common(ctx, sctlr_el1));
1721 	write_tcr_el1(read_el1_ctx_common(ctx, tcr_el1));
1722 #endif /* (!ERRATA_SPECULATIVE_AT) */
1723 
1724 	write_cpacr_el1(read_el1_ctx_common(ctx, cpacr_el1));
1725 	write_csselr_el1(read_el1_ctx_common(ctx, csselr_el1));
1726 	write_sp_el1(read_el1_ctx_common(ctx, sp_el1));
1727 	write_esr_el1(read_el1_ctx_common(ctx, esr_el1));
1728 	write_ttbr0_el1(read_el1_ctx_common(ctx, ttbr0_el1));
1729 	write_ttbr1_el1(read_el1_ctx_common(ctx, ttbr1_el1));
1730 	write_mair_el1(read_el1_ctx_common(ctx, mair_el1));
1731 	write_amair_el1(read_el1_ctx_common(ctx, amair_el1));
1732 	write_actlr_el1(read_el1_ctx_common(ctx, actlr_el1));
1733 	write_tpidr_el1(read_el1_ctx_common(ctx, tpidr_el1));
1734 	write_tpidr_el0(read_el1_ctx_common(ctx, tpidr_el0));
1735 	write_tpidrro_el0(read_el1_ctx_common(ctx, tpidrro_el0));
1736 	write_par_el1(read_el1_ctx_common(ctx, par_el1));
1737 	write_far_el1(read_el1_ctx_common(ctx, far_el1));
1738 	write_afsr0_el1(read_el1_ctx_common(ctx, afsr0_el1));
1739 	write_afsr1_el1(read_el1_ctx_common(ctx, afsr1_el1));
1740 	write_contextidr_el1(read_el1_ctx_common(ctx, contextidr_el1));
1741 	write_vbar_el1(read_el1_ctx_common(ctx, vbar_el1));
1742 	write_mdccint_el1(read_el1_ctx_common(ctx, mdccint_el1));
1743 	write_mdscr_el1(read_el1_ctx_common(ctx, mdscr_el1));
1744 
1745 	if (CTX_INCLUDE_AARCH32_REGS) {
1746 		/* Restore Aarch32 registers */
1747 		/* Restore AArch32 registers */
1748 		write_spsr_und(read_el1_ctx_aarch32(ctx, spsr_und));
1749 		write_spsr_irq(read_el1_ctx_aarch32(ctx, spsr_irq));
1750 		write_spsr_fiq(read_el1_ctx_aarch32(ctx, spsr_fiq));
1751 		write_dacr32_el2(read_el1_ctx_aarch32(ctx, dacr32_el2));
1752 		write_ifsr32_el2(read_el1_ctx_aarch32(ctx, ifsr32_el2));
1753 	}
1754 
1755 	if (NS_TIMER_SWITCH) {
1756 		/* Restore NS Timer registers */
1757 		write_cntp_ctl_el0(read_el1_ctx_arch_timer(ctx, cntp_ctl_el0));
1758 		write_cntp_cval_el0(read_el1_ctx_arch_timer(ctx, cntp_cval_el0));
1759 		write_cntv_ctl_el0(read_el1_ctx_arch_timer(ctx, cntv_ctl_el0));
1760 		write_cntv_cval_el0(read_el1_ctx_arch_timer(ctx, cntv_cval_el0));
1761 		write_cntkctl_el1(read_el1_ctx_arch_timer(ctx, cntkctl_el1));
1762 	}
1763 
1764 	if (is_feat_mte2_supported()) {
1765 		write_tfsre0_el1(read_el1_ctx_mte2(ctx, tfsre0_el1));
1766 		write_tfsr_el1(read_el1_ctx_mte2(ctx, tfsr_el1));
1767 		write_rgsr_el1(read_el1_ctx_mte2(ctx, rgsr_el1));
1768 		write_gcr_el1(read_el1_ctx_mte2(ctx, gcr_el1));
1769 	}
1770 
1771 	if (is_feat_ras_supported()) {
1772 		write_disr_el1(read_el1_ctx_ras(ctx, disr_el1));
1773 	}
1774 
1775 	if (is_feat_s1pie_supported()) {
1776 		write_pire0_el1(read_el1_ctx_s1pie(ctx, pire0_el1));
1777 		write_pir_el1(read_el1_ctx_s1pie(ctx, pir_el1));
1778 	}
1779 
1780 	if (is_feat_s1poe_supported()) {
1781 		write_por_el1(read_el1_ctx_s1poe(ctx, por_el1));
1782 	}
1783 
1784 	if (is_feat_s2poe_supported()) {
1785 		write_s2por_el1(read_el1_ctx_s2poe(ctx, s2por_el1));
1786 	}
1787 
1788 	if (is_feat_tcr2_supported()) {
1789 		write_tcr2_el1(read_el1_ctx_tcr2(ctx, tcr2_el1));
1790 	}
1791 
1792 	if (is_feat_trf_supported()) {
1793 		write_trfcr_el1(read_el1_ctx_trf(ctx, trfcr_el1));
1794 	}
1795 
1796 	if (is_feat_csv2_2_supported()) {
1797 		write_scxtnum_el0(read_el1_ctx_csv2_2(ctx, scxtnum_el0));
1798 		write_scxtnum_el1(read_el1_ctx_csv2_2(ctx, scxtnum_el1));
1799 	}
1800 
1801 	if (is_feat_gcs_supported()) {
1802 		write_gcscr_el1(read_el1_ctx_gcs(ctx, gcscr_el1));
1803 		write_gcscre0_el1(read_el1_ctx_gcs(ctx, gcscre0_el1));
1804 		write_gcspr_el1(read_el1_ctx_gcs(ctx, gcspr_el1));
1805 		write_gcspr_el0(read_el1_ctx_gcs(ctx, gcspr_el0));
1806 	}
1807 }
1808 
1809 /*******************************************************************************
1810  * The next two functions are used by runtime services to save and restore the
1811  * EL1 context on the 'cpu_context' structure for the specified security state.
1812  ******************************************************************************/
1813 void cm_el1_sysregs_context_save(uint32_t security_state)
1814 {
1815 	cpu_context_t *ctx;
1816 
1817 	ctx = cm_get_context(security_state);
1818 	assert(ctx != NULL);
1819 
1820 	el1_sysregs_context_save(get_el1_sysregs_ctx(ctx));
1821 
1822 #if IMAGE_BL31
1823 	if (security_state == SECURE)
1824 		PUBLISH_EVENT(cm_exited_secure_world);
1825 	else
1826 		PUBLISH_EVENT(cm_exited_normal_world);
1827 #endif
1828 }
1829 
1830 void cm_el1_sysregs_context_restore(uint32_t security_state)
1831 {
1832 	cpu_context_t *ctx;
1833 
1834 	ctx = cm_get_context(security_state);
1835 	assert(ctx != NULL);
1836 
1837 	el1_sysregs_context_restore(get_el1_sysregs_ctx(ctx));
1838 
1839 #if IMAGE_BL31
1840 	if (security_state == SECURE)
1841 		PUBLISH_EVENT(cm_entering_secure_world);
1842 	else
1843 		PUBLISH_EVENT(cm_entering_normal_world);
1844 #endif
1845 }
1846 
1847 #endif /* ((IMAGE_BL1) || (IMAGE_BL31 && (!CTX_INCLUDE_EL2_REGS))) */
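
/*
 * Illustrative sketch, for documentation only: a typical secure <->
 * non-secure world switch that a hypothetical dispatcher could build from
 * the two wrappers above plus cm_set_next_eret_context() defined below.
 */
#if 0
static void example_switch_to_secure_world(void)
{
	/* Bank the interrupted normal-world EL1 state... */
	cm_el1_sysregs_context_save(NON_SECURE);

	/* ...bring in the secure-world EL1 state... */
	cm_el1_sysregs_context_restore(SECURE);

	/* ...and point SP_EL3 at the secure 'cpu_context' for the ERET. */
	cm_set_next_eret_context(SECURE);
}
#endif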
1848 
1849 /*******************************************************************************
1850  * This function populates ELR_EL3 member of 'cpu_context' pertaining to the
1851  * given security state with the given entrypoint
1852  ******************************************************************************/
1853 void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint)
1854 {
1855 	cpu_context_t *ctx;
1856 	el3_state_t *state;
1857 
1858 	ctx = cm_get_context(security_state);
1859 	assert(ctx != NULL);
1860 
1861 	/* Populate EL3 state so that ERET jumps to the correct entry */
1862 	state = get_el3state_ctx(ctx);
1863 	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
1864 }
1865 
1866 /*******************************************************************************
1867  * This function populates ELR_EL3 and SPSR_EL3 members of 'cpu_context'
1868  * pertaining to the given security state
1869  ******************************************************************************/
1870 void cm_set_elr_spsr_el3(uint32_t security_state,
1871 			uintptr_t entrypoint, uint32_t spsr)
1872 {
1873 	cpu_context_t *ctx;
1874 	el3_state_t *state;
1875 
1876 	ctx = cm_get_context(security_state);
1877 	assert(ctx != NULL);
1878 
1879 	/* Populate EL3 state so that ERET jumps to the correct entry */
1880 	state = get_el3state_ctx(ctx);
1881 	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
1882 	write_ctx_reg(state, CTX_SPSR_EL3, spsr);
1883 }
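
/*
 * Illustrative sketch, for documentation only: programming a return to a
 * hypothetical AArch64 NS-EL1 entrypoint. SPSR_64(), MODE_EL1, MODE_SP_ELX
 * and DISABLE_ALL_EXCEPTIONS are existing TF-A definitions; the entrypoint
 * address is made up.
 */
#if 0
static void example_program_ns_entry(void)
{
	uintptr_t ns_ep = 0x80000000UL;	/* hypothetical load address */

	cm_set_elr_spsr_el3(NON_SECURE, ns_ep,
			    SPSR_64(MODE_EL1, MODE_SP_ELX,
				    DISABLE_ALL_EXCEPTIONS));
}
#endif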
1884 
1885 /*******************************************************************************
1886  * This function updates a single bit in the SCR_EL3 member of the 'cpu_context'
1887  * pertaining to the given security state using the value and bit position
1888  * specified in the parameters. It preserves all other bits.
1889  ******************************************************************************/
1890 void cm_write_scr_el3_bit(uint32_t security_state,
1891 			  uint32_t bit_pos,
1892 			  uint32_t value)
1893 {
1894 	cpu_context_t *ctx;
1895 	el3_state_t *state;
1896 	u_register_t scr_el3;
1897 
1898 	ctx = cm_get_context(security_state);
1899 	assert(ctx != NULL);
1900 
1901 	/* Ensure that the bit position is a valid one */
1902 	assert(((1UL << bit_pos) & SCR_VALID_BIT_MASK) != 0U);
1903 
1904 	/* Ensure that the 'value' is only a bit wide */
1905 	assert(value <= 1U);
1906 
1907 	/*
1908 	 * Get the SCR_EL3 value from the cpu context, clear the desired bit
1909 	 * and set it to its new value.
1910 	 */
1911 	state = get_el3state_ctx(ctx);
1912 	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
1913 	scr_el3 &= ~(1UL << bit_pos);
1914 	scr_el3 |= (u_register_t)value << bit_pos;
1915 	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
1916 }
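
/*
 * Illustrative sketch, for documentation only: routing non-secure FIQs to
 * EL3 by setting SCR_EL3.FIQ, which sits at bit position 2 of SCR_EL3. The
 * interrupt management framework drives updates of this kind; the
 * hard-coded bit position is spelled out only for the example.
 */
#if 0
static void example_route_ns_fiq_to_el3(void)
{
	cm_write_scr_el3_bit(NON_SECURE, 2U /* SCR_EL3.FIQ */, 1U);
}
#endif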
1917 
1918 /*******************************************************************************
1919  * This function retrieves SCR_EL3 member of 'cpu_context' pertaining to the
1920  * given security state.
1921  ******************************************************************************/
1922 u_register_t cm_get_scr_el3(uint32_t security_state)
1923 {
1924 	cpu_context_t *ctx;
1925 	el3_state_t *state;
1926 
1927 	ctx = cm_get_context(security_state);
1928 	assert(ctx != NULL);
1929 
1930 	/* Get the SCR_EL3 value from the cpu context */
1931 	state = get_el3state_ctx(ctx);
1932 	return read_ctx_reg(state, CTX_SCR_EL3);
1933 }
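
/*
 * Illustrative sketch, for documentation only: consulting the saved SCR_EL3
 * copy, e.g. to check whether EL2 is enabled for the normal world (the same
 * SCR_HCE_BIT test used in cm_prepare_el3_exit_ns() above).
 */
#if 0
static bool example_ns_el2_enabled(void)
{
	return (cm_get_scr_el3(NON_SECURE) & SCR_HCE_BIT) != 0UL;
}
#endif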
1934 
1935 /*******************************************************************************
1936  * This function is used to program the context that's used for exception
1937  * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
1938  * the required security state
1939  ******************************************************************************/
1940 void cm_set_next_eret_context(uint32_t security_state)
1941 {
1942 	cpu_context_t *ctx;
1943 
1944 	ctx = cm_get_context(security_state);
1945 	assert(ctx != NULL);
1946 
1947 	cm_set_next_context(ctx);
1948 }
1949