xref: /rk3399_ARM-atf/lib/el3_runtime/aarch64/context_mgmt.c (revision e264b5573952c72805a14e69e438168c00163e9a)
1 /*
2  * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
3  * Copyright (c) 2022, NVIDIA Corporation. All rights reserved.
4  *
5  * SPDX-License-Identifier: BSD-3-Clause
6  */
7 
8 #include <assert.h>
9 #include <stdbool.h>
10 #include <string.h>
11 
12 #include <platform_def.h>
13 
14 #include <arch.h>
15 #include <arch_helpers.h>
16 #include <arch_features.h>
17 #include <bl31/interrupt_mgmt.h>
18 #include <common/bl_common.h>
19 #include <common/debug.h>
20 #include <context.h>
21 #include <drivers/arm/gicv3.h>
22 #include <lib/cpus/cpu_ops.h>
23 #include <lib/cpus/errata.h>
24 #include <lib/el3_runtime/context_mgmt.h>
25 #include <lib/el3_runtime/cpu_data.h>
26 #include <lib/el3_runtime/pubsub_events.h>
27 #include <lib/extensions/amu.h>
28 #include <lib/extensions/brbe.h>
29 #include <lib/extensions/debug_v8p9.h>
30 #include <lib/extensions/fgt2.h>
31 #include <lib/extensions/mpam.h>
32 #include <lib/extensions/pmuv3.h>
33 #include <lib/extensions/sme.h>
34 #include <lib/extensions/spe.h>
35 #include <lib/extensions/sve.h>
36 #include <lib/extensions/sys_reg_trace.h>
37 #include <lib/extensions/trbe.h>
38 #include <lib/extensions/trf.h>
39 #include <lib/utils.h>
40 
41 #if ENABLE_FEAT_TWED
42 /* Make sure the delay value fits within the range (0-15) */
43 CASSERT(((TWED_DELAY & ~SCR_TWEDEL_MASK) == 0U), assert_twed_delay_value_check);
44 #endif /* ENABLE_FEAT_TWED */
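/*
 * Illustrative note: TWED_DELAY is the raw 4-bit value that
 * setup_context_common() programs into SCR_EL3.TWEDEL. Architecturally,
 * the resulting minimum WFE trap delay is understood to be
 * 2^(TWEDEL + 8) CPU cycles, e.g.:
 *
 *   TWED_DELAY = 0  ->  delay of at least 2^8  = 256 cycles
 *   TWED_DELAY = 15 ->  delay of at least 2^23 = 8388608 cycles
 *
 * The CASSERT above only checks that the value fits the 4-bit field.
 */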
45 
46 per_world_context_t per_world_context[CPU_DATA_CONTEXT_NUM];
47 static bool has_secure_perworld_init;
48 
49 static void manage_extensions_common(cpu_context_t *ctx);
50 static void manage_extensions_nonsecure(cpu_context_t *ctx);
51 static void manage_extensions_secure(cpu_context_t *ctx);
52 static void manage_extensions_secure_per_world(void);
53 
54 #if ((IMAGE_BL1) || (IMAGE_BL31 && (!CTX_INCLUDE_EL2_REGS)))
55 static void setup_el1_context(cpu_context_t *ctx, const struct entry_point_info *ep)
56 {
57 	u_register_t sctlr_elx, actlr_elx;
58 
59 	/*
60 	 * Initialise SCTLR_EL1 to the reset value corresponding to the target
61 	 * execution state setting all fields rather than relying on the hw.
62 	 * Some fields have architecturally UNKNOWN reset values and these are
63 	 * set to zero.
64 	 *
65 	 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
66 	 *
67 	 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
68 	 * required by PSCI specification)
69 	 */
70 	sctlr_elx = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0UL;
71 	if (GET_RW(ep->spsr) == MODE_RW_64) {
72 		sctlr_elx |= SCTLR_EL1_RES1;
73 	} else {
74 		/*
75 		 * If the target execution state is AArch32 then the following
76 		 * fields need to be set.
77 		 *
78 		 * SCTLR_EL1.nTWE: Set to one so that EL0 execution of WFE
79 		 *  instructions is not trapped to EL1.
80 		 *
81 		 * SCTLR_EL1.nTWI: Set to one so that EL0 execution of WFI
82 		 *  instructions is not trapped to EL1.
83 		 *
84 		 * SCTLR_EL1.CP15BEN: Set to one to enable EL0 execution of the
85 		 *  CP15DMB, CP15DSB, and CP15ISB instructions.
86 		 */
87 		sctlr_elx |= SCTLR_AARCH32_EL1_RES1 | SCTLR_CP15BEN_BIT
88 					| SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
89 	}
90 
91 #if ERRATA_A75_764081
92 	/*
93 	 * If the workaround for Cortex-A75 erratum 764081 is used, set
94 	 * SCTLR_EL1.IESB to enable Implicit Error Synchronization Barrier.
95 	 */
96 	sctlr_elx |= SCTLR_IESB_BIT;
97 #endif
98 
99 	/* Store the initialised SCTLR_EL1 value in the cpu_context */
100 	write_ctx_sctlr_el1_reg_errata(ctx, sctlr_elx);
101 
102 	/*
103 	 * Base the context ACTLR_EL1 on the current value, as it is
104 	 * implementation defined. The context restore process will write
105 	 * the value from the context to the actual register and can cause
106 	 * problems for processor cores that don't expect certain bits to
107 	 * be zero.
108 	 */
109 	actlr_elx = read_actlr_el1();
110 	write_el1_ctx_common(get_el1_sysregs_ctx(ctx), actlr_el1, actlr_elx);
111 }
112 #endif /* (IMAGE_BL1) || (IMAGE_BL31 && (!CTX_INCLUDE_EL2_REGS)) */
113 
114 /******************************************************************************
115  * This function performs initializations that are specific to SECURE state
116  * and updates the cpu context specified by 'ctx'.
117  *****************************************************************************/
118 static void setup_secure_context(cpu_context_t *ctx, const struct entry_point_info *ep)
119 {
120 	u_register_t scr_el3;
121 	el3_state_t *state;
122 
123 	state = get_el3state_ctx(ctx);
124 	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
125 
126 #if defined(IMAGE_BL31) && !defined(SPD_spmd)
127 	/*
128 	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
129 	 * indicated by the interrupt routing model for BL31.
130 	 */
131 	scr_el3 |= get_scr_el3_from_routing_model(SECURE);
132 #endif
133 
134 	/* Allow access to Allocation Tags when FEAT_MTE2 is implemented and enabled. */
135 	if (is_feat_mte2_supported()) {
136 		scr_el3 |= SCR_ATA_BIT;
137 	}
138 
139 	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
140 
141 	/*
142 	 * Initialize EL1 context registers unless SPMC is running
143 	 * at S-EL2.
144 	 */
145 #if (!SPMD_SPM_AT_SEL2)
146 	setup_el1_context(ctx, ep);
147 #endif
148 
149 	manage_extensions_secure(ctx);
150 
151 	/*
152 	 * The manage_extensions_secure_per_world API must be executed only
153 	 * once, as the registers it initialises hold a constant value across
154 	 * all the CPUs for the secure world.
155 	 * This check therefore ensures that the registers are initialised once
156 	 * and avoids re-initialisation from multiple cores.
157 	 */
158 	if (!has_secure_perworld_init) {
159 		manage_extensions_secure_per_world();
160 	}
161 }
162 
163 #if ENABLE_RME
164 /******************************************************************************
165  * This function performs initializations that are specific to REALM state
166  * and updates the cpu context specified by 'ctx'.
167  *****************************************************************************/
168 static void setup_realm_context(cpu_context_t *ctx, const struct entry_point_info *ep)
169 {
170 	u_register_t scr_el3;
171 	el3_state_t *state;
172 
173 	state = get_el3state_ctx(ctx);
174 	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
175 
176 	scr_el3 |= SCR_NS_BIT | SCR_NSE_BIT;
177 
178 	/* CSV2 version 2 and above */
179 	if (is_feat_csv2_2_supported()) {
180 		/* Enable access to the SCXTNUM_ELx registers. */
181 		scr_el3 |= SCR_EnSCXT_BIT;
182 	}
183 
184 	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
185 }
186 #endif /* ENABLE_RME */
187 
188 /******************************************************************************
189  * This function performs initializations that are specific to NON-SECURE state
190  * and updates the cpu context specified by 'ctx'.
191  *****************************************************************************/
192 static void setup_ns_context(cpu_context_t *ctx, const struct entry_point_info *ep)
193 {
194 	u_register_t scr_el3;
195 	el3_state_t *state;
196 
197 	state = get_el3state_ctx(ctx);
198 	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
199 
200 	/* SCR_NS: Set the NS bit */
201 	scr_el3 |= SCR_NS_BIT;
202 
203 	/* Allow access to Allocation Tags when FEAT_MTE2 is implemented and enabled. */
204 	if (is_feat_mte2_supported()) {
205 		scr_el3 |= SCR_ATA_BIT;
206 	}
207 
208 #if !CTX_INCLUDE_PAUTH_REGS
209 	/*
210 	 * The Pointer Authentication feature, if present, is always enabled
211 	 * by default for Non-secure lower exception levels. We do not have an
212 	 * explicit flag to set it.
213 	 * The CTX_INCLUDE_PAUTH_REGS flag is explicitly used to enable it for
214 	 * the lower exception levels of the secure and realm worlds.
215 	 *
216 	 * To prevent leakage between the worlds during a world switch,
217 	 * we enable it only for the non-secure world.
218 	 *
219 	 * If the Secure/realm world wants to use pointer authentication,
220 	 * CTX_INCLUDE_PAUTH_REGS must be explicitly set to 1, in which case
221 	 * it will be enabled globally for all the contexts.
222 	 *
223 	 * SCR_EL3.API: Set to one to not trap any PAuth instructions at ELs
224 	 *  other than EL3
225 	 *
226 	 * SCR_EL3.APK: Set to one to not trap any PAuth key values at ELs other
227 	 *  than EL3
228 	 */
229 	scr_el3 |= SCR_API_BIT | SCR_APK_BIT;
230 
231 #endif /* !CTX_INCLUDE_PAUTH_REGS */
232 
233 #if HANDLE_EA_EL3_FIRST_NS
234 	/* SCR_EL3.EA: Route External Abort and SError Interrupt to EL3. */
235 	scr_el3 |= SCR_EA_BIT;
236 #endif
237 
238 #if RAS_TRAP_NS_ERR_REC_ACCESS
239 	/*
240 	 * SCR_EL3.TERR: Trap Error record accesses. Accesses to the RAS ERR
241 	 * and RAS ERX registers from EL1 and EL2 (from any security state)
242 	 * are trapped to EL3.
243 	 * Set here to trap only for NS EL1/EL2.
244 	 *
245 	 */
246 	scr_el3 |= SCR_TERR_BIT;
247 #endif
248 
249 	/* CSV2 version 2 and above */
250 	if (is_feat_csv2_2_supported()) {
251 		/* Enable access to the SCXTNUM_ELx registers. */
252 		scr_el3 |= SCR_EnSCXT_BIT;
253 	}
254 
255 #ifdef IMAGE_BL31
256 	/*
257 	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
258 	 *  indicated by the interrupt routing model for BL31.
259 	 */
260 	scr_el3 |= get_scr_el3_from_routing_model(NON_SECURE);
261 #endif
262 	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
263 
264 	/* Initialize EL2 context registers */
265 #if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31)
266 
267 	/*
268 	 * Initialize SCTLR_EL2 context register with reset value.
269 	 */
270 	write_el2_ctx_common(get_el2_sysregs_ctx(ctx), sctlr_el2, SCTLR_EL2_RES1);
271 
272 	if (is_feat_hcx_supported()) {
273 		/*
274 		 * Initialize register HCRX_EL2 with its init value.
275 		 * The value of HCRX_EL2 is UNKNOWN on reset. If it is not
276 		 * properly initialized, this can lead to unexpected behavior
277 		 * in lower ELs that have not been updated since the
278 		 * introduction of this feature, especially for bits that
279 		 * enable/disable traps.
280 		 */
281 		write_el2_ctx_hcx(get_el2_sysregs_ctx(ctx), hcrx_el2,
282 			HCRX_EL2_INIT_VAL);
283 	}
284 
285 	if (is_feat_fgt_supported()) {
286 		/*
287 		 * Initialize HFG*_EL2 registers with a default value so legacy
288 		 * systems unaware of FEAT_FGT do not get trapped due to their lack
289 		 * of initialization for this feature.
290 		 */
291 		write_el2_ctx_fgt(get_el2_sysregs_ctx(ctx), hfgitr_el2,
292 			HFGITR_EL2_INIT_VAL);
293 		write_el2_ctx_fgt(get_el2_sysregs_ctx(ctx), hfgrtr_el2,
294 			HFGRTR_EL2_INIT_VAL);
295 		write_el2_ctx_fgt(get_el2_sysregs_ctx(ctx), hfgwtr_el2,
296 			HFGWTR_EL2_INIT_VAL);
297 	}
298 #else
299 	/* Initialize EL1 context registers */
300 	setup_el1_context(ctx, ep);
301 #endif /* (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) */
302 
303 	manage_extensions_nonsecure(ctx);
304 }
305 
306 /*******************************************************************************
307  * The following function performs initialization of the cpu_context 'ctx'
308  * for first use that is common to all security states, and sets the
309  * initial entrypoint state as specified by the entry_point_info structure.
310  *
311  * The EE and ST attributes are used to configure the endianness and secure
312  * timer availability for the new execution context.
313  ******************************************************************************/
314 static void setup_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
315 {
316 	u_register_t scr_el3;
317 	u_register_t mdcr_el3;
318 	el3_state_t *state;
319 	gp_regs_t *gp_regs;
320 
321 	state = get_el3state_ctx(ctx);
322 
323 	/* Clear any residual register values from the context */
324 	zeromem(ctx, sizeof(*ctx));
325 
326 	/*
327 	 * The lower-EL context is zeroed so that no stale values leak to a world.
328 	 * It is assumed that an all-zero lower-EL context is good enough for it
329 	 * to boot correctly. However, there are very few registers where this
330 	 * is not true and some values need to be recreated.
331 	 */
332 #if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31)
333 	el2_sysregs_t *el2_ctx = get_el2_sysregs_ctx(ctx);
334 
335 	/*
336 	 * These bits are set in the gicv3 driver. Losing them (especially the
337 	 * SRE bit) is problematic for all worlds. Hence, recreate them.
338 	 */
339 	u_register_t icc_sre_el2_val = ICC_SRE_DIB_BIT | ICC_SRE_DFB_BIT |
340 				   ICC_SRE_EN_BIT | ICC_SRE_SRE_BIT;
341 	write_el2_ctx_common(el2_ctx, icc_sre_el2, icc_sre_el2_val);
342 
343 	/*
344 	 * The actlr_el2 register can be initialized in platform's reset handler
345 	 * and it may contain access control bits (e.g. CLUSTERPMUEN bit).
346 	 */
347 	write_el2_ctx_common(el2_ctx, actlr_el2, read_actlr_el2());
348 #endif /* (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) */
349 
350 	/* Start with a clean SCR_EL3 copy as all relevant values are set */
351 	scr_el3 = SCR_RESET_VAL;
352 
353 	/*
354 	 * SCR_EL3.TWE: Set to zero so that execution of WFE instructions at
355 	 *  EL2, EL1 and EL0 are not trapped to EL3.
356 	 *
357 	 * SCR_EL3.TWI: Set to zero so that execution of WFI instructions at
358 	 *  EL2, EL1 and EL0 are not trapped to EL3.
359 	 *
360 	 * SCR_EL3.SMD: Set to zero to enable SMC calls at EL1 and above, from
361 	 *  both Security states and both Execution states.
362 	 *
363 	 * SCR_EL3.SIF: Set to one to disable secure instruction execution from
364 	 *  Non-secure memory.
365 	 */
366 	scr_el3 &= ~(SCR_TWE_BIT | SCR_TWI_BIT | SCR_SMD_BIT);
367 
368 	scr_el3 |= SCR_SIF_BIT;
369 
370 	/*
371 	 * SCR_EL3.RW: Set the execution state, AArch32 or AArch64, for next
372 	 *  Exception level as specified by SPSR.
373 	 */
374 	if (GET_RW(ep->spsr) == MODE_RW_64) {
375 		scr_el3 |= SCR_RW_BIT;
376 	}
377 
378 	/*
379 	 * SCR_EL3.ST: Traps Secure EL1 accesses to the Counter-timer Physical
380 	 * Secure timer registers to EL3, from AArch64 state only, if specified
381 	 * by the entrypoint attributes. If SEL2 is present and enabled, the ST
382 	 * bit always behaves as 1 (i.e. secure physical timer register access
383 	 * is not trapped)
384 	 */
385 	if (EP_GET_ST(ep->h.attr) != 0U) {
386 		scr_el3 |= SCR_ST_BIT;
387 	}
388 
389 	/*
390 	 * If FEAT_HCX is enabled, enable access to HCRX_EL2 by setting
391 	 * SCR_EL3.HXEn.
392 	 */
393 	if (is_feat_hcx_supported()) {
394 		scr_el3 |= SCR_HXEn_BIT;
395 	}
396 
397 	/*
398 	 * If FEAT_RNG_TRAP is enabled, all reads of the RNDR and RNDRRS
399 	 * registers are trapped to EL3.
400 	 */
401 #if ENABLE_FEAT_RNG_TRAP
402 	scr_el3 |= SCR_TRNDR_BIT;
403 #endif
404 
405 #if FAULT_INJECTION_SUPPORT
406 	/* Enable fault injection from lower ELs */
407 	scr_el3 |= SCR_FIEN_BIT;
408 #endif
409 
410 #if CTX_INCLUDE_PAUTH_REGS
411 	/*
412 	 * Enable Pointer Authentication globally for all the worlds.
413 	 *
414 	 * SCR_EL3.API: Set to one to not trap any PAuth instructions at ELs
415 	 *  other than EL3
416 	 *
417 	 * SCR_EL3.APK: Set to one to not trap any PAuth key values at ELs other
418 	 *  than EL3
419 	 */
420 	scr_el3 |= SCR_API_BIT | SCR_APK_BIT;
421 #endif /* CTX_INCLUDE_PAUTH_REGS */
422 
423 	/*
424 	 * SCR_EL3.TCR2EN: Enable access to TCR2_ELx for AArch64 if present.
425 	 */
426 	if (is_feat_tcr2_supported() && (GET_RW(ep->spsr) == MODE_RW_64)) {
427 		scr_el3 |= SCR_TCR2EN_BIT;
428 	}
429 
430 	/*
431 	 * SCR_EL3.PIEN: Enable permission indirection and overlay
432 	 * registers for AArch64 if present.
433 	 */
434 	if (is_feat_sxpie_supported() || is_feat_sxpoe_supported()) {
435 		scr_el3 |= SCR_PIEN_BIT;
436 	}
437 
438 	/*
439 	 * SCR_EL3.GCSEn: Enable GCS registers for AArch64 if present.
440 	 */
441 	if ((is_feat_gcs_supported()) && (GET_RW(ep->spsr) == MODE_RW_64)) {
442 		scr_el3 |= SCR_GCSEn_BIT;
443 	}
444 
445 	/*
446 	 * SCR_EL3.HCE: Enable HVC instructions if next execution state is
447 	 * AArch64 and next EL is EL2, or if next execution state is AArch32 and
448 	 * next mode is Hyp.
449 	 * SCR_EL3.FGTEn: Enable Fine Grained Virtualization Traps under the
450 	 * same conditions as HVC instructions and when the processor supports
451 	 * ARMv8.6-FGT.
452 	 * SCR_EL3.ECVEn: Enable Enhanced Counter Virtualization (ECV)
453 	 * CNTPOFF_EL2 register under the same conditions as HVC instructions
454 	 * and when the processor supports ECV.
455 	 */
456 	if (((GET_RW(ep->spsr) == MODE_RW_64) && (GET_EL(ep->spsr) == MODE_EL2))
457 	    || ((GET_RW(ep->spsr) != MODE_RW_64)
458 		&& (GET_M32(ep->spsr) == MODE32_hyp))) {
459 		scr_el3 |= SCR_HCE_BIT;
460 
461 		if (is_feat_fgt_supported()) {
462 			scr_el3 |= SCR_FGTEN_BIT;
463 		}
464 
465 		if (is_feat_ecv_supported()) {
466 			scr_el3 |= SCR_ECVEN_BIT;
467 		}
468 	}
469 
470 	/* Enable WFE trap delay in SCR_EL3 if supported and configured */
471 	if (is_feat_twed_supported()) {
472 		/* Set delay in SCR_EL3 */
473 		scr_el3 &= ~(SCR_TWEDEL_MASK << SCR_TWEDEL_SHIFT);
474 		scr_el3 |= ((TWED_DELAY & SCR_TWEDEL_MASK)
475 				<< SCR_TWEDEL_SHIFT);
476 
477 		/* Enable WFE delay */
478 		scr_el3 |= SCR_TWEDEn_BIT;
479 	}
480 
481 #if IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2
482 	/* Enable S-EL2 if FEAT_SEL2 is implemented for all the contexts. */
483 	if (is_feat_sel2_supported()) {
484 		scr_el3 |= SCR_EEL2_BIT;
485 	}
486 #endif /* (IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2) */
487 
488 	/*
489 	 * Populate the EL3 state so that we have the right context
490 	 * before doing ERET
491 	 */
492 	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
493 	write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
494 	write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);
495 
496 	/* Start with a clean MDCR_EL3 copy as all relevant values are set */
497 	mdcr_el3 = MDCR_EL3_RESET_VAL;
498 
499 	/* ---------------------------------------------------------------------
500 	 * Initialise MDCR_EL3, setting all fields rather than relying on hw.
501 	 * Some fields are architecturally UNKNOWN on reset.
502 	 *
503 	 * MDCR_EL3.SDD: Set to one to disable AArch64 Secure self-hosted debug.
504 	 *  Debug exceptions, other than Breakpoint Instruction exceptions, are
505 	 *  disabled from all ELs in Secure state.
506 	 *
507 	 * MDCR_EL3.SPD32: Set to 0b10 to disable AArch32 Secure self-hosted
508 	 *  privileged debug from S-EL1.
509 	 *
510 	 * MDCR_EL3.TDOSA: Set to zero so that EL2 and EL1 System register
511 	 *  accesses to the powerdown debug registers do not trap to EL3.
512 	 *
513 	 * MDCR_EL3.TDA: Set to zero to allow EL0, EL1 and EL2 access to the
514 	 *  debug registers, other than those registers that are controlled by
515 	 *  MDCR_EL3.TDOSA.
516 	 */
517 	mdcr_el3 |= ((MDCR_SDD_BIT | MDCR_SPD32(MDCR_SPD32_DISABLE))
518 			& ~(MDCR_TDA_BIT | MDCR_TDOSA_BIT));
519 	write_ctx_reg(state, CTX_MDCR_EL3, mdcr_el3);
520 
521 	/*
522 	 * Configure MDCR_EL3 register as applicable for each world
523 	 * (NS/Secure/Realm) context.
524 	 */
525 	manage_extensions_common(ctx);
526 
527 	/*
528 	 * Store the X0-X7 values from the entrypoint into the context.
529 	 * Use memcpy as we are in control of the layout of the structures.
530 	 */
531 	gp_regs = get_gpregs_ctx(ctx);
532 	memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
533 }
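/*
 * Illustrative sketch: the memcpy above relies on aapcs64_params_t laying
 * out arg0..arg7 contiguously and on the GP register context beginning
 * with x0..x7. A caller would typically seed the arguments before invoking
 * cm_setup_context(), roughly (values are hypothetical):
 *
 *   entry_point_info_t ep;
 *   ep.args.arg0 = ctx_handle;   // lands in x0 at ERET
 *   ep.args.arg1 = dtb_base;     // lands in x1 at ERET
 *
 * Any argX fields left at zero simply zero the matching GPRs.
 */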
534 
535 /*******************************************************************************
536  * Context management library initialization routine. This library is used by
537  * runtime services to share pointers to 'cpu_context' structures for secure
538  * non-secure and realm states. Management of the structures and their associated
539  * memory is not done by the context management library e.g. the PSCI service
540  * manages the cpu context used for entry from and exit to the non-secure state.
541  * The Secure payload dispatcher service manages the context(s) corresponding to
542  * the secure state. It also uses this library to get access to the non-secure
543  * state cpu context pointers.
544  * Lastly, this library provides the API to make SP_EL3 point to the cpu context
545  * which will be used for programming an entry into a lower EL. The same context
546  * will be used to save state upon exception entry from that EL.
547  ******************************************************************************/
548 void __init cm_init(void)
549 {
550 	/*
551 	 * The context management library has only global data to initialize, but
552 	 * that will be done when the BSS is zeroed out.
553 	 */
554 }
555 
556 /*******************************************************************************
557  * This is the high-level function used to initialize the cpu_context 'ctx' for
558  * first use. It performs initializations that are common to all security states
559  * and initializations specific to the security state specified in 'ep'
560  ******************************************************************************/
561 void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
562 {
563 	unsigned int security_state;
564 
565 	assert(ctx != NULL);
566 
567 	/*
568 	 * Perform initializations that are common
569 	 * to all security states
570 	 */
571 	setup_context_common(ctx, ep);
572 
573 	security_state = GET_SECURITY_STATE(ep->h.attr);
574 
575 	/* Perform security state specific initializations */
576 	switch (security_state) {
577 	case SECURE:
578 		setup_secure_context(ctx, ep);
579 		break;
580 #if ENABLE_RME
581 	case REALM:
582 		setup_realm_context(ctx, ep);
583 		break;
584 #endif
585 	case NON_SECURE:
586 		setup_ns_context(ctx, ep);
587 		break;
588 	default:
589 		ERROR("Invalid security state\n");
590 		panic();
591 		break;
592 	}
593 }
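/*
 * Usage sketch: a typical caller fills in an entry_point_info_t and passes
 * it here, usually via the wrappers below. The SPSR composition shown is
 * an illustrative assumption for an AArch64 EL2 entry; ns_image_base is a
 * hypothetical load address:
 *
 *   entry_point_info_t ep = {0};
 *   SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, EP_NON_SECURE);
 *   ep.pc = ns_image_base;
 *   ep.spsr = SPSR_64(MODE_EL2, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
 *   cm_setup_context(cm_get_context(NON_SECURE), &ep);
 */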
594 
595 /*******************************************************************************
596  * Enable architecture extensions for EL3 execution. This function only updates
597  * registers in-place which are expected to either never change or be
598  * overwritten by el3_exit.
599  ******************************************************************************/
600 #if IMAGE_BL31
601 void cm_manage_extensions_el3(void)
602 {
603 	if (is_feat_amu_supported()) {
604 		amu_init_el3();
605 	}
606 
607 	if (is_feat_sme_supported()) {
608 		sme_init_el3();
609 	}
610 
611 	pmuv3_init_el3();
612 }
613 #endif /* IMAGE_BL31 */
614 
615 /******************************************************************************
616  * Function to initialise the registers with the RESET values in the context
617  * memory, which are maintained per world.
618  ******************************************************************************/
619 #if IMAGE_BL31
620 void cm_el3_arch_init_per_world(per_world_context_t *per_world_ctx)
621 {
622 	/*
623 	 * Initialise CPTR_EL3, setting all fields rather than relying on hw.
624 	 *
625 	 * CPTR_EL3.TFP: Set to zero so that accesses to the V- or Z- registers
626 	 *  by Advanced SIMD, floating-point or SVE instructions (if
627 	 *  implemented) do not trap to EL3.
628 	 *
629 	 * CPTR_EL3.TCPAC: Set to zero so that accesses to CPACR_EL1,
630 	 *  CPTR_EL2, CPACR, or HCPTR do not trap to EL3.
631 	 */
632 	uint64_t cptr_el3 = CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TFP_BIT);
633 
634 	per_world_ctx->ctx_cptr_el3 = cptr_el3;
635 
636 	/*
637 	 * Initialize MPAM3_EL3 to its default reset value
638 	 *
639 	 * MPAM3_EL3_RESET_VAL sets the MPAM3_EL3.TRAPLOWER bit that forces
640 	 * all lower ELn MPAM register accesses to trap to EL3.
641 	 */
642 
643 	per_world_ctx->ctx_mpam3_el3 = MPAM3_EL3_RESET_VAL;
644 }
645 #endif /* IMAGE_BL31 */
646 
647 /*******************************************************************************
648  * Initialise per_world_context for Non-Secure world.
649  * This function enables the architecture extensions which have the same
650  * value across all cores for the non-secure world.
651  ******************************************************************************/
652 #if IMAGE_BL31
653 void manage_extensions_nonsecure_per_world(void)
654 {
655 	cm_el3_arch_init_per_world(&per_world_context[CPU_CONTEXT_NS]);
656 
657 	if (is_feat_sme_supported()) {
658 		sme_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
659 	}
660 
661 	if (is_feat_sve_supported()) {
662 		sve_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
663 	}
664 
665 	if (is_feat_amu_supported()) {
666 		amu_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
667 	}
668 
669 	if (is_feat_sys_reg_trace_supported()) {
670 		sys_reg_trace_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
671 	}
672 
673 	if (is_feat_mpam_supported()) {
674 		mpam_enable_per_world(&per_world_context[CPU_CONTEXT_NS]);
675 	}
676 }
677 #endif /* IMAGE_BL31 */
678 
679 /*******************************************************************************
680  * Initialise per_world_context for Secure world.
681  * This function enables the architecture extensions which have the same
682  * value across all cores for the secure world.
683  ******************************************************************************/
684 static void manage_extensions_secure_per_world(void)
685 {
686 #if IMAGE_BL31
687 	cm_el3_arch_init_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
688 
689 	if (is_feat_sme_supported()) {
690 
691 		if (ENABLE_SME_FOR_SWD) {
692 		/*
693 		 * Enable SME, SVE, FPU/SIMD in the secure context; the SPM must
694 		 * ensure the SME, SVE and FPU/SIMD contexts are properly managed.
695 		 */
696 			sme_enable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
697 		} else {
698 		/*
699 		 * Disable SME, SVE, FPU/SIMD in the secure context so the
700 		 * non-secure world can safely use the associated registers.
701 		 */
702 			sme_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
703 		}
704 	}
705 	if (is_feat_sve_supported()) {
706 		if (ENABLE_SVE_FOR_SWD) {
707 		/*
708 		 * Enable SVE and FPU in the secure context; the SPM must ensure
709 		 * that the SVE and FPU register contexts are properly managed.
710 		 */
711 			sve_enable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
712 		} else {
713 		/*
714 		 * Disable SVE and FPU in the secure context so the non-secure
715 		 * world can safely use them.
716 		 */
717 			sve_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
718 		}
719 	}
720 
721 	/* NS can access this but Secure shouldn't */
722 	if (is_feat_sys_reg_trace_supported()) {
723 		sys_reg_trace_disable_per_world(&per_world_context[CPU_CONTEXT_SECURE]);
724 	}
725 
726 	has_secure_perworld_init = true;
727 #endif /* IMAGE_BL31 */
728 }
729 
730 /*******************************************************************************
731  * Enable architecture extensions on first entry to the Non-secure world
732  * only, and disable them for the secure world.
733  *
734  * NOTE: Arch features which can be enabled only for the non-secure world
735  * and disabled for the secure world are grouped here, as the MDCR_EL3
736  * context value remains the same across the worlds.
737  ******************************************************************************/
738 static void manage_extensions_common(cpu_context_t *ctx)
739 {
740 #if IMAGE_BL31
741 	if (is_feat_spe_supported()) {
742 		/*
743 		 * Enable FEAT_SPE for Non-Secure and prohibit for Secure state.
744 		 */
745 		spe_enable(ctx);
746 	}
747 
748 	if (is_feat_trbe_supported()) {
749 		/*
750 		 * Enable FEAT_TRBE for Non-Secure and prohibit for Secure and
751 		 * Realm state.
752 		 */
753 		trbe_enable(ctx);
754 	}
755 
756 	if (is_feat_trf_supported()) {
757 		/*
758 		 * Enable FEAT_TRF for Non-Secure and prohibit for Secure state.
759 		 */
760 		trf_enable(ctx);
761 	}
762 
763 	if (is_feat_brbe_supported()) {
764 		/*
765 		 * Enable FEAT_BRBE for Non-Secure and prohibit for Secure state.
766 		 */
767 		brbe_enable(ctx);
768 	}
769 #endif /* IMAGE_BL31 */
770 }
771 
772 /*******************************************************************************
773  * Enable architecture extensions on first entry to Non-secure world.
774  ******************************************************************************/
775 static void manage_extensions_nonsecure(cpu_context_t *ctx)
776 {
777 #if IMAGE_BL31
778 	if (is_feat_amu_supported()) {
779 		amu_enable(ctx);
780 	}
781 
782 	if (is_feat_sme_supported()) {
783 		sme_enable(ctx);
784 	}
785 
786 	if (is_feat_fgt2_supported()) {
787 		fgt2_enable(ctx);
788 	}
789 
790 	if (is_feat_debugv8p9_supported()) {
791 		debugv8p9_extended_bp_wp_enable(ctx);
792 	}
793 
794 	pmuv3_enable(ctx);
795 #endif /* IMAGE_BL31 */
796 }
797 
798 /* TODO: move to lib/extensions/pauth when it has been ported to FEAT_STATE */
799 static __unused void enable_pauth_el2(void)
800 {
801 	u_register_t hcr_el2 = read_hcr_el2();
802 	/*
803 	 * For Armv8.3 pointer authentication feature, disable traps to EL2 when
804 	 *  accessing key registers or using pointer authentication instructions
805 	 *  from lower ELs.
806 	 */
807 	hcr_el2 |= (HCR_API_BIT | HCR_APK_BIT);
808 
809 	write_hcr_el2(hcr_el2);
810 }
811 
812 #if INIT_UNUSED_NS_EL2
813 /*******************************************************************************
814  * Enable architecture extensions in-place at EL2 on first entry to Non-secure
815  * world when EL2 is empty and unused.
816  ******************************************************************************/
817 static void manage_extensions_nonsecure_el2_unused(void)
818 {
819 #if IMAGE_BL31
820 	if (is_feat_spe_supported()) {
821 		spe_init_el2_unused();
822 	}
823 
824 	if (is_feat_amu_supported()) {
825 		amu_init_el2_unused();
826 	}
827 
828 	if (is_feat_mpam_supported()) {
829 		mpam_init_el2_unused();
830 	}
831 
832 	if (is_feat_trbe_supported()) {
833 		trbe_init_el2_unused();
834 	}
835 
836 	if (is_feat_sys_reg_trace_supported()) {
837 		sys_reg_trace_init_el2_unused();
838 	}
839 
840 	if (is_feat_trf_supported()) {
841 		trf_init_el2_unused();
842 	}
843 
844 	pmuv3_init_el2_unused();
845 
846 	if (is_feat_sve_supported()) {
847 		sve_init_el2_unused();
848 	}
849 
850 	if (is_feat_sme_supported()) {
851 		sme_init_el2_unused();
852 	}
853 
854 #if ENABLE_PAUTH
855 	enable_pauth_el2();
856 #endif /* ENABLE_PAUTH */
857 #endif /* IMAGE_BL31 */
858 }
859 #endif /* INIT_UNUSED_NS_EL2 */
860 
861 /*******************************************************************************
862  * Enable architecture extensions on first entry to Secure world.
863  ******************************************************************************/
864 static void manage_extensions_secure(cpu_context_t *ctx)
865 {
866 #if IMAGE_BL31
867 	if (is_feat_sme_supported()) {
868 		if (ENABLE_SME_FOR_SWD) {
869 		/*
870 		 * Enable SME, SVE, FPU/SIMD in the secure context; the secure manager
871 		 * must ensure the SME, SVE and FPU/SIMD contexts are properly managed.
872 		 */
873 			sme_init_el3();
874 			sme_enable(ctx);
875 		} else {
876 		/*
877 		 * Disable SME, SVE, FPU/SIMD in the secure context so the
878 		 * non-secure world can safely use the associated registers.
879 		 */
880 			sme_disable(ctx);
881 		}
882 	}
883 #endif /* IMAGE_BL31 */
884 }
885 
886 #if !IMAGE_BL1
887 /*******************************************************************************
888  * The following function initializes the cpu_context for a CPU specified by
889  * its `cpu_idx` for first use, and sets the initial entrypoint state as
890  * specified by the entry_point_info structure.
891  ******************************************************************************/
892 void cm_init_context_by_index(unsigned int cpu_idx,
893 			      const entry_point_info_t *ep)
894 {
895 	cpu_context_t *ctx;
896 	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
897 	cm_setup_context(ctx, ep);
898 }
899 #endif /* !IMAGE_BL1 */
900 
901 /*******************************************************************************
902  * The following function initializes the cpu_context for the current CPU
903  * for first use, and sets the initial entrypoint state as specified by the
904  * entry_point_info structure.
905  ******************************************************************************/
906 void cm_init_my_context(const entry_point_info_t *ep)
907 {
908 	cpu_context_t *ctx;
909 	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
910 	cm_setup_context(ctx, ep);
911 }
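/*
 * Usage sketch: in a PSCI-style warm-boot flow the calls above pair up
 * roughly as follows (the exact call sites live in the PSCI library; this
 * outline is only indicative):
 *
 *   cm_init_my_context(&ep);    // build this CPU's context for 'ep'
 *   cm_prepare_el3_exit_ns();   // program EL1/EL2 state, set SP_EL3
 *   // ... followed by el3_exit to ERET into the lower EL
 */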
912 
913 /* EL2 is present but unused and needs to be disabled safely. SCTLR_EL2 can be ignored. */
914 static void init_nonsecure_el2_unused(cpu_context_t *ctx)
915 {
916 #if INIT_UNUSED_NS_EL2
917 	u_register_t hcr_el2 = HCR_RESET_VAL;
918 	u_register_t mdcr_el2;
919 	u_register_t scr_el3;
920 
921 	scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
922 
923 	/* Set EL2 register width: Set HCR_EL2.RW to match SCR_EL3.RW */
924 	if ((scr_el3 & SCR_RW_BIT) != 0U) {
925 		hcr_el2 |= HCR_RW_BIT;
926 	}
927 
928 	write_hcr_el2(hcr_el2);
929 
930 	/*
931 	 * Initialise CPTR_EL2 setting all fields rather than relying on the hw.
932 	 * All fields have architecturally UNKNOWN reset values.
933 	 */
934 	write_cptr_el2(CPTR_EL2_RESET_VAL);
935 
936 	/*
937 	 * Initialise CNTHCTL_EL2. All fields are architecturally UNKNOWN on
938 	 * reset and are set to zero except for field(s) listed below.
939 	 *
940 	 * CNTHCTL_EL2.EL1PCEN: Set to one to disable traps to Hyp mode of
941 	 * Non-secure EL0 and EL1 accesses to the physical timer registers.
942 	 *
943 	 * CNTHCTL_EL2.EL1PCTEN: Set to one to disable traps to Hyp mode of
944 	 * Non-secure EL0 and EL1 accesses to the physical counter registers.
945 	 */
946 	write_cnthctl_el2(CNTHCTL_RESET_VAL | EL1PCEN_BIT | EL1PCTEN_BIT);
947 
948 	/*
949 	 * Initialise CNTVOFF_EL2 to zero as it resets to an architecturally
950 	 * UNKNOWN value.
951 	 */
952 	write_cntvoff_el2(0);
953 
954 	/*
955 	 * Set VPIDR_EL2 and VMPIDR_EL2 to match MIDR_EL1 and MPIDR_EL1
956 	 * respectively.
957 	 */
958 	write_vpidr_el2(read_midr_el1());
959 	write_vmpidr_el2(read_mpidr_el1());
960 
961 	/*
962 	 * Initialise VTTBR_EL2. All fields are architecturally UNKNOWN on reset.
963 	 *
964 	 * VTTBR_EL2.VMID: Set to zero. Even though EL1&0 stage 2 address
965 	 * translation is disabled, cache maintenance operations depend on the
966 	 * VMID.
967 	 *
968 	 * VTTBR_EL2.BADDR: Set to zero as EL1&0 stage 2 address translation is
969 	 * disabled.
970 	 */
971 	write_vttbr_el2(VTTBR_RESET_VAL &
972 		     ~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT) |
973 		       (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));
974 
975 	/*
976 	 * Initialise MDCR_EL2, setting all fields rather than relying on hw.
977 	 * Some fields are architecturally UNKNOWN on reset.
978 	 *
979 	 * MDCR_EL2.TDRA: Set to zero so that Non-secure EL0 and EL1 System
980 	 * register accesses to the Debug ROM registers are not trapped to EL2.
981 	 *
982 	 * MDCR_EL2.TDOSA: Set to zero so that Non-secure EL1 System register
983 	 * accesses to the powerdown debug registers are not trapped to EL2.
984 	 *
985 	 * MDCR_EL2.TDA: Set to zero so that System register accesses to the
986 	 * debug registers do not trap to EL2.
987 	 *
988 	 * MDCR_EL2.TDE: Set to zero so that debug exceptions are not routed to
989 	 * EL2.
990 	 */
991 	mdcr_el2 = MDCR_EL2_RESET_VAL &
992 		 ~(MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT | MDCR_EL2_TDA_BIT |
993 		   MDCR_EL2_TDE_BIT);
994 
995 	write_mdcr_el2(mdcr_el2);
996 
997 	/*
998 	 * Initialise HSTR_EL2. All fields are architecturally UNKNOWN on reset.
999 	 *
1000 	 * HSTR_EL2.T<n>: Set all these fields to zero so that Non-secure EL0 or
1001 	 * EL1 accesses to System registers do not trap to EL2.
1002 	 */
1003 	write_hstr_el2(HSTR_EL2_RESET_VAL & ~(HSTR_EL2_T_MASK));
1004 
1005 	/*
1006 	 * Initialise CNTHP_CTL_EL2. All fields are architecturally UNKNOWN on
1007 	 * reset.
1008 	 *
1009 	 * CNTHP_CTL_EL2.ENABLE: Set to zero to disable the EL2 physical timer
1010 	 * and prevent timer interrupts.
1011 	 */
1012 	write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL & ~(CNTHP_CTL_ENABLE_BIT));
1013 
1014 	manage_extensions_nonsecure_el2_unused();
1015 #endif /* INIT_UNUSED_NS_EL2 */
1016 }
1017 
1018 /*******************************************************************************
1019  * Prepare the CPU system registers for first entry into realm, secure, or
1020  * normal world.
1021  *
1022  * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized.
1023  * If execution is requested to non-secure EL1 or svc mode, and the CPU supports
1024  * EL2, then EL2 is disabled by configuring all necessary EL2 registers.
1025  * For all entries, the EL1 registers are initialized from the cpu_context.
1026  ******************************************************************************/
1027 void cm_prepare_el3_exit(uint32_t security_state)
1028 {
1029 	u_register_t sctlr_el2, scr_el3;
1030 	cpu_context_t *ctx = cm_get_context(security_state);
1031 
1032 	assert(ctx != NULL);
1033 
1034 	if (security_state == NON_SECURE) {
1035 		uint64_t el2_implemented = el_implemented(2);
1036 
1037 		scr_el3 = read_ctx_reg(get_el3state_ctx(ctx),
1038 						 CTX_SCR_EL3);
1039 
1040 		if (el2_implemented != EL_IMPL_NONE) {
1041 
1042 			/*
1043 			 * If context is not being used for EL2, initialize
1044 			 * HCRX_EL2 with its init value here.
1045 			 */
1046 			if (is_feat_hcx_supported()) {
1047 				write_hcrx_el2(HCRX_EL2_INIT_VAL);
1048 			}
1049 
1050 			/*
1051 			 * Initialize Fine-grained trap registers introduced
1052 			 * by FEAT_FGT so all traps are initially disabled when
1053 			 * switching to EL2 or a lower EL, preventing undesired
1054 			 * behavior.
1055 			 */
1056 			if (is_feat_fgt_supported()) {
1057 				/*
1058 				 * Initialize HFG*_EL2 registers with a default
1059 				 * value so legacy systems unaware of FEAT_FGT
1060 				 * do not get trapped due to their lack of
1061 				 * initialization for this feature.
1062 				 */
1063 				write_hfgitr_el2(HFGITR_EL2_INIT_VAL);
1064 				write_hfgrtr_el2(HFGRTR_EL2_INIT_VAL);
1065 				write_hfgwtr_el2(HFGWTR_EL2_INIT_VAL);
1066 			}
1067 
1068 			/* Condition to ensure EL2 is being used. */
1069 			if ((scr_el3 & SCR_HCE_BIT) != 0U) {
1070 				/* Initialize SCTLR_EL2 register with reset value. */
1071 				sctlr_el2 = SCTLR_EL2_RES1;
1072 #if ERRATA_A75_764081
1073 				/*
1074 				 * If the workaround for Cortex-A75 erratum
1075 				 * 764081 is used, set SCTLR_EL2.IESB to enable
1076 				 * Implicit Error Synchronization Barrier.
1077 				 */
1078 				sctlr_el2 |= SCTLR_IESB_BIT;
1079 #endif
1080 				write_sctlr_el2(sctlr_el2);
1081 			} else {
1082 				/*
1083 				 * ((scr_el3 & SCR_HCE_BIT) == 0)
1084 				 * EL2 implemented but unused.
1085 				 */
1086 				init_nonsecure_el2_unused(ctx);
1087 			}
1088 		}
1089 	}
1090 #if (!CTX_INCLUDE_EL2_REGS)
1091 	/* Restore EL1 system registers, only when CTX_INCLUDE_EL2_REGS=0 */
1092 	cm_el1_sysregs_context_restore(security_state);
1093 #endif
1094 	cm_set_next_eret_context(security_state);
1095 }
1096 
1097 #if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31)
1098 
1099 static void el2_sysregs_context_save_fgt(el2_sysregs_t *ctx)
1100 {
1101 	write_el2_ctx_fgt(ctx, hdfgrtr_el2, read_hdfgrtr_el2());
1102 	if (is_feat_amu_supported()) {
1103 		write_el2_ctx_fgt(ctx, hafgrtr_el2, read_hafgrtr_el2());
1104 	}
1105 	write_el2_ctx_fgt(ctx, hdfgwtr_el2, read_hdfgwtr_el2());
1106 	write_el2_ctx_fgt(ctx, hfgitr_el2, read_hfgitr_el2());
1107 	write_el2_ctx_fgt(ctx, hfgrtr_el2, read_hfgrtr_el2());
1108 	write_el2_ctx_fgt(ctx, hfgwtr_el2, read_hfgwtr_el2());
1109 }
1110 
1111 static void el2_sysregs_context_restore_fgt(el2_sysregs_t *ctx)
1112 {
1113 	write_hdfgrtr_el2(read_el2_ctx_fgt(ctx, hdfgrtr_el2));
1114 	if (is_feat_amu_supported()) {
1115 		write_hafgrtr_el2(read_el2_ctx_fgt(ctx, hafgrtr_el2));
1116 	}
1117 	write_hdfgwtr_el2(read_el2_ctx_fgt(ctx, hdfgwtr_el2));
1118 	write_hfgitr_el2(read_el2_ctx_fgt(ctx, hfgitr_el2));
1119 	write_hfgrtr_el2(read_el2_ctx_fgt(ctx, hfgrtr_el2));
1120 	write_hfgwtr_el2(read_el2_ctx_fgt(ctx, hfgwtr_el2));
1121 }
1122 
1123 static void el2_sysregs_context_save_fgt2(el2_sysregs_t *ctx)
1124 {
1125 	write_el2_ctx_fgt2(ctx, hdfgrtr2_el2, read_hdfgrtr2_el2());
1126 	write_el2_ctx_fgt2(ctx, hdfgwtr2_el2, read_hdfgwtr2_el2());
1127 	write_el2_ctx_fgt2(ctx, hfgitr2_el2, read_hfgitr2_el2());
1128 	write_el2_ctx_fgt2(ctx, hfgrtr2_el2, read_hfgrtr2_el2());
1129 	write_el2_ctx_fgt2(ctx, hfgwtr2_el2, read_hfgwtr2_el2());
1130 }
1131 
1132 static void el2_sysregs_context_restore_fgt2(el2_sysregs_t *ctx)
1133 {
1134 	write_hdfgrtr2_el2(read_el2_ctx_fgt2(ctx, hdfgrtr2_el2));
1135 	write_hdfgwtr2_el2(read_el2_ctx_fgt2(ctx, hdfgwtr2_el2));
1136 	write_hfgitr2_el2(read_el2_ctx_fgt2(ctx, hfgitr2_el2));
1137 	write_hfgrtr2_el2(read_el2_ctx_fgt2(ctx, hfgrtr2_el2));
1138 	write_hfgwtr2_el2(read_el2_ctx_fgt2(ctx, hfgwtr2_el2));
1139 }
1140 
1141 static void el2_sysregs_context_save_mpam(el2_sysregs_t *ctx)
1142 {
1143 	u_register_t mpam_idr = read_mpamidr_el1();
1144 
1145 	write_el2_ctx_mpam(ctx, mpam2_el2, read_mpam2_el2());
1146 
1147 	/*
1148 	 * The context registers that we intend to save would be part of the
1149 	 * PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
1150 	 */
1151 	if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) {
1152 		return;
1153 	}
1154 
1155 	/*
1156 	 * MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 are always present if
1157 	 * MPAMIDR_EL1.HAS_HCR == 1.
1158 	 */
1159 	write_el2_ctx_mpam(ctx, mpamhcr_el2, read_mpamhcr_el2());
1160 	write_el2_ctx_mpam(ctx, mpamvpm0_el2, read_mpamvpm0_el2());
1161 	write_el2_ctx_mpam(ctx, mpamvpmv_el2, read_mpamvpmv_el2());
1162 
1163 	/*
1164 	 * The number of MPAMVPM registers is implementation defined; it is
1165 	 * reported in the MPAMIDR_EL1 register.
1166 	 */
1167 	switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) {
1168 	case 7:
1169 		write_el2_ctx_mpam(ctx, mpamvpm7_el2, read_mpamvpm7_el2());
1170 		__fallthrough;
1171 	case 6:
1172 		write_el2_ctx_mpam(ctx, mpamvpm6_el2, read_mpamvpm6_el2());
1173 		__fallthrough;
1174 	case 5:
1175 		write_el2_ctx_mpam(ctx, mpamvpm5_el2, read_mpamvpm5_el2());
1176 		__fallthrough;
1177 	case 4:
1178 		write_el2_ctx_mpam(ctx, mpamvpm4_el2, read_mpamvpm4_el2());
1179 		__fallthrough;
1180 	case 3:
1181 		write_el2_ctx_mpam(ctx, mpamvpm3_el2, read_mpamvpm3_el2());
1182 		__fallthrough;
1183 	case 2:
1184 		write_el2_ctx_mpam(ctx, mpamvpm2_el2, read_mpamvpm2_el2());
1185 		__fallthrough;
1186 	case 1:
1187 		write_el2_ctx_mpam(ctx, mpamvpm1_el2, read_mpamvpm1_el2());
1188 		break;
1189 	}
1190 }
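/*
 * Worked example: if MPAMIDR_EL1.VPMR_MAX reads as 3, then
 * MPAMVPM0_EL2..MPAMVPM3_EL2 are implemented. MPAMVPM0_EL2 is saved
 * unconditionally above, and the fall-through switch enters at case 3,
 * saving MPAMVPM3_EL2, MPAMVPM2_EL2 and MPAMVPM1_EL2 in turn.
 */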
1191 
1192 static void el2_sysregs_context_restore_mpam(el2_sysregs_t *ctx)
1193 {
1194 	u_register_t mpam_idr = read_mpamidr_el1();
1195 
1196 	write_mpam2_el2(read_el2_ctx_mpam(ctx, mpam2_el2));
1197 
1198 	if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) {
1199 		return;
1200 	}
1201 
1202 	write_mpamhcr_el2(read_el2_ctx_mpam(ctx, mpamhcr_el2));
1203 	write_mpamvpm0_el2(read_el2_ctx_mpam(ctx, mpamvpm0_el2));
1204 	write_mpamvpmv_el2(read_el2_ctx_mpam(ctx, mpamvpmv_el2));
1205 
1206 	switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) {
1207 	case 7:
1208 		write_mpamvpm7_el2(read_el2_ctx_mpam(ctx, mpamvpm7_el2));
1209 		__fallthrough;
1210 	case 6:
1211 		write_mpamvpm6_el2(read_el2_ctx_mpam(ctx, mpamvpm6_el2));
1212 		__fallthrough;
1213 	case 5:
1214 		write_mpamvpm5_el2(read_el2_ctx_mpam(ctx, mpamvpm5_el2));
1215 		__fallthrough;
1216 	case 4:
1217 		write_mpamvpm4_el2(read_el2_ctx_mpam(ctx, mpamvpm4_el2));
1218 		__fallthrough;
1219 	case 3:
1220 		write_mpamvpm3_el2(read_el2_ctx_mpam(ctx, mpamvpm3_el2));
1221 		__fallthrough;
1222 	case 2:
1223 		write_mpamvpm2_el2(read_el2_ctx_mpam(ctx, mpamvpm2_el2));
1224 		__fallthrough;
1225 	case 1:
1226 		write_mpamvpm1_el2(read_el2_ctx_mpam(ctx, mpamvpm1_el2));
1227 		break;
1228 	}
1229 }
1230 
1231 /* ---------------------------------------------------------------------------
1232  * The following registers are not added:
1233  * ICH_AP0R<n>_EL2
1234  * ICH_AP1R<n>_EL2
1235  * ICH_LR<n>_EL2
1236  *
1237  * NOTE: For a system with S-EL2 present but not enabled, accessing
1238  * ICC_SRE_EL2 is undefined from EL3. To work around this, set
1239  * SCR_EL3.NS = 1 before accessing this register.
1240  * ---------------------------------------------------------------------------
1241  */
1242 static void el2_sysregs_context_save_gic(el2_sysregs_t *ctx)
1243 {
1244 #if defined(SPD_spmd) && SPMD_SPM_AT_SEL2
1245 	write_el2_ctx_common(ctx, icc_sre_el2, read_icc_sre_el2());
1246 #else
1247 	u_register_t scr_el3 = read_scr_el3();
1248 	write_scr_el3(scr_el3 | SCR_NS_BIT);
1249 	isb();
1250 
1251 	write_el2_ctx_common(ctx, icc_sre_el2, read_icc_sre_el2());
1252 
1253 	write_scr_el3(scr_el3);
1254 	isb();
1255 #endif
1256 	write_el2_ctx_common(ctx, ich_hcr_el2, read_ich_hcr_el2());
1257 	write_el2_ctx_common(ctx, ich_vmcr_el2, read_ich_vmcr_el2());
1258 }
1259 
1260 static void el2_sysregs_context_restore_gic(el2_sysregs_t *ctx)
1261 {
1262 #if defined(SPD_spmd) && SPMD_SPM_AT_SEL2
1263 	write_icc_sre_el2(read_el2_ctx_common(ctx, icc_sre_el2));
1264 #else
1265 	u_register_t scr_el3 = read_scr_el3();
1266 	write_scr_el3(scr_el3 | SCR_NS_BIT);
1267 	isb();
1268 
1269 	write_icc_sre_el2(read_el2_ctx_common(ctx, icc_sre_el2));
1270 
1271 	write_scr_el3(scr_el3);
1272 	isb();
1273 #endif
1274 	write_ich_hcr_el2(read_el2_ctx_common(ctx, ich_hcr_el2));
1275 	write_ich_vmcr_el2(read_el2_ctx_common(ctx, ich_vmcr_el2));
1276 }
1277 
1278 /* -----------------------------------------------------
1279  * The following registers are not added:
1280  * AMEVCNTVOFF0<n>_EL2
1281  * AMEVCNTVOFF1<n>_EL2
1282  * -----------------------------------------------------
1283  */
1284 static void el2_sysregs_context_save_common(el2_sysregs_t *ctx)
1285 {
1286 	write_el2_ctx_common(ctx, actlr_el2, read_actlr_el2());
1287 	write_el2_ctx_common(ctx, afsr0_el2, read_afsr0_el2());
1288 	write_el2_ctx_common(ctx, afsr1_el2, read_afsr1_el2());
1289 	write_el2_ctx_common(ctx, amair_el2, read_amair_el2());
1290 	write_el2_ctx_common(ctx, cnthctl_el2, read_cnthctl_el2());
1291 	write_el2_ctx_common(ctx, cntvoff_el2, read_cntvoff_el2());
1292 	write_el2_ctx_common(ctx, cptr_el2, read_cptr_el2());
1293 	if (CTX_INCLUDE_AARCH32_REGS) {
1294 		write_el2_ctx_common(ctx, dbgvcr32_el2, read_dbgvcr32_el2());
1295 	}
1296 	write_el2_ctx_common(ctx, elr_el2, read_elr_el2());
1297 	write_el2_ctx_common(ctx, esr_el2, read_esr_el2());
1298 	write_el2_ctx_common(ctx, far_el2, read_far_el2());
1299 	write_el2_ctx_common(ctx, hacr_el2, read_hacr_el2());
1300 	write_el2_ctx_common(ctx, hcr_el2, read_hcr_el2());
1301 	write_el2_ctx_common(ctx, hpfar_el2, read_hpfar_el2());
1302 	write_el2_ctx_common(ctx, hstr_el2, read_hstr_el2());
1303 	write_el2_ctx_common(ctx, mair_el2, read_mair_el2());
1304 	write_el2_ctx_common(ctx, mdcr_el2, read_mdcr_el2());
1305 	write_el2_ctx_common(ctx, sctlr_el2, read_sctlr_el2());
1306 	write_el2_ctx_common(ctx, spsr_el2, read_spsr_el2());
1307 	write_el2_ctx_common(ctx, sp_el2, read_sp_el2());
1308 	write_el2_ctx_common(ctx, tcr_el2, read_tcr_el2());
1309 	write_el2_ctx_common(ctx, tpidr_el2, read_tpidr_el2());
1310 	write_el2_ctx_common(ctx, ttbr0_el2, read_ttbr0_el2());
1311 	write_el2_ctx_common(ctx, vbar_el2, read_vbar_el2());
1312 	write_el2_ctx_common(ctx, vmpidr_el2, read_vmpidr_el2());
1313 	write_el2_ctx_common(ctx, vpidr_el2, read_vpidr_el2());
1314 	write_el2_ctx_common(ctx, vtcr_el2, read_vtcr_el2());
1315 	write_el2_ctx_common(ctx, vttbr_el2, read_vttbr_el2());
1316 }
1317 
1318 static void el2_sysregs_context_restore_common(el2_sysregs_t *ctx)
1319 {
1320 	write_actlr_el2(read_el2_ctx_common(ctx, actlr_el2));
1321 	write_afsr0_el2(read_el2_ctx_common(ctx, afsr0_el2));
1322 	write_afsr1_el2(read_el2_ctx_common(ctx, afsr1_el2));
1323 	write_amair_el2(read_el2_ctx_common(ctx, amair_el2));
1324 	write_cnthctl_el2(read_el2_ctx_common(ctx, cnthctl_el2));
1325 	write_cntvoff_el2(read_el2_ctx_common(ctx, cntvoff_el2));
1326 	write_cptr_el2(read_el2_ctx_common(ctx, cptr_el2));
1327 	if (CTX_INCLUDE_AARCH32_REGS) {
1328 		write_dbgvcr32_el2(read_el2_ctx_common(ctx, dbgvcr32_el2));
1329 	}
1330 	write_elr_el2(read_el2_ctx_common(ctx, elr_el2));
1331 	write_esr_el2(read_el2_ctx_common(ctx, esr_el2));
1332 	write_far_el2(read_el2_ctx_common(ctx, far_el2));
1333 	write_hacr_el2(read_el2_ctx_common(ctx, hacr_el2));
1334 	write_hcr_el2(read_el2_ctx_common(ctx, hcr_el2));
1335 	write_hpfar_el2(read_el2_ctx_common(ctx, hpfar_el2));
1336 	write_hstr_el2(read_el2_ctx_common(ctx, hstr_el2));
1337 	write_mair_el2(read_el2_ctx_common(ctx, mair_el2));
1338 	write_mdcr_el2(read_el2_ctx_common(ctx, mdcr_el2));
1339 	write_sctlr_el2(read_el2_ctx_common(ctx, sctlr_el2));
1340 	write_spsr_el2(read_el2_ctx_common(ctx, spsr_el2));
1341 	write_sp_el2(read_el2_ctx_common(ctx, sp_el2));
1342 	write_tcr_el2(read_el2_ctx_common(ctx, tcr_el2));
1343 	write_tpidr_el2(read_el2_ctx_common(ctx, tpidr_el2));
1344 	write_ttbr0_el2(read_el2_ctx_common(ctx, ttbr0_el2));
1345 	write_vbar_el2(read_el2_ctx_common(ctx, vbar_el2));
1346 	write_vmpidr_el2(read_el2_ctx_common(ctx, vmpidr_el2));
1347 	write_vpidr_el2(read_el2_ctx_common(ctx, vpidr_el2));
1348 	write_vtcr_el2(read_el2_ctx_common(ctx, vtcr_el2));
1349 	write_vttbr_el2(read_el2_ctx_common(ctx, vttbr_el2));
1350 }
1351 
1352 /*******************************************************************************
1353  * Save EL2 sysreg context
1354  ******************************************************************************/
1355 void cm_el2_sysregs_context_save(uint32_t security_state)
1356 {
1357 	cpu_context_t *ctx;
1358 	el2_sysregs_t *el2_sysregs_ctx;
1359 
1360 	ctx = cm_get_context(security_state);
1361 	assert(ctx != NULL);
1362 
1363 	el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);
1364 
1365 	el2_sysregs_context_save_common(el2_sysregs_ctx);
1366 	el2_sysregs_context_save_gic(el2_sysregs_ctx);
1367 
1368 	if (is_feat_mte2_supported()) {
1369 		write_el2_ctx_mte2(el2_sysregs_ctx, tfsr_el2, read_tfsr_el2());
1370 	}
1371 
1372 	if (is_feat_mpam_supported()) {
1373 		el2_sysregs_context_save_mpam(el2_sysregs_ctx);
1374 	}
1375 
1376 	if (is_feat_fgt_supported()) {
1377 		el2_sysregs_context_save_fgt(el2_sysregs_ctx);
1378 	}
1379 
1380 	if (is_feat_fgt2_supported()) {
1381 		el2_sysregs_context_save_fgt2(el2_sysregs_ctx);
1382 	}
1383 
1384 	if (is_feat_ecv_v2_supported()) {
1385 		write_el2_ctx_ecv(el2_sysregs_ctx, cntpoff_el2, read_cntpoff_el2());
1386 	}
1387 
1388 	if (is_feat_vhe_supported()) {
1389 		write_el2_ctx_vhe(el2_sysregs_ctx, contextidr_el2,
1390 					read_contextidr_el2());
1391 		write_el2_ctx_vhe(el2_sysregs_ctx, ttbr1_el2, read_ttbr1_el2());
1392 	}
1393 
1394 	if (is_feat_ras_supported()) {
1395 		write_el2_ctx_ras(el2_sysregs_ctx, vdisr_el2, read_vdisr_el2());
1396 		write_el2_ctx_ras(el2_sysregs_ctx, vsesr_el2, read_vsesr_el2());
1397 	}
1398 
1399 	if (is_feat_nv2_supported()) {
1400 		write_el2_ctx_neve(el2_sysregs_ctx, vncr_el2, read_vncr_el2());
1401 	}
1402 
1403 	if (is_feat_trf_supported()) {
1404 		write_el2_ctx_trf(el2_sysregs_ctx, trfcr_el2, read_trfcr_el2());
1405 	}
1406 
1407 	if (is_feat_csv2_2_supported()) {
1408 		write_el2_ctx_csv2_2(el2_sysregs_ctx, scxtnum_el2,
1409 					read_scxtnum_el2());
1410 	}
1411 
1412 	if (is_feat_hcx_supported()) {
1413 		write_el2_ctx_hcx(el2_sysregs_ctx, hcrx_el2, read_hcrx_el2());
1414 	}
1415 
1416 	if (is_feat_tcr2_supported()) {
1417 		write_el2_ctx_tcr2(el2_sysregs_ctx, tcr2_el2, read_tcr2_el2());
1418 	}
1419 
1420 	if (is_feat_sxpie_supported()) {
1421 		write_el2_ctx_sxpie(el2_sysregs_ctx, pire0_el2, read_pire0_el2());
1422 		write_el2_ctx_sxpie(el2_sysregs_ctx, pir_el2, read_pir_el2());
1423 	}
1424 
1425 	if (is_feat_sxpoe_supported()) {
1426 		write_el2_ctx_sxpoe(el2_sysregs_ctx, por_el2, read_por_el2());
1427 	}
1428 
1429 	if (is_feat_s2pie_supported()) {
1430 		write_el2_ctx_s2pie(el2_sysregs_ctx, s2pir_el2, read_s2pir_el2());
1431 	}
1432 
1433 	if (is_feat_gcs_supported()) {
1434 		write_el2_ctx_gcs(el2_sysregs_ctx, gcscr_el2, read_gcscr_el2());
1435 		write_el2_ctx_gcs(el2_sysregs_ctx, gcspr_el2, read_gcspr_el2());
1436 	}
1437 }
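/*
 * Usage sketch: a world switch pairs the save and restore calls. A
 * dispatcher moving from Non-secure to Secure might do, in outline:
 *
 *   cm_el2_sysregs_context_save(NON_SECURE);
 *   cm_el2_sysregs_context_restore(SECURE);
 *   cm_set_next_eret_context(SECURE);
 */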
1438 
1439 /*******************************************************************************
1440  * Restore EL2 sysreg context
1441  ******************************************************************************/
1442 void cm_el2_sysregs_context_restore(uint32_t security_state)
1443 {
1444 	cpu_context_t *ctx;
1445 	el2_sysregs_t *el2_sysregs_ctx;
1446 
1447 	ctx = cm_get_context(security_state);
1448 	assert(ctx != NULL);
1449 
1450 	el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);
1451 
1452 	el2_sysregs_context_restore_common(el2_sysregs_ctx);
1453 	el2_sysregs_context_restore_gic(el2_sysregs_ctx);
1454 
1455 	if (is_feat_mte2_supported()) {
1456 		write_tfsr_el2(read_el2_ctx_mte2(el2_sysregs_ctx, tfsr_el2));
1457 	}
1458 
1459 	if (is_feat_mpam_supported()) {
1460 		el2_sysregs_context_restore_mpam(el2_sysregs_ctx);
1461 	}
1462 
1463 	if (is_feat_fgt_supported()) {
1464 		el2_sysregs_context_restore_fgt(el2_sysregs_ctx);
1465 	}
1466 
1467 	if (is_feat_fgt2_supported()) {
1468 		el2_sysregs_context_restore_fgt2(el2_sysregs_ctx);
1469 	}
1470 
1471 	if (is_feat_ecv_v2_supported()) {
1472 		write_cntpoff_el2(read_el2_ctx_ecv(el2_sysregs_ctx, cntpoff_el2));
1473 	}
1474 
1475 	if (is_feat_vhe_supported()) {
1476 		write_contextidr_el2(read_el2_ctx_vhe(el2_sysregs_ctx,
1477 					contextidr_el2));
1478 		write_ttbr1_el2(read_el2_ctx_vhe(el2_sysregs_ctx, ttbr1_el2));
1479 	}
1480 
1481 	if (is_feat_ras_supported()) {
1482 		write_vdisr_el2(read_el2_ctx_ras(el2_sysregs_ctx, vdisr_el2));
1483 		write_vsesr_el2(read_el2_ctx_ras(el2_sysregs_ctx, vsesr_el2));
1484 	}
1485 
1486 	if (is_feat_nv2_supported()) {
1487 		write_vncr_el2(read_el2_ctx_neve(el2_sysregs_ctx, vncr_el2));
1488 	}
1489 
1490 	if (is_feat_trf_supported()) {
1491 		write_trfcr_el2(read_el2_ctx_trf(el2_sysregs_ctx, trfcr_el2));
1492 	}
1493 
1494 	if (is_feat_csv2_2_supported()) {
1495 		write_scxtnum_el2(read_el2_ctx_csv2_2(el2_sysregs_ctx,
1496 					scxtnum_el2));
1497 	}
1498 
1499 	if (is_feat_hcx_supported()) {
1500 		write_hcrx_el2(read_el2_ctx_hcx(el2_sysregs_ctx, hcrx_el2));
1501 	}
1502 
1503 	if (is_feat_tcr2_supported()) {
1504 		write_tcr2_el2(read_el2_ctx_tcr2(el2_sysregs_ctx, tcr2_el2));
1505 	}
1506 
1507 	if (is_feat_sxpie_supported()) {
1508 		write_pire0_el2(read_el2_ctx_sxpie(el2_sysregs_ctx, pire0_el2));
1509 		write_pir_el2(read_el2_ctx_sxpie(el2_sysregs_ctx, pir_el2));
1510 	}
1511 
1512 	if (is_feat_sxpoe_supported()) {
1513 		write_por_el2(read_el2_ctx_sxpoe(el2_sysregs_ctx, por_el2));
1514 	}
1515 
1516 	if (is_feat_s2pie_supported()) {
1517 		write_s2pir_el2(read_el2_ctx_s2pie(el2_sysregs_ctx, s2pir_el2));
1518 	}
1519 
1520 	if (is_feat_gcs_supported()) {
1521 		write_gcscr_el2(read_el2_ctx_gcs(el2_sysregs_ctx, gcscr_el2));
1522 		write_gcspr_el2(read_el2_ctx_gcs(el2_sysregs_ctx, gcspr_el2));
1523 	}
1524 }
1525 #endif /* (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) */
1526 
1527 #if IMAGE_BL31
1528 /*********************************************************************************
1529 * This function handles asymmetry of architecture features among cores.
1530 * TF-A assumes that all cores in the platform have architecture feature parity,
1531 * and hence the context is set up on a different core (e.g. the primary sets up
1532 * the context for secondary cores). This assumption may not be true for systems
1533 * where cores do not conform to the same Arch version, or where a CPU erratum
1534 * requires a certain feature to be disabled only on a given core.
1535 *
1536 * This function is called on secondary cores to override any disparity in the
1537 * context set up by the primary; it is called during the warmboot path.
1538 *********************************************************************************/
1539 void cm_handle_asymmetric_features(void)
1540 {
1541 #if ENABLE_SPE_FOR_NS == FEAT_STATE_CHECK_ASYMMETRIC
1542 	cpu_context_t *spe_ctx = cm_get_context(NON_SECURE);
1543 
1544 	assert(spe_ctx != NULL);
1545 
1546 	if (is_feat_spe_supported()) {
1547 		spe_enable(spe_ctx);
1548 	} else {
1549 		spe_disable(spe_ctx);
1550 	}
1551 #endif
1552 #if ERRATA_A520_2938996 || ERRATA_X4_2726228
1553 	cpu_context_t *trbe_ctx = cm_get_context(NON_SECURE);
1554 
1555 	assert(trbe_ctx != NULL);
1556 
1557 	if (check_if_affected_core() == ERRATA_APPLIES) {
1558 		if (is_feat_trbe_supported()) {
1559 			trbe_disable(trbe_ctx);
1560 		}
1561 	}
1562 #endif
1563 }
1564 #endif
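/*
 * Example scenario: on an asymmetric system where only some cores
 * implement FEAT_SPE, the primary core may have enabled SPE in the context
 * it set up for a secondary that lacks the feature. When that secondary
 * warm-boots, the FEAT_STATE_CHECK_ASYMMETRIC path above re-evaluates
 * is_feat_spe_supported() on the local core and disables SPE in its own
 * NS context instead.
 */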
1565 
/*******************************************************************************
 * This function is used to exit to the Non-secure world. If
 * CTX_INCLUDE_EL2_REGS is enabled, it restores the EL1 and EL2 sysreg contexts
 * instead of directly updating the EL1 and EL2 registers. Otherwise, it calls
 * the generic cm_prepare_el3_exit function.
 ******************************************************************************/
1572 void cm_prepare_el3_exit_ns(void)
1573 {
1574 #if IMAGE_BL31
	/*
	 * Check and handle architecture feature asymmetry among cores.
	 *
	 * In the warmboot path, a secondary core's context is initialized on
	 * the core that issued the CPU_ON SMC call. If there is feature
	 * asymmetry between these cores, it is handled in this function call.
	 * For symmetric cores this function does nothing.
	 */
1583 	cm_handle_asymmetric_features();
1584 #endif
1585 
1586 #if (CTX_INCLUDE_EL2_REGS && IMAGE_BL31)
1587 #if ENABLE_ASSERTIONS
1588 	cpu_context_t *ctx = cm_get_context(NON_SECURE);
1589 	assert(ctx != NULL);
1590 
1591 	/* Assert that EL2 is used. */
1592 	u_register_t scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
1593 	assert(((scr_el3 & SCR_HCE_BIT) != 0UL) &&
1594 			(el_implemented(2U) != EL_IMPL_NONE));
1595 #endif /* ENABLE_ASSERTIONS */
1596 
1597 	/* Restore EL2 sysreg contexts */
1598 	cm_el2_sysregs_context_restore(NON_SECURE);
1599 	cm_set_next_eret_context(NON_SECURE);
1600 #else
1601 	cm_prepare_el3_exit(NON_SECURE);
1602 #endif /* (CTX_INCLUDE_EL2_REGS && IMAGE_BL31) */
1603 }
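
/*
 * Illustrative usage (a sketch, not lifted from a specific caller): a runtime
 * service completing a world switch back to the Normal world might simply
 * call
 *
 *	cm_prepare_el3_exit_ns();
 *
 * which, in either build configuration, leaves SP_EL3 pointing at the
 * Non-secure 'cpu_context' ready for the subsequent exception return.
 */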
1604 
1605 #if ((IMAGE_BL1) || (IMAGE_BL31 && (!CTX_INCLUDE_EL2_REGS)))
/*******************************************************************************
 * The next two functions save and restore the EL1 system register context on
 * the 'el1_sysregs_t' structure; the cm_* wrappers further below select the
 * structure for the specified security state.
 ******************************************************************************/
1610 static void el1_sysregs_context_save(el1_sysregs_t *ctx)
1611 {
1612 	write_el1_ctx_common(ctx, spsr_el1, read_spsr_el1());
1613 	write_el1_ctx_common(ctx, elr_el1, read_elr_el1());
1614 
1615 #if (!ERRATA_SPECULATIVE_AT)
1616 	write_el1_ctx_common(ctx, sctlr_el1, read_sctlr_el1());
1617 	write_el1_ctx_common(ctx, tcr_el1, read_tcr_el1());
1618 #endif /* (!ERRATA_SPECULATIVE_AT) */
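	/*
	 * Note: when ERRATA_SPECULATIVE_AT is enabled, SCTLR_EL1 and TCR_EL1
	 * are saved and restored separately on the EL3 entry/exit path, so
	 * they are deliberately excluded from this common sequence.
	 */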
1619 
1620 	write_el1_ctx_common(ctx, cpacr_el1, read_cpacr_el1());
1621 	write_el1_ctx_common(ctx, csselr_el1, read_csselr_el1());
1622 	write_el1_ctx_common(ctx, sp_el1, read_sp_el1());
1623 	write_el1_ctx_common(ctx, esr_el1, read_esr_el1());
1624 	write_el1_ctx_common(ctx, ttbr0_el1, read_ttbr0_el1());
1625 	write_el1_ctx_common(ctx, ttbr1_el1, read_ttbr1_el1());
1626 	write_el1_ctx_common(ctx, mair_el1, read_mair_el1());
1627 	write_el1_ctx_common(ctx, amair_el1, read_amair_el1());
1628 	write_el1_ctx_common(ctx, actlr_el1, read_actlr_el1());
1629 	write_el1_ctx_common(ctx, tpidr_el1, read_tpidr_el1());
1630 	write_el1_ctx_common(ctx, tpidr_el0, read_tpidr_el0());
1631 	write_el1_ctx_common(ctx, tpidrro_el0, read_tpidrro_el0());
1632 	write_el1_ctx_common(ctx, par_el1, read_par_el1());
1633 	write_el1_ctx_common(ctx, far_el1, read_far_el1());
1634 	write_el1_ctx_common(ctx, afsr0_el1, read_afsr0_el1());
1635 	write_el1_ctx_common(ctx, afsr1_el1, read_afsr1_el1());
1636 	write_el1_ctx_common(ctx, contextidr_el1, read_contextidr_el1());
1637 	write_el1_ctx_common(ctx, vbar_el1, read_vbar_el1());
1638 	write_el1_ctx_common(ctx, mdccint_el1, read_mdccint_el1());
1639 	write_el1_ctx_common(ctx, mdscr_el1, read_mdscr_el1());
1640 
1641 	if (CTX_INCLUDE_AARCH32_REGS) {
		/* Save AArch32 registers */
1643 		write_el1_ctx_aarch32(ctx, spsr_abt, read_spsr_abt());
1644 		write_el1_ctx_aarch32(ctx, spsr_und, read_spsr_und());
1645 		write_el1_ctx_aarch32(ctx, spsr_irq, read_spsr_irq());
1646 		write_el1_ctx_aarch32(ctx, spsr_fiq, read_spsr_fiq());
1647 		write_el1_ctx_aarch32(ctx, dacr32_el2, read_dacr32_el2());
1648 		write_el1_ctx_aarch32(ctx, ifsr32_el2, read_ifsr32_el2());
1649 	}
1650 
1651 	if (NS_TIMER_SWITCH) {
1652 		/* Save NS Timer registers */
1653 		write_el1_ctx_arch_timer(ctx, cntp_ctl_el0, read_cntp_ctl_el0());
1654 		write_el1_ctx_arch_timer(ctx, cntp_cval_el0, read_cntp_cval_el0());
1655 		write_el1_ctx_arch_timer(ctx, cntv_ctl_el0, read_cntv_ctl_el0());
1656 		write_el1_ctx_arch_timer(ctx, cntv_cval_el0, read_cntv_cval_el0());
1657 		write_el1_ctx_arch_timer(ctx, cntkctl_el1, read_cntkctl_el1());
1658 	}
1659 
1660 	if (is_feat_mte2_supported()) {
1661 		write_el1_ctx_mte2(ctx, tfsre0_el1, read_tfsre0_el1());
1662 		write_el1_ctx_mte2(ctx, tfsr_el1, read_tfsr_el1());
1663 		write_el1_ctx_mte2(ctx, rgsr_el1, read_rgsr_el1());
1664 		write_el1_ctx_mte2(ctx, gcr_el1, read_gcr_el1());
1665 	}
1666 
1667 	if (is_feat_ras_supported()) {
1668 		write_el1_ctx_ras(ctx, disr_el1, read_disr_el1());
1669 	}
1670 
1671 	if (is_feat_s1pie_supported()) {
1672 		write_el1_ctx_s1pie(ctx, pire0_el1, read_pire0_el1());
1673 		write_el1_ctx_s1pie(ctx, pir_el1, read_pir_el1());
1674 	}
1675 
1676 	if (is_feat_s1poe_supported()) {
1677 		write_el1_ctx_s1poe(ctx, por_el1, read_por_el1());
1678 	}
1679 
1680 	if (is_feat_s2poe_supported()) {
1681 		write_el1_ctx_s2poe(ctx, s2por_el1, read_s2por_el1());
1682 	}
1683 
1684 	if (is_feat_tcr2_supported()) {
1685 		write_el1_ctx_tcr2(ctx, tcr2_el1, read_tcr2_el1());
1686 	}
1687 
1688 	if (is_feat_trf_supported()) {
1689 		write_el1_ctx_trf(ctx, trfcr_el1, read_trfcr_el1());
1690 	}
1691 
1692 	if (is_feat_csv2_2_supported()) {
1693 		write_el1_ctx_csv2_2(ctx, scxtnum_el0, read_scxtnum_el0());
1694 		write_el1_ctx_csv2_2(ctx, scxtnum_el1, read_scxtnum_el1());
1695 	}
1696 
1697 	if (is_feat_gcs_supported()) {
1698 		write_el1_ctx_gcs(ctx, gcscr_el1, read_gcscr_el1());
1699 		write_el1_ctx_gcs(ctx, gcscre0_el1, read_gcscre0_el1());
1700 		write_el1_ctx_gcs(ctx, gcspr_el1, read_gcspr_el1());
1701 		write_el1_ctx_gcs(ctx, gcspr_el0, read_gcspr_el0());
1702 	}
1703 }
1704 
1705 static void el1_sysregs_context_restore(el1_sysregs_t *ctx)
1706 {
1707 	write_spsr_el1(read_el1_ctx_common(ctx, spsr_el1));
1708 	write_elr_el1(read_el1_ctx_common(ctx, elr_el1));
1709 
1710 #if (!ERRATA_SPECULATIVE_AT)
1711 	write_sctlr_el1(read_el1_ctx_common(ctx, sctlr_el1));
1712 	write_tcr_el1(read_el1_ctx_common(ctx, tcr_el1));
1713 #endif /* (!ERRATA_SPECULATIVE_AT) */
1714 
1715 	write_cpacr_el1(read_el1_ctx_common(ctx, cpacr_el1));
1716 	write_csselr_el1(read_el1_ctx_common(ctx, csselr_el1));
1717 	write_sp_el1(read_el1_ctx_common(ctx, sp_el1));
1718 	write_esr_el1(read_el1_ctx_common(ctx, esr_el1));
1719 	write_ttbr0_el1(read_el1_ctx_common(ctx, ttbr0_el1));
1720 	write_ttbr1_el1(read_el1_ctx_common(ctx, ttbr1_el1));
1721 	write_mair_el1(read_el1_ctx_common(ctx, mair_el1));
1722 	write_amair_el1(read_el1_ctx_common(ctx, amair_el1));
1723 	write_actlr_el1(read_el1_ctx_common(ctx, actlr_el1));
1724 	write_tpidr_el1(read_el1_ctx_common(ctx, tpidr_el1));
1725 	write_tpidr_el0(read_el1_ctx_common(ctx, tpidr_el0));
1726 	write_tpidrro_el0(read_el1_ctx_common(ctx, tpidrro_el0));
1727 	write_par_el1(read_el1_ctx_common(ctx, par_el1));
1728 	write_far_el1(read_el1_ctx_common(ctx, far_el1));
1729 	write_afsr0_el1(read_el1_ctx_common(ctx, afsr0_el1));
1730 	write_afsr1_el1(read_el1_ctx_common(ctx, afsr1_el1));
1731 	write_contextidr_el1(read_el1_ctx_common(ctx, contextidr_el1));
1732 	write_vbar_el1(read_el1_ctx_common(ctx, vbar_el1));
1733 	write_mdccint_el1(read_el1_ctx_common(ctx, mdccint_el1));
1734 	write_mdscr_el1(read_el1_ctx_common(ctx, mdscr_el1));
1735 
1736 	if (CTX_INCLUDE_AARCH32_REGS) {
		/* Restore AArch32 registers */
1738 		write_spsr_abt(read_el1_ctx_aarch32(ctx, spsr_abt));
1739 		write_spsr_und(read_el1_ctx_aarch32(ctx, spsr_und));
1740 		write_spsr_irq(read_el1_ctx_aarch32(ctx, spsr_irq));
1741 		write_spsr_fiq(read_el1_ctx_aarch32(ctx, spsr_fiq));
1742 		write_dacr32_el2(read_el1_ctx_aarch32(ctx, dacr32_el2));
1743 		write_ifsr32_el2(read_el1_ctx_aarch32(ctx, ifsr32_el2));
1744 	}
1745 
1746 	if (NS_TIMER_SWITCH) {
1747 		/* Restore NS Timer registers */
1748 		write_cntp_ctl_el0(read_el1_ctx_arch_timer(ctx, cntp_ctl_el0));
1749 		write_cntp_cval_el0(read_el1_ctx_arch_timer(ctx, cntp_cval_el0));
1750 		write_cntv_ctl_el0(read_el1_ctx_arch_timer(ctx, cntv_ctl_el0));
1751 		write_cntv_cval_el0(read_el1_ctx_arch_timer(ctx, cntv_cval_el0));
1752 		write_cntkctl_el1(read_el1_ctx_arch_timer(ctx, cntkctl_el1));
1753 	}
1754 
1755 	if (is_feat_mte2_supported()) {
1756 		write_tfsre0_el1(read_el1_ctx_mte2(ctx, tfsre0_el1));
1757 		write_tfsr_el1(read_el1_ctx_mte2(ctx, tfsr_el1));
1758 		write_rgsr_el1(read_el1_ctx_mte2(ctx, rgsr_el1));
1759 		write_gcr_el1(read_el1_ctx_mte2(ctx, gcr_el1));
1760 	}
1761 
1762 	if (is_feat_ras_supported()) {
1763 		write_disr_el1(read_el1_ctx_ras(ctx, disr_el1));
1764 	}
1765 
1766 	if (is_feat_s1pie_supported()) {
1767 		write_pire0_el1(read_el1_ctx_s1pie(ctx, pire0_el1));
1768 		write_pir_el1(read_el1_ctx_s1pie(ctx, pir_el1));
1769 	}
1770 
1771 	if (is_feat_s1poe_supported()) {
1772 		write_por_el1(read_el1_ctx_s1poe(ctx, por_el1));
1773 	}
1774 
1775 	if (is_feat_s2poe_supported()) {
1776 		write_s2por_el1(read_el1_ctx_s2poe(ctx, s2por_el1));
1777 	}
1778 
1779 	if (is_feat_tcr2_supported()) {
1780 		write_tcr2_el1(read_el1_ctx_tcr2(ctx, tcr2_el1));
1781 	}
1782 
1783 	if (is_feat_trf_supported()) {
1784 		write_trfcr_el1(read_el1_ctx_trf(ctx, trfcr_el1));
1785 	}
1786 
1787 	if (is_feat_csv2_2_supported()) {
1788 		write_scxtnum_el0(read_el1_ctx_csv2_2(ctx, scxtnum_el0));
1789 		write_scxtnum_el1(read_el1_ctx_csv2_2(ctx, scxtnum_el1));
1790 	}
1791 
1792 	if (is_feat_gcs_supported()) {
1793 		write_gcscr_el1(read_el1_ctx_gcs(ctx, gcscr_el1));
1794 		write_gcscre0_el1(read_el1_ctx_gcs(ctx, gcscre0_el1));
1795 		write_gcspr_el1(read_el1_ctx_gcs(ctx, gcspr_el1));
1796 		write_gcspr_el0(read_el1_ctx_gcs(ctx, gcspr_el0));
1797 	}
1798 }
1799 
1800 /*******************************************************************************
1801  * The next couple of functions are used by runtime services to save and restore
1802  * EL1 context on the 'cpu_context' structure for the specified security state.
1803  ******************************************************************************/
1804 void cm_el1_sysregs_context_save(uint32_t security_state)
1805 {
1806 	cpu_context_t *ctx;
1807 
1808 	ctx = cm_get_context(security_state);
1809 	assert(ctx != NULL);
1810 
1811 	el1_sysregs_context_save(get_el1_sysregs_ctx(ctx));
1812 
1813 #if IMAGE_BL31
	if (security_state == SECURE) {
		PUBLISH_EVENT(cm_exited_secure_world);
	} else {
		PUBLISH_EVENT(cm_exited_normal_world);
	}
1818 #endif
1819 }
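
/*
 * Illustrative subscriber (a sketch with a hypothetical handler name): other
 * EL3 components can observe these world-switch events through the pubsub
 * framework, e.g.
 *
 *	static void *on_exit_secure(const void *arg)
 *	{
 *		// per-core bookkeeping on Secure world exit
 *		return NULL;
 *	}
 *	SUBSCRIBE_TO_EVENT(cm_exited_secure_world, on_exit_secure);
 */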
1820 
1821 void cm_el1_sysregs_context_restore(uint32_t security_state)
1822 {
1823 	cpu_context_t *ctx;
1824 
1825 	ctx = cm_get_context(security_state);
1826 	assert(ctx != NULL);
1827 
1828 	el1_sysregs_context_restore(get_el1_sysregs_ctx(ctx));
1829 
1830 #if IMAGE_BL31
	if (security_state == SECURE) {
		PUBLISH_EVENT(cm_entering_secure_world);
	} else {
		PUBLISH_EVENT(cm_entering_normal_world);
	}
1835 #endif
1836 }
1837 
1838 #endif /* ((IMAGE_BL1) || (IMAGE_BL31 && (!CTX_INCLUDE_EL2_REGS))) */
1839 
1840 /*******************************************************************************
1841  * This function populates ELR_EL3 member of 'cpu_context' pertaining to the
1842  * given security state with the given entrypoint
1843  ******************************************************************************/
1844 void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint)
1845 {
1846 	cpu_context_t *ctx;
1847 	el3_state_t *state;
1848 
1849 	ctx = cm_get_context(security_state);
1850 	assert(ctx != NULL);
1851 
1852 	/* Populate EL3 state so that ERET jumps to the correct entry */
1853 	state = get_el3state_ctx(ctx);
1854 	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
1855 }
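
/*
 * Illustrative usage (hypothetical value): a dispatcher redirecting the next
 * Secure-world entry to a new address could do
 *
 *	cm_set_elr_el3(SECURE, (uintptr_t)sp_vector_address);
 *
 * where 'sp_vector_address' is supplied by the service.
 */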
1856 
1857 /*******************************************************************************
1858  * This function populates ELR_EL3 and SPSR_EL3 members of 'cpu_context'
1859  * pertaining to the given security state
1860  ******************************************************************************/
1861 void cm_set_elr_spsr_el3(uint32_t security_state,
1862 			uintptr_t entrypoint, uint32_t spsr)
1863 {
1864 	cpu_context_t *ctx;
1865 	el3_state_t *state;
1866 
1867 	ctx = cm_get_context(security_state);
1868 	assert(ctx != NULL);
1869 
1870 	/* Populate EL3 state so that ERET jumps to the correct entry */
1871 	state = get_el3state_ctx(ctx);
1872 	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
1873 	write_ctx_reg(state, CTX_SPSR_EL3, spsr);
1874 }
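
/*
 * Illustrative usage (a sketch with a hypothetical entrypoint): programming a
 * fresh Secure EL1 entry with all exceptions masked might look like
 *
 *	cm_set_elr_spsr_el3(SECURE, (uintptr_t)sp_entrypoint,
 *			    SPSR_64(MODE_EL1, MODE_SP_ELX,
 *				    DISABLE_ALL_EXCEPTIONS));
 */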
1875 
1876 /*******************************************************************************
1877  * This function updates a single bit in the SCR_EL3 member of the 'cpu_context'
1878  * pertaining to the given security state using the value and bit position
1879  * specified in the parameters. It preserves all other bits.
1880  ******************************************************************************/
1881 void cm_write_scr_el3_bit(uint32_t security_state,
1882 			  uint32_t bit_pos,
1883 			  uint32_t value)
1884 {
1885 	cpu_context_t *ctx;
1886 	el3_state_t *state;
1887 	u_register_t scr_el3;
1888 
1889 	ctx = cm_get_context(security_state);
1890 	assert(ctx != NULL);
1891 
1892 	/* Ensure that the bit position is a valid one */
1893 	assert(((1UL << bit_pos) & SCR_VALID_BIT_MASK) != 0U);
1894 
1895 	/* Ensure that the 'value' is only a bit wide */
1896 	assert(value <= 1U);
1897 
1898 	/*
1899 	 * Get the SCR_EL3 value from the cpu context, clear the desired bit
1900 	 * and set it to its new value.
1901 	 */
1902 	state = get_el3state_ctx(ctx);
1903 	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
1904 	scr_el3 &= ~(1UL << bit_pos);
1905 	scr_el3 |= (u_register_t)value << bit_pos;
1906 	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
1907 }
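
/*
 * Illustrative usage (hypothetical routing decision): setting SCR_EL3.FIQ
 * (bit position 2) for the Non-secure world, so that FIQs are taken to EL3,
 * could be written as
 *
 *	cm_write_scr_el3_bit(NON_SECURE, 2U, 1U);
 *
 * In practice the interrupt management framework derives the bit position
 * from the interrupt routing model rather than hard-coding it.
 */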
1908 
1909 /*******************************************************************************
1910  * This function retrieves SCR_EL3 member of 'cpu_context' pertaining to the
1911  * given security state.
1912  ******************************************************************************/
1913 u_register_t cm_get_scr_el3(uint32_t security_state)
1914 {
1915 	cpu_context_t *ctx;
1916 	el3_state_t *state;
1917 
1918 	ctx = cm_get_context(security_state);
1919 	assert(ctx != NULL);
1920 
	/* Get the SCR_EL3 value from the cpu context */
1922 	state = get_el3state_ctx(ctx);
1923 	return read_ctx_reg(state, CTX_SCR_EL3);
1924 }
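
/*
 * Illustrative usage (a sketch): callers typically test individual bits of
 * the returned value, e.g. checking whether EL2 is enabled for the Normal
 * world:
 *
 *	if ((cm_get_scr_el3(NON_SECURE) & SCR_HCE_BIT) != 0U) { ... }
 */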
1925 
1926 /*******************************************************************************
1927  * This function is used to program the context that's used for exception
1928  * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
1929  * the required security state
1930  ******************************************************************************/
1931 void cm_set_next_eret_context(uint32_t security_state)
1932 {
1933 	cpu_context_t *ctx;
1934 
1935 	ctx = cm_get_context(security_state);
1936 	assert(ctx != NULL);
1937 
1938 	cm_set_next_context(ctx);
1939 }
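
/*
 * Illustrative usage (a sketch): runtime services typically pair this call
 * with a context restore before exiting EL3, e.g.
 *
 *	cm_el1_sysregs_context_restore(SECURE);
 *	cm_set_next_eret_context(SECURE);
 *
 * so that the subsequent ERET uses the Secure 'cpu_context'.
 */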
1940