xref: /rk3399_ARM-atf/lib/el3_runtime/aarch64/context_mgmt.c (revision 4ce3e99a336b74611349595ea7fd5ed0277c3eeb)
1 /*
2  * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <stdbool.h>
9 #include <string.h>
10 
11 #include <platform_def.h>
12 
13 #include <arch.h>
14 #include <arch_helpers.h>
15 #include <arch_features.h>
16 #include <bl31/interrupt_mgmt.h>
17 #include <common/bl_common.h>
18 #include <context.h>
19 #include <lib/el3_runtime/context_mgmt.h>
20 #include <lib/el3_runtime/pubsub_events.h>
21 #include <lib/extensions/amu.h>
22 #include <lib/extensions/mpam.h>
23 #include <lib/extensions/spe.h>
24 #include <lib/extensions/sve.h>
25 #include <lib/extensions/sys_reg_trace.h>
26 #include <lib/extensions/trbe.h>
27 #include <lib/extensions/trf.h>
28 #include <lib/extensions/twed.h>
29 #include <lib/utils.h>
30 
31 static void enable_extensions_secure(cpu_context_t *ctx);
32 
33 /*******************************************************************************
34  * Context management library initialisation routine. This library is used by
35  * runtime services to share pointers to 'cpu_context' structures for the secure
36  * and non-secure states. Management of the structures and their associated
37  * memory is not done by the context management library e.g. the PSCI service
38  * manages the cpu context used for entry from and exit to the non-secure state.
39  * The Secure payload dispatcher service manages the context(s) corresponding to
40  * the secure state. It also uses this library to get access to the non-secure
41  * state cpu context pointers.
 * Lastly, this library provides the API to make SP_EL3 point to the cpu context
 * which will be used for programming an entry into a lower EL. The same context
 * will be used to save state upon exception entry from that EL.
 ******************************************************************************/
45  ******************************************************************************/
void __init cm_init(void)
{
	/*
	 * The context management library has only global data to initialize,
	 * but that will be done when the BSS is zeroed out.
	 */
}
53 
54 /*******************************************************************************
55  * The following function initializes the cpu_context 'ctx' for
56  * first use, and sets the initial entrypoint state as specified by the
57  * entry_point_info structure.
58  *
59  * The security state to initialize is determined by the SECURE attribute
60  * of the entry_point_info.
61  *
62  * The EE and ST attributes are used to configure the endianness and secure
63  * timer availability for the new execution context.
64  *
65  * To prepare the register state for entry call cm_prepare_el3_exit() and
66  * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
67  * cm_el1_sysregs_context_restore().
68  ******************************************************************************/
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	unsigned int security_state;
	u_register_t scr_el3;
	el3_state_t *state;
	gp_regs_t *gp_regs;
	u_register_t sctlr_elx, actlr_elx;

	assert(ctx != NULL);

	security_state = GET_SECURITY_STATE(ep->h.attr);

	/* Clear any residual register values from the context */
	zeromem(ctx, sizeof(*ctx));

	/*
	 * SCR_EL3 was initialised during reset sequence in macro
	 * el3_arch_init_common. This code modifies the SCR_EL3 fields that
	 * affect the next EL.
	 *
	 * The following fields are initially set to zero and then updated to
	 * the required value depending on the state of the SPSR_EL3 and the
	 * Security state and entrypoint attributes of the next EL.
	 */
	scr_el3 = read_scr();
	scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT |
			SCR_ST_BIT | SCR_HCE_BIT);

#if ENABLE_RME
	/* When RME support is enabled, clear the NSE bit as well. */
	scr_el3 &= ~SCR_NSE_BIT;
#endif /* ENABLE_RME */

	/*
	 * SCR_NS: Set the security state of the next EL.
	 */
	if (security_state == NON_SECURE) {
		scr_el3 |= SCR_NS_BIT;
	}

#if ENABLE_RME
	/* Check for realm state if RME support enabled. */
	if (security_state == REALM) {
		/* Realm state is encoded as NS=1, NSE=1 in SCR_EL3. */
		scr_el3 |= SCR_NS_BIT | SCR_NSE_BIT | SCR_EnSCXT_BIT;
	}
#endif /* ENABLE_RME */

	/*
	 * SCR_EL3.RW: Set the execution state, AArch32 or AArch64, for next
	 *  Exception level as specified by SPSR.
	 */
	if (GET_RW(ep->spsr) == MODE_RW_64) {
		scr_el3 |= SCR_RW_BIT;
	}
	/*
	 * SCR_EL3.ST: Traps Secure EL1 accesses to the Counter-timer Physical
	 *  Secure timer registers to EL3, from AArch64 state only, if specified
	 *  by the entrypoint attributes.
	 */
	if (EP_GET_ST(ep->h.attr) != 0U) {
		scr_el3 |= SCR_ST_BIT;
	}

	/*
	 * If FEAT_HCX is enabled, enable access to HCRX_EL2 by setting
	 * SCR_EL3.HXEn.
	 */
#if ENABLE_FEAT_HCX
	scr_el3 |= SCR_HXEn_BIT;
#endif

#if RAS_TRAP_LOWER_EL_ERR_ACCESS
	/*
	 * SCR_EL3.TERR: Trap Error record accesses. Accesses to the RAS ERR
	 * and RAS ERX registers from EL1 and EL2 are trapped to EL3.
	 */
	scr_el3 |= SCR_TERR_BIT;
#endif

#if !HANDLE_EA_EL3_FIRST
	/*
	 * SCR_EL3.EA: Do not route External Abort and SError Interrupt External
	 *  to EL3 when executing at a lower EL. When executing at EL3, External
	 *  Aborts are taken to EL3.
	 */
	scr_el3 &= ~SCR_EA_BIT;
#endif

#if FAULT_INJECTION_SUPPORT
	/* Enable fault injection from lower ELs */
	scr_el3 |= SCR_FIEN_BIT;
#endif

#if !CTX_INCLUDE_PAUTH_REGS
	/*
	 * If the pointer authentication registers aren't saved during world
	 * switches the value of the registers can be leaked from the Secure to
	 * the Non-secure world. To prevent this, rather than enabling pointer
	 * authentication everywhere, we only enable it in the Non-secure world.
	 *
	 * If the Secure world wants to use pointer authentication,
	 * CTX_INCLUDE_PAUTH_REGS must be set to 1.
	 */
	if (security_state == NON_SECURE) {
		scr_el3 |= SCR_API_BIT | SCR_APK_BIT;
	}
#endif /* !CTX_INCLUDE_PAUTH_REGS */

#if !CTX_INCLUDE_MTE_REGS || ENABLE_ASSERTIONS
	/*
	 * Get Memory Tagging Extension support level. Only declared when
	 * needed: either for the runtime check below or for the assert.
	 */
	unsigned int mte = get_armv8_5_mte_support();
#endif
	/*
	 * Enable MTE support. Support is enabled unilaterally for the normal
	 * world, and only for the secure world when CTX_INCLUDE_MTE_REGS is
	 * set.
	 */
#if CTX_INCLUDE_MTE_REGS
	assert((mte == MTE_IMPLEMENTED_ELX) || (mte == MTE_IMPLEMENTED_ASY));
	scr_el3 |= SCR_ATA_BIT;
#else
	/*
	 * When MTE is only implemented at EL0, it can be enabled
	 * across both worlds as no MTE registers are used.
	 */
	if ((mte == MTE_IMPLEMENTED_EL0) ||
	/*
	 * When MTE is implemented at all ELs, it can be only enabled
	 * in Non-Secure world without register saving.
	 */
	  (((mte == MTE_IMPLEMENTED_ELX) || (mte == MTE_IMPLEMENTED_ASY)) &&
	    (security_state == NON_SECURE))) {
		scr_el3 |= SCR_ATA_BIT;
	}
#endif	/* CTX_INCLUDE_MTE_REGS */

#ifdef IMAGE_BL31
	/*
	 * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
	 *  indicated by the interrupt routing model for BL31.
	 *
	 * TODO: The interrupt routing model code is not updated for REALM
	 * state. Use the default values of IRQ = FIQ = 0 for REALM security
	 * state for now.
	 */
	if (security_state != REALM) {
		scr_el3 |= get_scr_el3_from_routing_model(security_state);
	}
#endif

	/* Save the initialized value of CPTR_EL3 register */
	write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, read_cptr_el3());
	if (security_state == SECURE) {
		/* May modify the CPTR_EL3 value just stored in the context. */
		enable_extensions_secure(ctx);
	}

	/*
	 * SCR_EL3.HCE: Enable HVC instructions if next execution state is
	 * AArch64 and next EL is EL2, or if next execution state is AArch32 and
	 * next mode is Hyp.
	 * SCR_EL3.FGTEn: Enable Fine Grained Virtualization Traps under the
	 * same conditions as HVC instructions and when the processor supports
	 * ARMv8.6-FGT.
	 * SCR_EL3.ECVEn: Enable Enhanced Counter Virtualization (ECV)
	 * CNTPOFF_EL2 register under the same conditions as HVC instructions
	 * and when the processor supports ECV.
	 */
	if (((GET_RW(ep->spsr) == MODE_RW_64) && (GET_EL(ep->spsr) == MODE_EL2))
	    || ((GET_RW(ep->spsr) != MODE_RW_64)
		&& (GET_M32(ep->spsr) == MODE32_hyp))) {
		scr_el3 |= SCR_HCE_BIT;

		if (is_armv8_6_fgt_present()) {
			scr_el3 |= SCR_FGTEN_BIT;
		}

		if (get_armv8_6_ecv_support()
		    == ID_AA64MMFR0_EL1_ECV_SELF_SYNCH) {
			scr_el3 |= SCR_ECVEN_BIT;
		}
	}

	/* Enable S-EL2 if the next EL is EL2 and security state is secure */
	if ((security_state == SECURE) && (GET_EL(ep->spsr) == MODE_EL2)) {
		if (GET_RW(ep->spsr) != MODE_RW_64) {
			ERROR("S-EL2 can not be used in AArch32.");
			panic();
		}

		scr_el3 |= SCR_EEL2_BIT;
	}

	/*
	 * FEAT_AMUv1p1 virtual offset registers are only accessible from EL3
	 * and EL2, when clear, this bit traps accesses from EL2 so we set it
	 * to 1 when EL2 is present.
	 */
	if (is_armv8_6_feat_amuv1p1_present() &&
		(el_implemented(2) != EL_IMPL_NONE)) {
		scr_el3 |= SCR_AMVOFFEN_BIT;
	}

	/*
	 * Initialise SCTLR_EL1 to the reset value corresponding to the target
	 * execution state setting all fields rather than relying on the hw.
	 * Some fields have architecturally UNKNOWN reset values and these are
	 * set to zero.
	 *
	 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
	 *
	 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
	 *  required by PSCI specification)
	 */
	sctlr_elx = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0U;
	if (GET_RW(ep->spsr) == MODE_RW_64) {
		sctlr_elx |= SCTLR_EL1_RES1;
	} else {
		/*
		 * If the target execution state is AArch32 then the following
		 * fields need to be set.
		 *
		 * SCTRL_EL1.nTWE: Set to one so that EL0 execution of WFE
		 *  instructions are not trapped to EL1.
		 *
		 * SCTLR_EL1.nTWI: Set to one so that EL0 execution of WFI
		 *  instructions are not trapped to EL1.
		 *
		 * SCTLR_EL1.CP15BEN: Set to one to enable EL0 execution of the
		 *  CP15DMB, CP15DSB, and CP15ISB instructions.
		 */
		sctlr_elx |= SCTLR_AARCH32_EL1_RES1 | SCTLR_CP15BEN_BIT
					| SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
	}

#if ERRATA_A75_764081
	/*
	 * If workaround of errata 764081 for Cortex-A75 is used then set
	 * SCTLR_EL1.IESB to enable Implicit Error Synchronization Barrier.
	 */
	sctlr_elx |= SCTLR_IESB_BIT;
#endif

	/* Enable WFE trap delay in SCR_EL3 if supported and configured */
	if (is_armv8_6_twed_present()) {
		/* Platform hook decides the delay; TWED_DISABLED opts out. */
		uint32_t delay = plat_arm_set_twedel_scr_el3();

		if (delay != TWED_DISABLED) {
			/* Make sure delay value fits */
			assert((delay & ~SCR_TWEDEL_MASK) == 0U);

			/* Set delay in SCR_EL3 */
			scr_el3 &= ~(SCR_TWEDEL_MASK << SCR_TWEDEL_SHIFT);
			scr_el3 |= ((delay & SCR_TWEDEL_MASK)
					<< SCR_TWEDEL_SHIFT);

			/* Enable WFE delay */
			scr_el3 |= SCR_TWEDEn_BIT;
		}
	}

	/*
	 * Store the initialised SCTLR_EL1 value in the cpu_context - SCTLR_EL2
	 * and other EL2 registers are set up by cm_prepare_el3_exit() as they
	 * are not part of the stored cpu_context.
	 */
	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);

	/*
	 * Base the context ACTLR_EL1 on the current value, as it is
	 * implementation defined. The context restore process will write
	 * the value from the context to the actual register and can cause
	 * problems for processor cores that don't expect certain bits to
	 * be zero.
	 */
	actlr_elx = read_actlr_el1();
	write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));

	/*
	 * Populate EL3 state so that we've the right context
	 * before doing ERET
	 */
	state = get_el3state_ctx(ctx);
	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
	write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
	write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);

	/*
	 * Store the X0-X7 value from the entrypoint into the context
	 * Use memcpy as we are in control of the layout of the structures
	 */
	gp_regs = get_gpregs_ctx(ctx);
	memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
}
362 
363 /*******************************************************************************
364  * Enable architecture extensions on first entry to Non-secure world.
365  * When EL2 is implemented but unused `el2_unused` is non-zero, otherwise
366  * it is zero.
367  ******************************************************************************/
static void enable_extensions_nonsecure(bool el2_unused, cpu_context_t *ctx)
{
	/* All extension enablement is compiled in for BL31 only; in other
	 * images this function is a no-op. Each feature is independently
	 * gated by its own build flag. */
#if IMAGE_BL31
#if ENABLE_SPE_FOR_LOWER_ELS
	spe_enable(el2_unused);
#endif

#if ENABLE_AMU
	amu_enable(el2_unused, ctx);
#endif

#if ENABLE_SVE_FOR_NS
	sve_enable(ctx);
#endif

#if ENABLE_MPAM_FOR_LOWER_ELS
	mpam_enable(el2_unused);
#endif

#if ENABLE_TRBE_FOR_NS
	trbe_enable();
#endif /* ENABLE_TRBE_FOR_NS */

#if ENABLE_SYS_REG_TRACE_FOR_NS
	sys_reg_trace_enable(ctx);
#endif /* ENABLE_SYS_REG_TRACE_FOR_NS */

#if ENABLE_TRF_FOR_NS
	trf_enable();
#endif /* ENABLE_TRF_FOR_NS */

#endif
}
401 
402 /*******************************************************************************
403  * Enable architecture extensions on first entry to Secure world.
404  ******************************************************************************/
static void enable_extensions_secure(cpu_context_t *ctx)
{
	/* BL31-only; SVE is currently the only extension enabled for the
	 * Secure world, and only when ENABLE_SVE_FOR_SWD is set. */
#if IMAGE_BL31
#if ENABLE_SVE_FOR_SWD
	sve_enable(ctx);
#endif
#endif
}
413 
414 /*******************************************************************************
415  * The following function initializes the cpu_context for a CPU specified by
416  * its `cpu_idx` for first use, and sets the initial entrypoint state as
417  * specified by the entry_point_info structure.
418  ******************************************************************************/
419 void cm_init_context_by_index(unsigned int cpu_idx,
420 			      const entry_point_info_t *ep)
421 {
422 	cpu_context_t *ctx;
423 	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
424 	cm_setup_context(ctx, ep);
425 }
426 
427 /*******************************************************************************
428  * The following function initializes the cpu_context for the current CPU
429  * for first use, and sets the initial entrypoint state as specified by the
430  * entry_point_info structure.
431  ******************************************************************************/
432 void cm_init_my_context(const entry_point_info_t *ep)
433 {
434 	cpu_context_t *ctx;
435 	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
436 	cm_setup_context(ctx, ep);
437 }
438 
439 /*******************************************************************************
440  * Prepare the CPU system registers for first entry into realm, secure, or
441  * normal world.
442  *
443  * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized
444  * If execution is requested to non-secure EL1 or svc mode, and the CPU supports
445  * EL2 then EL2 is disabled by configuring all necessary EL2 registers.
446  * For all entries, the EL1 registers are initialized from the cpu_context
447  ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	u_register_t sctlr_elx, scr_el3, mdcr_el2;
	cpu_context_t *ctx = cm_get_context(security_state);
	bool el2_unused = false;
	uint64_t hcr_el2 = 0U;

	assert(ctx != NULL);

	if (security_state == NON_SECURE) {
		/* Consult the SCR_EL3 value stored in the context: HCE set
		 * means the next EL is EL2/Hyp, so SCTLR_EL2 must be primed;
		 * otherwise an implemented-but-unused EL2 must be disabled. */
		scr_el3 = read_ctx_reg(get_el3state_ctx(ctx),
						 CTX_SCR_EL3);
		if ((scr_el3 & SCR_HCE_BIT) != 0U) {
			/* Use SCTLR_EL1.EE value to initialise sctlr_el2 */
			sctlr_elx = read_ctx_reg(get_el1_sysregs_ctx(ctx),
							   CTX_SCTLR_EL1);
			sctlr_elx &= SCTLR_EE_BIT;
			sctlr_elx |= SCTLR_EL2_RES1;
#if ERRATA_A75_764081
			/*
			 * If workaround of errata 764081 for Cortex-A75 is used
			 * then set SCTLR_EL2.IESB to enable Implicit Error
			 * Synchronization Barrier.
			 */
			sctlr_elx |= SCTLR_IESB_BIT;
#endif
			write_sctlr_el2(sctlr_elx);
		} else if (el_implemented(2) != EL_IMPL_NONE) {
			el2_unused = true;

			/*
			 * EL2 present but unused, need to disable safely.
			 * SCTLR_EL2 can be ignored in this case.
			 *
			 * Set EL2 register width appropriately: Set HCR_EL2
			 * field to match SCR_EL3.RW.
			 */
			if ((scr_el3 & SCR_RW_BIT) != 0U)
				hcr_el2 |= HCR_RW_BIT;

			/*
			 * For Armv8.3 pointer authentication feature, disable
			 * traps to EL2 when accessing key registers or using
			 * pointer authentication instructions from lower ELs.
			 */
			hcr_el2 |= (HCR_API_BIT | HCR_APK_BIT);

			write_hcr_el2(hcr_el2);

			/*
			 * Initialise CPTR_EL2 setting all fields rather than
			 * relying on the hw. All fields have architecturally
			 * UNKNOWN reset values.
			 *
			 * CPTR_EL2.TCPAC: Set to zero so that Non-secure EL1
			 *  accesses to the CPACR_EL1 or CPACR from both
			 *  Execution states do not trap to EL2.
			 *
			 * CPTR_EL2.TTA: Set to zero so that Non-secure System
			 *  register accesses to the trace registers from both
			 *  Execution states do not trap to EL2.
			 *  If PE trace unit System registers are not implemented
			 *  then this bit is reserved, and must be set to zero.
			 *
			 * CPTR_EL2.TFP: Set to zero so that Non-secure accesses
			 *  to SIMD and floating-point functionality from both
			 *  Execution states do not trap to EL2.
			 */
			write_cptr_el2(CPTR_EL2_RESET_VAL &
					~(CPTR_EL2_TCPAC_BIT | CPTR_EL2_TTA_BIT
					| CPTR_EL2_TFP_BIT));

			/*
			 * Initialise CNTHCTL_EL2. All fields are
			 * architecturally UNKNOWN on reset and are set to zero
			 * except for field(s) listed below.
			 *
			 * CNTHCTL_EL2.EL1PTEN: Set to one to disable traps to
			 *  Hyp mode of Non-secure EL0 and EL1 accesses to the
			 *  physical timer registers.
			 *
			 * CNTHCTL_EL2.EL1PCTEN: Set to one to disable traps to
			 *  Hyp mode of  Non-secure EL0 and EL1 accesses to the
			 *  physical counter registers.
			 */
			write_cnthctl_el2(CNTHCTL_RESET_VAL |
						EL1PCEN_BIT | EL1PCTEN_BIT);

			/*
			 * Initialise CNTVOFF_EL2 to zero as it resets to an
			 * architecturally UNKNOWN value.
			 */
			write_cntvoff_el2(0);

			/*
			 * Set VPIDR_EL2 and VMPIDR_EL2 to match MIDR_EL1 and
			 * MPIDR_EL1 respectively.
			 */
			write_vpidr_el2(read_midr_el1());
			write_vmpidr_el2(read_mpidr_el1());

			/*
			 * Initialise VTTBR_EL2. All fields are architecturally
			 * UNKNOWN on reset.
			 *
			 * VTTBR_EL2.VMID: Set to zero. Even though EL1&0 stage
			 *  2 address translation is disabled, cache maintenance
			 *  operations depend on the VMID.
			 *
			 * VTTBR_EL2.BADDR: Set to zero as EL1&0 stage 2 address
			 *  translation is disabled.
			 */
			write_vttbr_el2(VTTBR_RESET_VAL &
				~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT)
				| (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));

			/*
			 * Initialise MDCR_EL2, setting all fields rather than
			 * relying on hw. Some fields are architecturally
			 * UNKNOWN on reset.
			 *
			 * MDCR_EL2.HLP: Set to one so that event counter
			 *  overflow, that is recorded in PMOVSCLR_EL0[0-30],
			 *  occurs on the increment that changes
			 *  PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU is
			 *  implemented. This bit is RES0 in versions of the
			 *  architecture earlier than ARMv8.5, setting it to 1
			 *  doesn't have any effect on them.
			 *
			 * MDCR_EL2.TTRF: Set to zero so that access to Trace
			 *  Filter Control register TRFCR_EL1 at EL1 is not
			 *  trapped to EL2. This bit is RES0 in versions of
			 *  the architecture earlier than ARMv8.4.
			 *
			 * MDCR_EL2.HPMD: Set to one so that event counting is
			 *  prohibited at EL2. This bit is RES0 in versions of
			 *  the architecture earlier than ARMv8.1, setting it
			 *  to 1 doesn't have any effect on them.
			 *
			 * MDCR_EL2.TPMS: Set to zero so that accesses to
			 *  Statistical Profiling control registers from EL1
			 *  do not trap to EL2. This bit is RES0 when SPE is
			 *  not implemented.
			 *
			 * MDCR_EL2.TDRA: Set to zero so that Non-secure EL0 and
			 *  EL1 System register accesses to the Debug ROM
			 *  registers are not trapped to EL2.
			 *
			 * MDCR_EL2.TDOSA: Set to zero so that Non-secure EL1
			 *  System register accesses to the powerdown debug
			 *  registers are not trapped to EL2.
			 *
			 * MDCR_EL2.TDA: Set to zero so that System register
			 *  accesses to the debug registers do not trap to EL2.
			 *
			 * MDCR_EL2.TDE: Set to zero so that debug exceptions
			 *  are not routed to EL2.
			 *
			 * MDCR_EL2.HPME: Set to zero to disable EL2 Performance
			 *  Monitors.
			 *
			 * MDCR_EL2.TPM: Set to zero so that Non-secure EL0 and
			 *  EL1 accesses to all Performance Monitors registers
			 *  are not trapped to EL2.
			 *
			 * MDCR_EL2.TPMCR: Set to zero so that Non-secure EL0
			 *  and EL1 accesses to the PMCR_EL0 or PMCR are not
			 *  trapped to EL2.
			 *
			 * MDCR_EL2.HPMN: Set to value of PMCR_EL0.N which is the
			 *  architecturally-defined reset value.
			 *
			 * MDCR_EL2.E2TB: Set to zero so that the trace Buffer
			 *  owning exception level is NS-EL1 and, tracing is
			 *  prohibited at NS-EL2. These bits are RES0 when
			 *  FEAT_TRBE is not implemented.
			 */
			mdcr_el2 = ((MDCR_EL2_RESET_VAL | MDCR_EL2_HLP |
				     MDCR_EL2_HPMD) |
				   ((read_pmcr_el0() & PMCR_EL0_N_BITS)
				   >> PMCR_EL0_N_SHIFT)) &
				   ~(MDCR_EL2_TTRF | MDCR_EL2_TPMS |
				     MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT |
				     MDCR_EL2_TDA_BIT | MDCR_EL2_TDE_BIT |
				     MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT |
				     MDCR_EL2_TPMCR_BIT |
				     MDCR_EL2_E2TB(MDCR_EL2_E2TB_EL1));

			write_mdcr_el2(mdcr_el2);

			/*
			 * Initialise HSTR_EL2. All fields are architecturally
			 * UNKNOWN on reset.
			 *
			 * HSTR_EL2.T<n>: Set all these fields to zero so that
			 *  Non-secure EL0 or EL1 accesses to System registers
			 *  do not trap to EL2.
			 */
			write_hstr_el2(HSTR_EL2_RESET_VAL & ~(HSTR_EL2_T_MASK));
			/*
			 * Initialise CNTHP_CTL_EL2. All fields are
			 * architecturally UNKNOWN on reset.
			 *
			 * CNTHP_CTL_EL2:ENABLE: Set to zero to disable the EL2
			 *  physical timer and prevent timer interrupts.
			 */
			write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL &
						~(CNTHP_CTL_ENABLE_BIT));
		}
		enable_extensions_nonsecure(el2_unused, ctx);
	}

	/* Restore the EL1 register state and point SP_EL3 at the context
	 * to be used by the next ERET. */
	cm_el1_sysregs_context_restore(security_state);
	cm_set_next_eret_context(security_state);
}
663 
664 #if CTX_INCLUDE_EL2_REGS
665 /*******************************************************************************
666  * Save EL2 sysreg context
667  ******************************************************************************/
668 void cm_el2_sysregs_context_save(uint32_t security_state)
669 {
670 	u_register_t scr_el3 = read_scr();
671 
672 	/*
673 	 * Always save the non-secure and realm EL2 context, only save the
674 	 * S-EL2 context if S-EL2 is enabled.
675 	 */
676 	if ((security_state != SECURE) ||
677 	    ((security_state == SECURE) && ((scr_el3 & SCR_EEL2_BIT) != 0U))) {
678 		cpu_context_t *ctx;
679 
680 		ctx = cm_get_context(security_state);
681 		assert(ctx != NULL);
682 
683 		el2_sysregs_context_save(get_el2_sysregs_ctx(ctx));
684 	}
685 }
686 
687 /*******************************************************************************
688  * Restore EL2 sysreg context
689  ******************************************************************************/
690 void cm_el2_sysregs_context_restore(uint32_t security_state)
691 {
692 	u_register_t scr_el3 = read_scr();
693 
694 	/*
695 	 * Always restore the non-secure and realm EL2 context, only restore the
696 	 * S-EL2 context if S-EL2 is enabled.
697 	 */
698 	if ((security_state != SECURE) ||
699 	    ((security_state == SECURE) && ((scr_el3 & SCR_EEL2_BIT) != 0U))) {
700 		cpu_context_t *ctx;
701 
702 		ctx = cm_get_context(security_state);
703 		assert(ctx != NULL);
704 
705 		el2_sysregs_context_restore(get_el2_sysregs_ctx(ctx));
706 	}
707 }
708 #endif /* CTX_INCLUDE_EL2_REGS */
709 
710 /*******************************************************************************
711  * The next four functions are used by runtime services to save and restore
712  * EL1 context on the 'cpu_context' structure for the specified security
713  * state.
714  ******************************************************************************/
715 void cm_el1_sysregs_context_save(uint32_t security_state)
716 {
717 	cpu_context_t *ctx;
718 
719 	ctx = cm_get_context(security_state);
720 	assert(ctx != NULL);
721 
722 	el1_sysregs_context_save(get_el1_sysregs_ctx(ctx));
723 
724 #if IMAGE_BL31
725 	if (security_state == SECURE)
726 		PUBLISH_EVENT(cm_exited_secure_world);
727 	else
728 		PUBLISH_EVENT(cm_exited_normal_world);
729 #endif
730 }
731 
732 void cm_el1_sysregs_context_restore(uint32_t security_state)
733 {
734 	cpu_context_t *ctx;
735 
736 	ctx = cm_get_context(security_state);
737 	assert(ctx != NULL);
738 
739 	el1_sysregs_context_restore(get_el1_sysregs_ctx(ctx));
740 
741 #if IMAGE_BL31
742 	if (security_state == SECURE)
743 		PUBLISH_EVENT(cm_entering_secure_world);
744 	else
745 		PUBLISH_EVENT(cm_entering_normal_world);
746 #endif
747 }
748 
749 /*******************************************************************************
750  * This function populates ELR_EL3 member of 'cpu_context' pertaining to the
751  * given security state with the given entrypoint
752  ******************************************************************************/
753 void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint)
754 {
755 	cpu_context_t *ctx;
756 	el3_state_t *state;
757 
758 	ctx = cm_get_context(security_state);
759 	assert(ctx != NULL);
760 
761 	/* Populate EL3 state so that ERET jumps to the correct entry */
762 	state = get_el3state_ctx(ctx);
763 	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
764 }
765 
766 /*******************************************************************************
767  * This function populates ELR_EL3 and SPSR_EL3 members of 'cpu_context'
768  * pertaining to the given security state
769  ******************************************************************************/
770 void cm_set_elr_spsr_el3(uint32_t security_state,
771 			uintptr_t entrypoint, uint32_t spsr)
772 {
773 	cpu_context_t *ctx;
774 	el3_state_t *state;
775 
776 	ctx = cm_get_context(security_state);
777 	assert(ctx != NULL);
778 
779 	/* Populate EL3 state so that ERET jumps to the correct entry */
780 	state = get_el3state_ctx(ctx);
781 	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
782 	write_ctx_reg(state, CTX_SPSR_EL3, spsr);
783 }
784 
785 /*******************************************************************************
786  * This function updates a single bit in the SCR_EL3 member of the 'cpu_context'
787  * pertaining to the given security state using the value and bit position
788  * specified in the parameters. It preserves all other bits.
789  ******************************************************************************/
790 void cm_write_scr_el3_bit(uint32_t security_state,
791 			  uint32_t bit_pos,
792 			  uint32_t value)
793 {
794 	cpu_context_t *ctx;
795 	el3_state_t *state;
796 	u_register_t scr_el3;
797 
798 	ctx = cm_get_context(security_state);
799 	assert(ctx != NULL);
800 
801 	/* Ensure that the bit position is a valid one */
802 	assert(((1UL << bit_pos) & SCR_VALID_BIT_MASK) != 0U);
803 
804 	/* Ensure that the 'value' is only a bit wide */
805 	assert(value <= 1U);
806 
807 	/*
808 	 * Get the SCR_EL3 value from the cpu context, clear the desired bit
809 	 * and set it to its new value.
810 	 */
811 	state = get_el3state_ctx(ctx);
812 	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
813 	scr_el3 &= ~(1UL << bit_pos);
814 	scr_el3 |= (u_register_t)value << bit_pos;
815 	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
816 }
817 
818 /*******************************************************************************
819  * This function retrieves SCR_EL3 member of 'cpu_context' pertaining to the
820  * given security state.
821  ******************************************************************************/
u_register_t cm_get_scr_el3(uint32_t security_state)
{
	cpu_context_t *ctx;
	el3_state_t *state;

	ctx = cm_get_context(security_state);
	assert(ctx != NULL);

	/* Return the SCR_EL3 value saved in the context's EL3 state */
	state = get_el3state_ctx(ctx);
	return read_ctx_reg(state, CTX_SCR_EL3);
}
834 
835 /*******************************************************************************
836  * This function is used to program the context that's used for exception
837  * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
838  * the required security state
839  ******************************************************************************/
840 void cm_set_next_eret_context(uint32_t security_state)
841 {
842 	cpu_context_t *ctx;
843 
844 	ctx = cm_get_context(security_state);
845 	assert(ctx != NULL);
846 
847 	cm_set_next_context(ctx);
848 }
849