xref: /rk3399_ARM-atf/lib/el3_runtime/aarch32/context_mgmt.c (revision 6bb49c876c7593ed5f61c20ef3d989dcff8e8d8c)
/*
 * Copyright (c) 2016-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/extensions/trf.h>
#include <lib/utils.h>

/*******************************************************************************
 * Context management library initialisation routine. This library is used by
 * runtime services to share pointers to 'cpu_context' structures for the secure
 * and non-secure states. Management of the structures and their associated
 * memory is not done by the context management library e.g. the PSCI service
 * manages the cpu context used for entry from and exit to the non-secure state.
 * The Secure payload manages the context(s) corresponding to the secure state.
 * It also uses this library to get access to the non-secure
 * state cpu context pointers.
 ******************************************************************************/
void cm_init(void)
{
	/*
	 * The context management library has only global data to initialize, but
	 * that will be done when the BSS is zeroed out
	 */
}

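/*
 * Hedged usage sketch, not upstream code: how a runtime service might fetch
 * the shared context pointers described above. Only cm_get_context() and
 * cm_get_context_by_index(), which this file itself uses, are assumed; the
 * function name below is made up for illustration.
 */
#if 0
static void example_inspect_contexts(unsigned int cpu_idx)
{
	/* Non-secure context registered for the calling CPU */
	cpu_context_t *ns_ctx = cm_get_context(NON_SECURE);

	/* Secure context of an arbitrary CPU, looked up by index */
	cpu_context_t *s_ctx = cm_get_context_by_index(cpu_idx, SECURE);

	/* The library only hands out pointers; it does not allocate them */
	assert((ns_ctx != NULL) && (s_ctx != NULL));
}
#endif
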
/*******************************************************************************
 * The following function initializes the cpu_context 'ctx' for
 * first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 *
 * The security state to initialize is determined by the SECURE attribute
 * of the entry_point_info.
 *
 * The EE and ST attributes are used to configure the endianness and secure
 * timer availability for the new execution context.
 *
 * To prepare the register state for entry, call cm_prepare_el3_exit() and
 * el3_exit(). For Secure-EL1, cm_prepare_el3_exit() is equivalent to
 * cm_el1_sysregs_context_restore().
 ******************************************************************************/
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	unsigned int security_state;
	uint32_t scr, sctlr;
	regs_t *reg_ctx;

	assert(ctx != NULL);

	security_state = GET_SECURITY_STATE(ep->h.attr);

	/* Clear any residual register values from the context */
	zeromem(ctx, sizeof(*ctx));

	reg_ctx = get_regs_ctx(ctx);

	/*
	 * Base the context SCR on the current value, adjust for entry point
	 * specific requirements
	 */
	scr = read_scr();
	scr &= ~(SCR_NS_BIT | SCR_HCE_BIT);

	if (security_state != SECURE)
		scr |= SCR_NS_BIT;

	if (security_state != SECURE) {
		/*
		 * Set up SCTLR for the Non-secure context.
		 *
		 * SCTLR.EE: Endianness is taken from the entrypoint attributes.
		 *
		 * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
		 *  required by the PSCI specification)
		 *
		 * Set remaining SCTLR fields to their architecturally defined
		 * values. Some fields reset to an IMPLEMENTATION DEFINED value:
		 *
		 * SCTLR.TE: Set to zero so that exceptions to an Exception
		 *  Level executing at PL1 are taken to A32 state.
		 *
		 * SCTLR.V: Set to zero to select the normal exception vectors
		 *  with base address held in VBAR.
		 */
		assert(((ep->spsr >> SPSR_E_SHIFT) & SPSR_E_MASK) ==
			(EP_GET_EE(ep->h.attr) >> EP_EE_SHIFT));

		sctlr = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0U;
		sctlr |= (SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_V_BIT));
		write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr);
	}

	/*
	 * The target exception level is based on the spsr mode requested. If
	 * execution is requested to hyp mode, HVC is enabled via SCR.HCE.
	 */
	if (GET_M32(ep->spsr) == MODE32_hyp)
		scr |= SCR_HCE_BIT;

	/*
	 * Store the initialised values for SCTLR and SCR in the cpu_context.
	 * The Hyp mode registers are not part of the saved context and are
	 * set up in cm_prepare_el3_exit().
	 */
	write_ctx_reg(reg_ctx, CTX_SCR, scr);
	write_ctx_reg(reg_ctx, CTX_LR, ep->pc);
	write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr);

	/*
	 * Store the r0-r3 values from the entrypoint into the context.
	 * Use memcpy as we are in control of the layout of the structures.
	 */
	memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t));
}
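
/*
 * Hedged sketch, not upstream code: one way a caller might populate the
 * entry_point_info_t consumed by cm_setup_context(). SET_PARAM_HEAD(),
 * SET_SECURITY_STATE() and SPSR_MODE32() are the usual TF-A helpers; the
 * entrypoint address and the function name are made up for illustration.
 */
#if 0
static void example_build_ns_entrypoint(entry_point_info_t *ep)
{
	/* Initialise the descriptor header, then mark it as non-secure */
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, 0);
	SET_SECURITY_STATE(ep->h.attr, NON_SECURE);

	/* Hypothetical non-secure entrypoint address */
	ep->pc = 0x80000000UL;

	/* Enter in SVC mode, A32, little-endian, all interrupts masked */
	ep->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE,
			       DISABLE_ALL_EXCEPTIONS);

	/* r0-r3 as seen by the entrypoint; zero them here */
	zeromem(&ep->args, sizeof(ep->args));
}
#endif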

/*******************************************************************************
 * Enable architecture extensions on first entry to Non-secure world.
 * When EL2 is implemented but unused, `el2_unused` is true, otherwise it is
 * false.
 ******************************************************************************/
static void enable_extensions_nonsecure(bool el2_unused)
{
#if IMAGE_BL32
#if ENABLE_AMU
	amu_enable(el2_unused);
#endif

#if ENABLE_SYS_REG_TRACE_FOR_NS
	sys_reg_trace_enable();
#endif /* ENABLE_SYS_REG_TRACE_FOR_NS */

	if (is_feat_trf_supported()) {
		trf_enable();
	}
#endif
}

/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure.
 ******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
			      const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}

/*******************************************************************************
 * The following function initializes the cpu_context for the current CPU
 * for first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 ******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
	cm_setup_context(ctx, ep);
}

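/*
 * Hedged sketch, not upstream code: the typical call sequence a BL32/SPD or
 * PSCI-style service follows around these helpers before returning to the
 * normal world. The entry point descriptor is assumed to be provided by the
 * caller; only cm_init_my_context() and cm_prepare_el3_exit(), defined in
 * this file, are used.
 */
#if 0
static void example_prepare_ns_entry(const entry_point_info_t *ns_ep)
{
	/* Populate the per-CPU non-secure context from the entry point info */
	cm_init_my_context(ns_ep);

	/*
	 * Program the CPU system registers (SCR, HSCTLR or the Hyp disable
	 * sequence, extension enables) for the first exit into the
	 * non-secure world.
	 */
	cm_prepare_el3_exit(NON_SECURE);
}
#endif
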
177 
178 /*******************************************************************************
179  * Prepare the CPU system registers for first entry into secure or normal world
180  *
181  * If execution is requested to hyp mode, HSCTLR is initialized
182  * If execution is requested to non-secure PL1, and the CPU supports
183  * HYP mode then HYP mode is disabled by configuring all necessary HYP mode
184  * registers.
185  ******************************************************************************/
186 void cm_prepare_el3_exit(uint32_t security_state)
187 {
188 	uint32_t hsctlr, scr;
189 	cpu_context_t *ctx = cm_get_context(security_state);
190 	bool el2_unused = false;
191 
192 	assert(ctx != NULL);
193 
194 	if (security_state == NON_SECURE) {
195 		scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
196 		if ((scr & SCR_HCE_BIT) != 0U) {
197 			/* Use SCTLR value to initialize HSCTLR */
198 			hsctlr = read_ctx_reg(get_regs_ctx(ctx),
199 						 CTX_NS_SCTLR);
200 			hsctlr |= HSCTLR_RES1;
201 			/* Temporarily set the NS bit to access HSCTLR */
202 			write_scr(read_scr() | SCR_NS_BIT);
203 			/*
204 			 * Make sure the write to SCR is complete so that
205 			 * we can access HSCTLR
206 			 */
207 			isb();
208 			write_hsctlr(hsctlr);
209 			isb();
210 
211 			write_scr(read_scr() & ~SCR_NS_BIT);
212 			isb();
213 		} else if ((read_id_pfr1() &
214 			(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) != 0U) {
215 			el2_unused = true;
216 
217 			/*
218 			 * Set the NS bit to access NS copies of certain banked
219 			 * registers
220 			 */
221 			write_scr(read_scr() | SCR_NS_BIT);
222 			isb();
223 
224 			/*
225 			 * Hyp / PL2 present but unused, need to disable safely.
226 			 * HSCTLR can be ignored in this case.
227 			 *
228 			 * Set HCR to its architectural reset value so that
229 			 * Non-secure operations do not trap to Hyp mode.
230 			 */
231 			write_hcr(HCR_RESET_VAL);
232 
233 			/*
234 			 * Set HCPTR to its architectural reset value so that
235 			 * Non-secure access from EL1 or EL0 to trace and to
236 			 * Advanced SIMD and floating point functionality does
237 			 * not trap to Hyp mode.
238 			 */
239 			write_hcptr(HCPTR_RESET_VAL);
240 
241 			/*
242 			 * Initialise CNTHCTL. All fields are architecturally
243 			 * UNKNOWN on reset and are set to zero except for
244 			 * field(s) listed below.
245 			 *
246 			 * CNTHCTL.PL1PCEN: Disable traps to Hyp mode of
247 			 *  Non-secure EL0 and EL1 accessed to the physical
248 			 *  timer registers.
249 			 *
250 			 * CNTHCTL.PL1PCTEN: Disable traps to Hyp mode of
251 			 *  Non-secure EL0 and EL1 accessed to the physical
252 			 *  counter registers.
253 			 */
254 			write_cnthctl(CNTHCTL_RESET_VAL |
255 					PL1PCEN_BIT | PL1PCTEN_BIT);
256 
257 			/*
258 			 * Initialise CNTVOFF to zero as it resets to an
259 			 * IMPLEMENTATION DEFINED value.
260 			 */
261 			write64_cntvoff(0);
262 
263 			/*
264 			 * Set VPIDR and VMPIDR to match MIDR_EL1 and MPIDR
265 			 * respectively.
266 			 */
267 			write_vpidr(read_midr());
268 			write_vmpidr(read_mpidr());
269 
270 			/*
271 			 * Initialise VTTBR, setting all fields rather than
272 			 * relying on the hw. Some fields are architecturally
273 			 * UNKNOWN at reset.
274 			 *
275 			 * VTTBR.VMID: Set to zero which is the architecturally
276 			 *  defined reset value. Even though EL1&0 stage 2
277 			 *  address translation is disabled, cache maintenance
278 			 *  operations depend on the VMID.
279 			 *
280 			 * VTTBR.BADDR: Set to zero as EL1&0 stage 2 address
281 			 *  translation is disabled.
282 			 */
283 			write64_vttbr(VTTBR_RESET_VAL &
284 				~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT)
285 				| (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));
286 
287 			/*
288 			 * Initialise HDCR, setting all the fields rather than
289 			 * relying on hw.
290 			 *
291 			 * HDCR.HPMN: Set to value of PMCR.N which is the
292 			 *  architecturally-defined reset value.
293 			 *
294 			 * HDCR.HLP: Set to one so that event counter
295 			 *  overflow, that is recorded in PMOVSCLR[0-30],
296 			 *  occurs on the increment that changes
297 			 *  PMEVCNTR<n>[63] from 1 to 0, when ARMv8.5-PMU is
298 			 *  implemented. This bit is RES0 in versions of the
299 			 *  architecture earlier than ARMv8.5, setting it to 1
300 			 *  doesn't have any effect on them.
301 			 *  This bit is Reserved, UNK/SBZP in ARMv7.
302 			 *
303 			 * HDCR.HPME: Set to zero to disable EL2 Event
304 			 *  counters.
305 			 */
306 #if (ARM_ARCH_MAJOR > 7)
307 			write_hdcr((HDCR_RESET_VAL | HDCR_HLP_BIT |
308 				   ((read_pmcr() & PMCR_N_BITS) >>
309 				    PMCR_N_SHIFT)) & ~HDCR_HPME_BIT);
310 #else
311 			write_hdcr((HDCR_RESET_VAL |
312 				   ((read_pmcr() & PMCR_N_BITS) >>
313 				    PMCR_N_SHIFT)) & ~HDCR_HPME_BIT);
314 #endif
315 			/*
316 			 * Set HSTR to its architectural reset value so that
317 			 * access to system registers in the cproc=1111
318 			 * encoding space do not trap to Hyp mode.
319 			 */
320 			write_hstr(HSTR_RESET_VAL);
321 			/*
322 			 * Set CNTHP_CTL to its architectural reset value to
323 			 * disable the EL2 physical timer and prevent timer
324 			 * interrupts. Some fields are architecturally UNKNOWN
325 			 * on reset and are set to zero.
326 			 */
327 			write_cnthp_ctl(CNTHP_CTL_RESET_VAL);
328 			isb();
329 
330 			write_scr(read_scr() & ~SCR_NS_BIT);
331 			isb();
332 		}
333 		enable_extensions_nonsecure(el2_unused);
334 	}
335 }
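
/*
 * Hedged illustration, not upstream code: the SCR.NS toggle pattern used
 * above to reach Hyp and banked Non-secure registers from Monitor mode,
 * factored into a standalone sketch. The helper name is made up; read_scr(),
 * write_scr(), write_hsctlr() and isb() are the same accessors used in this
 * file.
 */
#if 0
static void example_write_hyp_reg_from_el3(uint32_t hsctlr_val)
{
	/* Switch to the Non-secure view so that Hyp registers are accessible */
	write_scr(read_scr() | SCR_NS_BIT);
	isb();		/* SCR write must complete before the Hyp access */

	write_hsctlr(hsctlr_val);
	isb();

	/* Restore the Secure view before returning to the caller */
	write_scr(read_scr() & ~SCR_NS_BIT);
	isb();
}
#endif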

/*******************************************************************************
 * This function is used to exit to Non-secure world. It simply calls the
 * cm_prepare_el3_exit function for AArch32.
 ******************************************************************************/
void cm_prepare_el3_exit_ns(void)
{
	cm_prepare_el3_exit(NON_SECURE);
}