xref: /rk3399_ARM-atf/lib/el3_runtime/aarch32/context_mgmt.c (revision 82cb2c1ad9897473743f08437d0a3995bed561b9)
/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <platform.h>
#include <platform_def.h>
#include <smcc_helpers.h>
#include <string.h>
#include <utils.h>

/*******************************************************************************
 * Context management library initialisation routine. This library is used by
 * runtime services to share pointers to 'cpu_context' structures for the
 * secure and non-secure states. Management of the structures and their
 * associated memory is not done by the context management library, e.g. the
 * PSCI service manages the cpu context used for entry from and exit to the
 * non-secure state, and the Secure payload manages the context(s)
 * corresponding to the secure state. The Secure payload also uses this
 * library to get access to the non-secure state cpu context pointers.
 ******************************************************************************/
void cm_init(void)
{
	/*
	 * The context management library has only global data to initialize, but
	 * that will be done when the BSS is zeroed out
	 */
}

/*******************************************************************************
 * The following function initializes the cpu_context 'ctx' for first use and
 * sets the initial entrypoint state as specified by the entry_point_info
 * structure.
 *
 * The security state to initialize is determined by the SECURE attribute of
 * the entry_point_info.
 *
 * The EE attribute is used to configure the endianness of the new execution
 * context.
 *
 * To prepare the register state for entry, call cm_prepare_el3_exit()
 * followed by el3_exit(). For an entry into the secure state no further
 * register setup is performed by cm_prepare_el3_exit().
 ******************************************************************************/
static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	unsigned int security_state;
	uint32_t scr, sctlr;
	regs_t *reg_ctx;

	assert(ctx);

	security_state = GET_SECURITY_STATE(ep->h.attr);

	/* Clear any residual register values from the context */
	zeromem(ctx, sizeof(*ctx));

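	/*
	 * get_regs_ctx() returns the register frame embedded in 'ctx'. The
	 * write_ctx_reg() calls below fill in the entries of this frame (SCR,
	 * LR, SPSR and, for a non-secure context, SCTLR) that are consumed
	 * when EL3 is exited to the target world.
	 */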
	reg_ctx = get_regs_ctx(ctx);

	/*
	 * Base the context SCR on the current value, adjust for entry point
	 * specific requirements
	 */
	scr = read_scr();
	scr &= ~(SCR_NS_BIT | SCR_HCE_BIT);

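	/*
	 * SCR.NS determines the security state that the lower privilege
	 * levels run in once EL3 is exited, so it is set only when a
	 * non-secure entry point is being initialised.
	 */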
	if (security_state != SECURE)
		scr |= SCR_NS_BIT;

	/*
	 * Set up SCTLR for the Non Secure context.
	 * The EE bit is taken from the entrypoint attributes.
	 * The M, C and I bits must be zero (as required by the PSCI
	 * specification).
	 *
	 * The target exception level is based on the SPSR mode requested.
	 * If execution is requested in hyp mode, HVC is enabled via SCR.HCE.
	 *
	 * Always compute the SCTLR value and save it in the cpu_context -
	 * the HYP registers are set up by cm_prepare_el3_exit() as they are
	 * not part of the stored cpu_context.
	 *
	 * TODO: In debug builds the SPSR should be validated and checked
	 * against the CPU support, security state, endianness and pc.
	 */
	if (security_state != SECURE) {
		sctlr = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
		/*
		 * In addition to SCTLR_RES1, set the CP15_BEN, nTWI & nTWE
		 * bits that architecturally reset to 1.
		 */
		sctlr |= SCTLR_RES1 | SCTLR_CP15BEN_BIT |
				SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
		write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr);
	}

	if (GET_M32(ep->spsr) == MODE32_hyp)
		scr |= SCR_HCE_BIT;

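	/*
	 * Populate the context entries consumed by the EL3 exit path: SCR for
	 * the security state switch, LR with the entry point address and SPSR
	 * with the requested mode and state bits.
	 */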
	write_ctx_reg(reg_ctx, CTX_SCR, scr);
	write_ctx_reg(reg_ctx, CTX_LR, ep->pc);
	write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr);

	/*
	 * Store the r0-r3 values from the entrypoint into the context.
	 * A memcpy is safe as we control the layout of both structures and
	 * r0-r3 are located at the start of the register frame.
	 */
	memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t));
}

/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure.
 ******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
			      const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
	cm_init_context_common(ctx, ep);
}

/*******************************************************************************
 * The following function initializes the cpu_context for the current CPU
 * for first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 ******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
	cm_init_context_common(ctx, ep);
}

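/*
 * Illustrative usage sketch (the exact flow is owned by the calling runtime
 * service, e.g. the PSCI library, so treat this as an assumption rather than
 * a fixed sequence): a service first initialises the context and then
 * prepares the CPU registers for the world switch before leaving EL3:
 *
 *	cm_init_my_context(ep);
 *	cm_prepare_el3_exit(NON_SECURE);
 *	... then return to the non-secure world via the EL3 exit path.
 */
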
/*******************************************************************************
 * Prepare the CPU system registers for first entry into the secure or normal
 * world.
 *
 * If execution is requested in hyp mode, HSCTLR is initialized.
 * If execution is requested in non-secure PL1 and the CPU supports HYP mode,
 * then HYP mode is disabled by configuring all necessary HYP mode registers.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	uint32_t sctlr, scr, hcptr;
	cpu_context_t *ctx = cm_get_context(security_state);

	assert(ctx);

	if (security_state == NON_SECURE) {
		scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
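		/*
		 * SCR_HCE_BIT was set by cm_init_context_common() when the
		 * entry point requested hyp mode via its SPSR.
		 */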
		if (scr & SCR_HCE_BIT) {
			/* Use SCTLR value to initialize HSCTLR */
			sctlr = read_ctx_reg(get_regs_ctx(ctx),
						 CTX_NS_SCTLR);
			sctlr |= HSCTLR_RES1;
			/* Temporarily set the NS bit to access HSCTLR */
			write_scr(read_scr() | SCR_NS_BIT);
			/*
			 * Make sure the write to SCR is complete so that
			 * we can access HSCTLR
			 */
			isb();
			write_hsctlr(sctlr);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		} else if (read_id_pfr1() &
			(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
			/*
			 * Set the NS bit to access NS copies of certain banked
			 * registers
			 */
			write_scr(read_scr() | SCR_NS_BIT);
			isb();

			/* PL2 present but unused, need to disable safely */
			write_hcr(0);

			/* HSCTLR : can be ignored when bypassing */

			/* HCPTR : disable all traps (TCPAC, TTA, TCP11/10) */
			hcptr = read_hcptr();
			hcptr &= ~(TCPAC_BIT | TTA_BIT | TCP11_BIT | TCP10_BIT);
			write_hcptr(hcptr);

			/* Enable PL1 access to the physical timer/counter */
			write_cnthctl(PL1PCEN_BIT | PL1PCTEN_BIT);

			/* Reset CNTVOFF */
			write64_cntvoff(0);

			/* Set VPIDR, VMPIDR to match MIDR, MPIDR */
			write_vpidr(read_midr());
			write_vmpidr(read_mpidr());

			/*
			 * Reset VTTBR.
			 * Needed because cache maintenance operations depend on
			 * the VMID even when non-secure EL1&0 stage 2 address
			 * translation is disabled.
			 */
			write64_vttbr(0);

			/*
			 * Avoid unexpected debug traps in case HDCR is not
			 * completely reset by the hardware - set HDCR.HPMN to
			 * PMCR.N and zero the remaining bits.
			 * The HDCR.HPMN and PMCR.N fields are the same size
			 * (5 bits) and HPMN is at offset zero within HDCR.
			 */
			write_hdcr((read_pmcr() & PMCR_N_BITS) >> PMCR_N_SHIFT);

			/*
			 * Reset CNTHP_CTL to disable the Hyp mode physical
			 * timer and therefore prevent timer interrupts.
			 */
			write_cnthp_ctl(0);
			isb();

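			/*
			 * Switch back to the secure view of the banked
			 * registers before returning to the caller in EL3.
			 */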
			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		}
	}
}