xref: /rk3399_ARM-atf/lib/el3_runtime/aarch32/context_mgmt.c (revision e33b78a658bd54a815c780e17c2d0073db6f59db)
/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <platform.h>
#include <platform_def.h>
#include <smcc_helpers.h>
#include <string.h>

/*******************************************************************************
 * Context management library initialisation routine. This library is used by
 * runtime services to share pointers to 'cpu_context' structures for the
 * secure and non-secure states. Management of the structures and their
 * associated memory is not done by the context management library e.g. the
 * PSCI service manages the cpu context used for entry from and exit to the
 * non-secure state. The Secure payload manages the context(s) corresponding
 * to the secure state. It also uses this library to get access to the
 * non-secure state cpu context pointers.
 ******************************************************************************/
void cm_init(void)
{
	/*
	 * The context management library has only global data to initialize,
	 * but that will be done when the BSS is zeroed out.
	 */
}

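/*
 * Illustrative sketch (not part of this file's code): a runtime service that
 * needs the peer world's saved context typically obtains it through this
 * library, e.g.
 *
 *	cpu_context_t *ns_ctx = cm_get_context(NON_SECURE);
 *	assert(ns_ctx);
 *	... read or update the saved state via the CTX_* offsets ...
 *
 * cm_get_context() is declared in context_mgmt.h; the exact accesses a
 * service performs depend on its design.
 */
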
/*******************************************************************************
 * The following function initializes the cpu_context 'ctx' for
 * first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 *
 * The security state to initialize is determined by the SECURE attribute
 * of the entry_point_info. The caller is expected to select the initialized
 * context as the next one to return to.
 *
 * The EE attribute is used to configure the endianness of the new execution
 * context.
 *
 * To prepare the register state for entry, call cm_prepare_el3_exit() and
 * then el3_exit(). (For an entry into the secure state, cm_prepare_el3_exit()
 * currently has nothing additional to set up.)
 ******************************************************************************/
static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	unsigned int security_state;
	uint32_t scr, sctlr;
	regs_t *reg_ctx;

	assert(ctx);

	security_state = GET_SECURITY_STATE(ep->h.attr);

	/* Clear any residual register values from the context */
	memset(ctx, 0, sizeof(*ctx));

	/*
	 * Base the context SCR on the current value, adjust for entry point
	 * specific requirements
	 */
	scr = read_scr();
	scr &= ~(SCR_NS_BIT | SCR_HCE_BIT);

	if (security_state != SECURE)
		scr |= SCR_NS_BIT;

	/*
	 * Set up SCTLR for the Non-secure context.
	 * The EE bit is taken from the entrypoint attributes.
	 * The M, C and I bits must be zero (as required by the PSCI
	 * specification).
	 *
	 * The target exception level is based on the spsr mode requested.
	 * If execution is requested to hyp mode, HVC is enabled
	 * via SCR.HCE.
	 *
	 * Always compute the SCTLR value and save it in the cpu_context
	 * - the HYP registers are set up by cm_prepare_el3_exit() as they
	 * are not part of the stored cpu_context.
	 *
	 * TODO: In debug builds the spsr should be validated and checked
	 * against the CPU support, security state, endianness and pc
	 */
	/*
	 * Obtain the register context before it is first written to, i.e.
	 * before the Non-secure SCTLR is saved below.
	 */
	reg_ctx = get_regs_ctx(ctx);

	if (security_state != SECURE) {
		sctlr = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
		sctlr |= SCTLR_RES1;
		write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr);
	}

	if (GET_M32(ep->spsr) == MODE32_hyp)
		scr |= SCR_HCE_BIT;

	write_ctx_reg(reg_ctx, CTX_SCR, scr);
	write_ctx_reg(reg_ctx, CTX_LR, ep->pc);
	write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr);

	/*
	 * Store the r0-r3 values from the entrypoint into the context.
	 * Use memcpy as we are in control of the layout of the structures.
	 */
	memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t));
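
	/*
	 * Note (informational): the memcpy above relies on the saved r0-r3
	 * slots being the first members of 'regs_t', so that
	 * aapcs32_params_t maps onto them directly. A layout-independent
	 * sketch of the same operation, assuming the CTX_GPREG_Rn offsets
	 * from context.h, would be:
	 *
	 *	write_ctx_reg(reg_ctx, CTX_GPREG_R0, ep->args.arg0);
	 *	write_ctx_reg(reg_ctx, CTX_GPREG_R1, ep->args.arg1);
	 *	write_ctx_reg(reg_ctx, CTX_GPREG_R2, ep->args.arg2);
	 *	write_ctx_reg(reg_ctx, CTX_GPREG_R3, ep->args.arg3);
	 */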
}

/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure.
 ******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
			      const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
	cm_init_context_common(ctx, ep);
}

/*******************************************************************************
 * The following function initializes the cpu_context for the current CPU
 * for first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 ******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
	cm_init_context_common(ctx, ep);
}

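/*
 * Illustrative usage sketch (hypothetical caller; the entry point source and
 * the SPSR value are platform specific and shown here only as assumptions):
 *
 *	entry_point_info_t *ns_ep =
 *		bl31_plat_get_next_image_ep_info(NON_SECURE);
 *	assert(ns_ep);
 *	ns_ep->spsr = SPSR_MODE32(MODE32_hyp, SPSR_T_ARM, SPSR_E_LITTLE,
 *				  DISABLE_ALL_EXCEPTIONS);
 *	cm_init_my_context(ns_ep);
 *	cm_prepare_el3_exit(NON_SECURE);
 */
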
/*******************************************************************************
 * Prepare the CPU system registers for first entry into the secure or normal
 * world.
 *
 * If execution is requested to hyp mode, HSCTLR is initialized.
 * If execution is requested to non-secure PL1 and the CPU supports HYP mode,
 * then HYP mode is disabled by configuring all necessary HYP mode registers.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	uint32_t sctlr, scr, hcptr;
	cpu_context_t *ctx = cm_get_context(security_state);

	assert(ctx);

	if (security_state == NON_SECURE) {
		scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
		if (scr & SCR_HCE_BIT) {
			/* Use SCTLR value to initialize HSCTLR */
			sctlr = read_ctx_reg(get_regs_ctx(ctx),
					     CTX_NS_SCTLR);
			sctlr |= HSCTLR_RES1;
			/* Temporarily set the NS bit to access HSCTLR */
			write_scr(read_scr() | SCR_NS_BIT);
			/*
			 * Make sure the write to SCR is complete so that
			 * we can access HSCTLR
			 */
			isb();
			write_hsctlr(sctlr);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		} else if (read_id_pfr1() &
			(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
			/*
			 * Set the NS bit to access HCR, HCPTR, CNTHCTL,
			 * VPIDR and VMPIDR
			 */
			write_scr(read_scr() | SCR_NS_BIT);
			isb();

			/* PL2 present but unused, need to disable safely */
			write_hcr(0);

			/* HSCTLR : can be ignored when bypassing */

			/* HCPTR : disable all traps TCPAC, TTA, TCP */
			hcptr = read_hcptr();
			hcptr &= ~(TCPAC_BIT | TTA_BIT | TCP11_BIT | TCP10_BIT);
			write_hcptr(hcptr);

			/* Enable PL1 access to the timer */
			write_cnthctl(PL1PCEN_BIT | PL1PCTEN_BIT);

			/* Reset CNTVOFF (the virtual counter offset) */
			write64_cntvoff(0);

			/* Set VPIDR, VMPIDR to match MIDR, MPIDR */
			write_vpidr(read_midr());
			write_vmpidr(read_mpidr());

			/*
			 * Reset VTTBR.
			 * Needed because cache maintenance operations depend
			 * on the VMID even when non-secure PL1&0 stage 2
			 * address translation is disabled.
			 */
			write64_vttbr(0);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		}
	}
}
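
/*
 * Note on the sequences above (informational): SCR.NS gates monitor-mode
 * accesses to the Hyp and Non-secure banked registers, which is why both
 * branches bracket their register writes with SCR.NS toggles and an isb()
 * after each SCR update. A minimal sketch of the same pattern for any other
 * Hyp register access from monitor mode would be:
 *
 *	write_scr(read_scr() | SCR_NS_BIT);
 *	isb();
 *	... access the required Hyp registers ...
 *	write_scr(read_scr() & ~SCR_NS_BIT);
 *	isb();
 */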