/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <platform.h>
#include <platform_def.h>
#include <smcc_helpers.h>
#include <string.h>
#include <utils.h>

/*******************************************************************************
 * Context management library initialisation routine. This library is used by
 * runtime services to share pointers to 'cpu_context' structures for the
 * secure and non-secure states. Management of the structures and their
 * associated memory is not done by the context management library, e.g. the
 * PSCI service manages the cpu context used for entry from and exit to the
 * non-secure state, while the Secure payload manages the context(s)
 * corresponding to the secure state. The Secure payload also uses this
 * library to get access to the non-secure state cpu context pointers.
 ******************************************************************************/
void cm_init(void)
{
	/*
	 * The context management library has only global data to initialize,
	 * but that will be done when the BSS is zeroed out.
	 */
}
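
/*
 * Illustrative sketch, not part of the original file: how a runtime service
 * such as PSCI might share a 'cpu_context' pointer through this library. The
 * `example_ns_context` array and function below are hypothetical;
 * cm_set_context() and cm_get_context() are declared in context_mgmt.h.
 */
#if 0
static cpu_context_t example_ns_context[PLATFORM_CORE_COUNT];

static void example_share_ns_context(void)
{
	/* Publish the calling CPU's non-secure context pointer */
	cm_set_context(&example_ns_context[plat_my_core_pos()], NON_SECURE);

	/* Any other runtime service can now look up the same pointer */
	assert(cm_get_context(NON_SECURE) ==
			&example_ns_context[plat_my_core_pos()]);
}
#endif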

/*******************************************************************************
 * The following function initializes the cpu_context 'ctx' for
 * first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 *
 * The security state to initialize is determined by the SECURE attribute
 * of the entry_point_info.
 *
 * The EE attribute is used to configure the endianness of the new execution
 * context.
 *
 * To prepare the register state for entry call cm_prepare_el3_exit() and
 * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
 * cm_el1_sysreg_context_restore().
 ******************************************************************************/
static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
	unsigned int security_state;
	uint32_t scr, sctlr;
	regs_t *reg_ctx;

	assert(ctx);

	security_state = GET_SECURITY_STATE(ep->h.attr);

	/* Clear any residual register values from the context */
	zeromem(ctx, sizeof(*ctx));

	reg_ctx = get_regs_ctx(ctx);

	/*
	 * Base the context SCR on the current value, adjust for entry point
	 * specific requirements
	 */
	scr = read_scr();
	scr &= ~(SCR_NS_BIT | SCR_HCE_BIT);

	if (security_state != SECURE)
		scr |= SCR_NS_BIT;

	/*
	 * Set up SCTLR for the Non-secure context.
	 * EE bit is taken from the entrypoint attributes.
	 * M, C and I bits must be zero (as required by the PSCI
	 * specification).
	 *
	 * The target exception level is based on the spsr mode requested.
	 * If execution is requested to hyp mode, HVC is enabled
	 * via SCR.HCE.
	 *
	 * Always compute the SCTLR value and save it in the cpu_context
	 * - the HYP registers are set up by cm_prepare_el3_exit() as they
	 * are not part of the stored cpu_context.
	 *
	 * TODO: In debug builds the spsr should be validated and checked
	 * against the CPU support, security state, endianness and pc
	 */
	if (security_state != SECURE) {
		sctlr = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
		/*
		 * In addition to SCTLR_RES1, set the CP15_BEN, nTWI & nTWE
		 * bits that architecturally reset to 1.
		 */
		sctlr |= SCTLR_RES1 | SCTLR_CP15BEN_BIT |
				SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
		write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr);
	}

	if (GET_M32(ep->spsr) == MODE32_hyp)
		scr |= SCR_HCE_BIT;

	write_ctx_reg(reg_ctx, CTX_SCR, scr);
	write_ctx_reg(reg_ctx, CTX_LR, ep->pc);
	write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr);

	/*
	 * Store the r0-r3 values from the entrypoint into the context.
	 * Use memcpy as we are in control of the layout of the structures.
	 */
	memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t));
}

/*******************************************************************************
 * The following function initializes the cpu_context for a CPU specified by
 * its `cpu_idx` for first use, and sets the initial entrypoint state as
 * specified by the entry_point_info structure.
 ******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
			      const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
	cm_init_context_common(ctx, ep);
}

/*******************************************************************************
 * The following function initializes the cpu_context for the current CPU
 * for first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 ******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
	cpu_context_t *ctx;
	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
	cm_init_context_common(ctx, ep);
}
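
/*
 * Illustrative sketch, not part of the original file: populating an
 * entry_point_info structure for the two initialisation functions above. The
 * function name and `ns_image_base` parameter are hypothetical; the macros
 * used come from the headers already included by this file.
 */
#if 0
static void example_describe_ns_entry(uintptr_t ns_image_base)
{
	entry_point_info_t ep;

	zeromem(&ep, sizeof(ep));
	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, 0);
	SET_SECURITY_STATE(ep.h.attr, NON_SECURE);
	EP_SET_EE(ep.h.attr, EP_EE_LITTLE);

	ep.pc = ns_image_base;
	/* Request entry in HYP mode so that SCR.HCE gets set by the library */
	ep.spsr = SPSR_MODE32(MODE32_hyp, SPSR_T_ARM, SPSR_E_LITTLE,
			      DISABLE_ALL_EXCEPTIONS);

	/* Initialise the calling CPU's non-secure context ... */
	cm_init_my_context(&ep);
	/* ... or another CPU's, e.g. from PSCI_CPU_ON on the boot CPU */
	cm_init_context_by_index(0 /* cpu_idx */, &ep);
}
#endif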

/*******************************************************************************
 * Prepare the CPU system registers for first entry into secure or normal
 * world.
 *
 * If execution is requested to hyp mode, HSCTLR is initialized.
 * If execution is requested to non-secure PL1, and the CPU supports HYP mode,
 * then HYP mode is disabled by configuring all necessary HYP mode registers.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	uint32_t sctlr, scr, hcptr;
	cpu_context_t *ctx = cm_get_context(security_state);

	assert(ctx);

	if (security_state == NON_SECURE) {
		scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
		if (scr & SCR_HCE_BIT) {
			/* Use SCTLR value to initialize HSCTLR */
			sctlr = read_ctx_reg(get_regs_ctx(ctx),
						 CTX_NS_SCTLR);
			sctlr |= HSCTLR_RES1;
			/* Temporarily set the NS bit to access HSCTLR */
			write_scr(read_scr() | SCR_NS_BIT);
			/*
			 * Make sure the write to SCR is complete so that
			 * we can access HSCTLR
			 */
			isb();
			write_hsctlr(sctlr);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		} else if (read_id_pfr1() &
			(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
			/*
			 * Set the NS bit to access NS copies of certain banked
			 * registers
			 */
			write_scr(read_scr() | SCR_NS_BIT);
			isb();

			/* PL2 present but unused, need to disable safely */
			write_hcr(0);

			/* HSCTLR: can be ignored when bypassing */

			/* HCPTR: disable all traps TCPAC, TTA, TCP */
			hcptr = read_hcptr();
			hcptr &= ~(TCPAC_BIT | TTA_BIT | TCP11_BIT | TCP10_BIT);
			write_hcptr(hcptr);

			/* Enable PL1 access to the physical counter and timer */
			write_cnthctl(PL1PCEN_BIT | PL1PCTEN_BIT);

			/* Reset CNTVOFF */
			write64_cntvoff(0);

			/* Set VPIDR and VMPIDR to match MIDR and MPIDR */
			write_vpidr(read_midr());
			write_vmpidr(read_mpidr());

			/*
			 * Reset VTTBR.
			 * Needed because cache maintenance operations depend on
			 * the VMID even when non-secure EL1&0 stage 2 address
			 * translation is disabled.
			 */
			write64_vttbr(0);

			/*
			 * Avoid unexpected debug traps in case HDCR
			 * is not completely reset by the hardware: set
			 * HDCR.HPMN to PMCR.N and zero the remaining bits.
			 * The HDCR.HPMN and PMCR.N fields are the same size
			 * (5 bits) and HPMN is at offset zero within HDCR.
			 */
			write_hdcr((read_pmcr() & PMCR_N_BITS) >> PMCR_N_SHIFT);

			/*
			 * Reset CNTHP_CTL to disable the Hyp physical timer
			 * and therefore prevent timer interrupts.
			 */
			write_cnthp_ctl(0);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		}
	}
}
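
/*
 * Illustrative sketch, not part of the original file: the order in which an
 * EL3 runtime image would typically combine the functions above before its
 * first exit to the non-secure world. The function name is hypothetical; the
 * actual exception return is done by the caller's EL3 exit assembly path.
 */
#if 0
static void example_exit_to_non_secure(const entry_point_info_t *ns_ep)
{
	/* Initialise this CPU's non-secure context from the entry point */
	cm_init_my_context(ns_ep);

	/* Program the HYP/PL1 system registers for first entry */
	cm_prepare_el3_exit(NON_SECURE);

	/*
	 * The caller then restores the general purpose registers from the
	 * context and performs the exception return from monitor mode.
	 */
}
#endif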