xref: /rk3399_ARM-atf/lib/el3_runtime/aarch64/context_mgmt.c (revision 532ed6183868036e4a4f83cd7a71b93266a3bdb7)
1*532ed618SSoby Mathew /*
2*532ed618SSoby Mathew  * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
3*532ed618SSoby Mathew  *
4*532ed618SSoby Mathew  * Redistribution and use in source and binary forms, with or without
5*532ed618SSoby Mathew  * modification, are permitted provided that the following conditions are met:
6*532ed618SSoby Mathew  *
7*532ed618SSoby Mathew  * Redistributions of source code must retain the above copyright notice, this
8*532ed618SSoby Mathew  * list of conditions and the following disclaimer.
9*532ed618SSoby Mathew  *
10*532ed618SSoby Mathew  * Redistributions in binary form must reproduce the above copyright notice,
11*532ed618SSoby Mathew  * this list of conditions and the following disclaimer in the documentation
12*532ed618SSoby Mathew  * and/or other materials provided with the distribution.
13*532ed618SSoby Mathew  *
14*532ed618SSoby Mathew  * Neither the name of ARM nor the names of its contributors may be used
15*532ed618SSoby Mathew  * to endorse or promote products derived from this software without specific
16*532ed618SSoby Mathew  * prior written permission.
17*532ed618SSoby Mathew  *
18*532ed618SSoby Mathew  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19*532ed618SSoby Mathew  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20*532ed618SSoby Mathew  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21*532ed618SSoby Mathew  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22*532ed618SSoby Mathew  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23*532ed618SSoby Mathew  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24*532ed618SSoby Mathew  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25*532ed618SSoby Mathew  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26*532ed618SSoby Mathew  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27*532ed618SSoby Mathew  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28*532ed618SSoby Mathew  * POSSIBILITY OF SUCH DAMAGE.
29*532ed618SSoby Mathew  */
30*532ed618SSoby Mathew 
31*532ed618SSoby Mathew #include <arch.h>
32*532ed618SSoby Mathew #include <arch_helpers.h>
33*532ed618SSoby Mathew #include <assert.h>
34*532ed618SSoby Mathew #include <bl_common.h>
35*532ed618SSoby Mathew #include <context.h>
36*532ed618SSoby Mathew #include <context_mgmt.h>
37*532ed618SSoby Mathew #include <interrupt_mgmt.h>
38*532ed618SSoby Mathew #include <platform.h>
39*532ed618SSoby Mathew #include <platform_def.h>
40*532ed618SSoby Mathew #include <smcc_helpers.h>
41*532ed618SSoby Mathew #include <string.h>
42*532ed618SSoby Mathew 
43*532ed618SSoby Mathew 
/*******************************************************************************
 * Context management library initialisation routine. This library is used by
 * runtime services to share pointers to 'cpu_context' structures for the
 * secure and non-secure states. Management of the structures and their
 * associated memory is not done by this library: e.g. the PSCI service manages
 * the cpu context used for entry from and exit to the non-secure state, while
 * the Secure payload dispatcher service manages the context(s) corresponding
 * to the secure state (and uses this library to reach the non-secure context
 * pointers). Lastly, this library provides the API to make SP_EL3 point to
 * the cpu context which will be used to program an entry into a lower EL;
 * the same context is used to save state on exception entry from that EL.
 ******************************************************************************/
void cm_init(void)
{
	/*
	 * The only state owned by the context management library is global
	 * data, which is already initialised when the BSS is zeroed out, so
	 * there is nothing to do here.
	 */
}
64*532ed618SSoby Mathew 
65*532ed618SSoby Mathew /*******************************************************************************
66*532ed618SSoby Mathew  * The following function initializes the cpu_context 'ctx' for
67*532ed618SSoby Mathew  * first use, and sets the initial entrypoint state as specified by the
68*532ed618SSoby Mathew  * entry_point_info structure.
69*532ed618SSoby Mathew  *
70*532ed618SSoby Mathew  * The security state to initialize is determined by the SECURE attribute
71*532ed618SSoby Mathew  * of the entry_point_info. The function returns a pointer to the initialized
72*532ed618SSoby Mathew  * context and sets this as the next context to return to.
73*532ed618SSoby Mathew  *
74*532ed618SSoby Mathew  * The EE and ST attributes are used to configure the endianess and secure
75*532ed618SSoby Mathew  * timer availability for the new execution context.
76*532ed618SSoby Mathew  *
77*532ed618SSoby Mathew  * To prepare the register state for entry call cm_prepare_el3_exit() and
78*532ed618SSoby Mathew  * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
79*532ed618SSoby Mathew  * cm_e1_sysreg_context_restore().
80*532ed618SSoby Mathew  ******************************************************************************/
81*532ed618SSoby Mathew static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
82*532ed618SSoby Mathew {
83*532ed618SSoby Mathew 	unsigned int security_state;
84*532ed618SSoby Mathew 	uint32_t scr_el3;
85*532ed618SSoby Mathew 	el3_state_t *state;
86*532ed618SSoby Mathew 	gp_regs_t *gp_regs;
87*532ed618SSoby Mathew 	unsigned long sctlr_elx;
88*532ed618SSoby Mathew 
89*532ed618SSoby Mathew 	assert(ctx);
90*532ed618SSoby Mathew 
91*532ed618SSoby Mathew 	security_state = GET_SECURITY_STATE(ep->h.attr);
92*532ed618SSoby Mathew 
93*532ed618SSoby Mathew 	/* Clear any residual register values from the context */
94*532ed618SSoby Mathew 	memset(ctx, 0, sizeof(*ctx));
95*532ed618SSoby Mathew 
96*532ed618SSoby Mathew 	/*
97*532ed618SSoby Mathew 	 * Base the context SCR on the current value, adjust for entry point
98*532ed618SSoby Mathew 	 * specific requirements and set trap bits from the IMF
99*532ed618SSoby Mathew 	 * TODO: provide the base/global SCR bits using another mechanism?
100*532ed618SSoby Mathew 	 */
101*532ed618SSoby Mathew 	scr_el3 = read_scr();
102*532ed618SSoby Mathew 	scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT |
103*532ed618SSoby Mathew 			SCR_ST_BIT | SCR_HCE_BIT);
104*532ed618SSoby Mathew 
105*532ed618SSoby Mathew 	if (security_state != SECURE)
106*532ed618SSoby Mathew 		scr_el3 |= SCR_NS_BIT;
107*532ed618SSoby Mathew 
108*532ed618SSoby Mathew 	if (GET_RW(ep->spsr) == MODE_RW_64)
109*532ed618SSoby Mathew 		scr_el3 |= SCR_RW_BIT;
110*532ed618SSoby Mathew 
111*532ed618SSoby Mathew 	if (EP_GET_ST(ep->h.attr))
112*532ed618SSoby Mathew 		scr_el3 |= SCR_ST_BIT;
113*532ed618SSoby Mathew 
114*532ed618SSoby Mathew #ifndef HANDLE_EA_EL3_FIRST
115*532ed618SSoby Mathew 	/* Explicitly stop to trap aborts from lower exception levels. */
116*532ed618SSoby Mathew 	scr_el3 &= ~SCR_EA_BIT;
117*532ed618SSoby Mathew #endif
118*532ed618SSoby Mathew 
119*532ed618SSoby Mathew #if IMAGE_BL31
120*532ed618SSoby Mathew 	/*
121*532ed618SSoby Mathew 	 * IRQ/FIQ bits only need setting if interrupt routing
122*532ed618SSoby Mathew 	 * model has been set up for BL31.
123*532ed618SSoby Mathew 	 */
124*532ed618SSoby Mathew 	scr_el3 |= get_scr_el3_from_routing_model(security_state);
125*532ed618SSoby Mathew #endif
126*532ed618SSoby Mathew 
127*532ed618SSoby Mathew 	/*
128*532ed618SSoby Mathew 	 * Set up SCTLR_ELx for the target exception level:
129*532ed618SSoby Mathew 	 * EE bit is taken from the entrypoint attributes
130*532ed618SSoby Mathew 	 * M, C and I bits must be zero (as required by PSCI specification)
131*532ed618SSoby Mathew 	 *
132*532ed618SSoby Mathew 	 * The target exception level is based on the spsr mode requested.
133*532ed618SSoby Mathew 	 * If execution is requested to EL2 or hyp mode, HVC is enabled
134*532ed618SSoby Mathew 	 * via SCR_EL3.HCE.
135*532ed618SSoby Mathew 	 *
136*532ed618SSoby Mathew 	 * Always compute the SCTLR_EL1 value and save in the cpu_context
137*532ed618SSoby Mathew 	 * - the EL2 registers are set up by cm_preapre_ns_entry() as they
138*532ed618SSoby Mathew 	 * are not part of the stored cpu_context
139*532ed618SSoby Mathew 	 *
140*532ed618SSoby Mathew 	 * TODO: In debug builds the spsr should be validated and checked
141*532ed618SSoby Mathew 	 * against the CPU support, security state, endianess and pc
142*532ed618SSoby Mathew 	 */
143*532ed618SSoby Mathew 	sctlr_elx = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
144*532ed618SSoby Mathew 	if (GET_RW(ep->spsr) == MODE_RW_64)
145*532ed618SSoby Mathew 		sctlr_elx |= SCTLR_EL1_RES1;
146*532ed618SSoby Mathew 	else
147*532ed618SSoby Mathew 		sctlr_elx |= SCTLR_AARCH32_EL1_RES1;
148*532ed618SSoby Mathew 	write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
149*532ed618SSoby Mathew 
150*532ed618SSoby Mathew 	if ((GET_RW(ep->spsr) == MODE_RW_64
151*532ed618SSoby Mathew 	     && GET_EL(ep->spsr) == MODE_EL2)
152*532ed618SSoby Mathew 	    || (GET_RW(ep->spsr) != MODE_RW_64
153*532ed618SSoby Mathew 		&& GET_M32(ep->spsr) == MODE32_hyp)) {
154*532ed618SSoby Mathew 		scr_el3 |= SCR_HCE_BIT;
155*532ed618SSoby Mathew 	}
156*532ed618SSoby Mathew 
157*532ed618SSoby Mathew 	/* Populate EL3 state so that we've the right context before doing ERET */
158*532ed618SSoby Mathew 	state = get_el3state_ctx(ctx);
159*532ed618SSoby Mathew 	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
160*532ed618SSoby Mathew 	write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
161*532ed618SSoby Mathew 	write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);
162*532ed618SSoby Mathew 
163*532ed618SSoby Mathew 	/*
164*532ed618SSoby Mathew 	 * Store the X0-X7 value from the entrypoint into the context
165*532ed618SSoby Mathew 	 * Use memcpy as we are in control of the layout of the structures
166*532ed618SSoby Mathew 	 */
167*532ed618SSoby Mathew 	gp_regs = get_gpregs_ctx(ctx);
168*532ed618SSoby Mathew 	memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
169*532ed618SSoby Mathew }
170*532ed618SSoby Mathew 
171*532ed618SSoby Mathew /*******************************************************************************
172*532ed618SSoby Mathew  * The following function initializes the cpu_context for a CPU specified by
173*532ed618SSoby Mathew  * its `cpu_idx` for first use, and sets the initial entrypoint state as
174*532ed618SSoby Mathew  * specified by the entry_point_info structure.
175*532ed618SSoby Mathew  ******************************************************************************/
176*532ed618SSoby Mathew void cm_init_context_by_index(unsigned int cpu_idx,
177*532ed618SSoby Mathew 			      const entry_point_info_t *ep)
178*532ed618SSoby Mathew {
179*532ed618SSoby Mathew 	cpu_context_t *ctx;
180*532ed618SSoby Mathew 	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
181*532ed618SSoby Mathew 	cm_init_context_common(ctx, ep);
182*532ed618SSoby Mathew }
183*532ed618SSoby Mathew 
184*532ed618SSoby Mathew /*******************************************************************************
185*532ed618SSoby Mathew  * The following function initializes the cpu_context for the current CPU
186*532ed618SSoby Mathew  * for first use, and sets the initial entrypoint state as specified by the
187*532ed618SSoby Mathew  * entry_point_info structure.
188*532ed618SSoby Mathew  ******************************************************************************/
189*532ed618SSoby Mathew void cm_init_my_context(const entry_point_info_t *ep)
190*532ed618SSoby Mathew {
191*532ed618SSoby Mathew 	cpu_context_t *ctx;
192*532ed618SSoby Mathew 	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
193*532ed618SSoby Mathew 	cm_init_context_common(ctx, ep);
194*532ed618SSoby Mathew }
195*532ed618SSoby Mathew 
196*532ed618SSoby Mathew /*******************************************************************************
197*532ed618SSoby Mathew  * Prepare the CPU system registers for first entry into secure or normal world
198*532ed618SSoby Mathew  *
199*532ed618SSoby Mathew  * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized
200*532ed618SSoby Mathew  * If execution is requested to non-secure EL1 or svc mode, and the CPU supports
201*532ed618SSoby Mathew  * EL2 then EL2 is disabled by configuring all necessary EL2 registers.
202*532ed618SSoby Mathew  * For all entries, the EL1 registers are initialized from the cpu_context
203*532ed618SSoby Mathew  ******************************************************************************/
204*532ed618SSoby Mathew void cm_prepare_el3_exit(uint32_t security_state)
205*532ed618SSoby Mathew {
206*532ed618SSoby Mathew 	uint32_t sctlr_elx, scr_el3, cptr_el2;
207*532ed618SSoby Mathew 	cpu_context_t *ctx = cm_get_context(security_state);
208*532ed618SSoby Mathew 
209*532ed618SSoby Mathew 	assert(ctx);
210*532ed618SSoby Mathew 
211*532ed618SSoby Mathew 	if (security_state == NON_SECURE) {
212*532ed618SSoby Mathew 		scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
213*532ed618SSoby Mathew 		if (scr_el3 & SCR_HCE_BIT) {
214*532ed618SSoby Mathew 			/* Use SCTLR_EL1.EE value to initialise sctlr_el2 */
215*532ed618SSoby Mathew 			sctlr_elx = read_ctx_reg(get_sysregs_ctx(ctx),
216*532ed618SSoby Mathew 						 CTX_SCTLR_EL1);
217*532ed618SSoby Mathew 			sctlr_elx &= ~SCTLR_EE_BIT;
218*532ed618SSoby Mathew 			sctlr_elx |= SCTLR_EL2_RES1;
219*532ed618SSoby Mathew 			write_sctlr_el2(sctlr_elx);
220*532ed618SSoby Mathew 		} else if (read_id_aa64pfr0_el1() &
221*532ed618SSoby Mathew 			   (ID_AA64PFR0_ELX_MASK << ID_AA64PFR0_EL2_SHIFT)) {
222*532ed618SSoby Mathew 			/* EL2 present but unused, need to disable safely */
223*532ed618SSoby Mathew 
224*532ed618SSoby Mathew 			/* HCR_EL2 = 0, except RW bit set to match SCR_EL3 */
225*532ed618SSoby Mathew 			write_hcr_el2((scr_el3 & SCR_RW_BIT) ? HCR_RW_BIT : 0);
226*532ed618SSoby Mathew 
227*532ed618SSoby Mathew 			/* SCTLR_EL2 : can be ignored when bypassing */
228*532ed618SSoby Mathew 
229*532ed618SSoby Mathew 			/* CPTR_EL2 : disable all traps TCPAC, TTA, TFP */
230*532ed618SSoby Mathew 			cptr_el2 = read_cptr_el2();
231*532ed618SSoby Mathew 			cptr_el2 &= ~(TCPAC_BIT | TTA_BIT | TFP_BIT);
232*532ed618SSoby Mathew 			write_cptr_el2(cptr_el2);
233*532ed618SSoby Mathew 
234*532ed618SSoby Mathew 			/* Enable EL1 access to timer */
235*532ed618SSoby Mathew 			write_cnthctl_el2(EL1PCEN_BIT | EL1PCTEN_BIT);
236*532ed618SSoby Mathew 
237*532ed618SSoby Mathew 			/* Reset CNTVOFF_EL2 */
238*532ed618SSoby Mathew 			write_cntvoff_el2(0);
239*532ed618SSoby Mathew 
240*532ed618SSoby Mathew 			/* Set VPIDR, VMPIDR to match MIDR, MPIDR */
241*532ed618SSoby Mathew 			write_vpidr_el2(read_midr_el1());
242*532ed618SSoby Mathew 			write_vmpidr_el2(read_mpidr_el1());
243*532ed618SSoby Mathew 
244*532ed618SSoby Mathew 			/*
245*532ed618SSoby Mathew 			 * Reset VTTBR_EL2.
246*532ed618SSoby Mathew 			 * Needed because cache maintenance operations depend on
247*532ed618SSoby Mathew 			 * the VMID even when non-secure EL1&0 stage 2 address
248*532ed618SSoby Mathew 			 * translation are disabled.
249*532ed618SSoby Mathew 			 */
250*532ed618SSoby Mathew 			write_vttbr_el2(0);
251*532ed618SSoby Mathew 		}
252*532ed618SSoby Mathew 	}
253*532ed618SSoby Mathew 
254*532ed618SSoby Mathew 	el1_sysregs_context_restore(get_sysregs_ctx(ctx));
255*532ed618SSoby Mathew 
256*532ed618SSoby Mathew 	cm_set_next_context(ctx);
257*532ed618SSoby Mathew }
258*532ed618SSoby Mathew 
259*532ed618SSoby Mathew /*******************************************************************************
260*532ed618SSoby Mathew  * The next four functions are used by runtime services to save and restore
261*532ed618SSoby Mathew  * EL1 context on the 'cpu_context' structure for the specified security
262*532ed618SSoby Mathew  * state.
263*532ed618SSoby Mathew  ******************************************************************************/
264*532ed618SSoby Mathew void cm_el1_sysregs_context_save(uint32_t security_state)
265*532ed618SSoby Mathew {
266*532ed618SSoby Mathew 	cpu_context_t *ctx;
267*532ed618SSoby Mathew 
268*532ed618SSoby Mathew 	ctx = cm_get_context(security_state);
269*532ed618SSoby Mathew 	assert(ctx);
270*532ed618SSoby Mathew 
271*532ed618SSoby Mathew 	el1_sysregs_context_save(get_sysregs_ctx(ctx));
272*532ed618SSoby Mathew }
273*532ed618SSoby Mathew 
274*532ed618SSoby Mathew void cm_el1_sysregs_context_restore(uint32_t security_state)
275*532ed618SSoby Mathew {
276*532ed618SSoby Mathew 	cpu_context_t *ctx;
277*532ed618SSoby Mathew 
278*532ed618SSoby Mathew 	ctx = cm_get_context(security_state);
279*532ed618SSoby Mathew 	assert(ctx);
280*532ed618SSoby Mathew 
281*532ed618SSoby Mathew 	el1_sysregs_context_restore(get_sysregs_ctx(ctx));
282*532ed618SSoby Mathew }
283*532ed618SSoby Mathew 
284*532ed618SSoby Mathew /*******************************************************************************
285*532ed618SSoby Mathew  * This function populates ELR_EL3 member of 'cpu_context' pertaining to the
286*532ed618SSoby Mathew  * given security state with the given entrypoint
287*532ed618SSoby Mathew  ******************************************************************************/
288*532ed618SSoby Mathew void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint)
289*532ed618SSoby Mathew {
290*532ed618SSoby Mathew 	cpu_context_t *ctx;
291*532ed618SSoby Mathew 	el3_state_t *state;
292*532ed618SSoby Mathew 
293*532ed618SSoby Mathew 	ctx = cm_get_context(security_state);
294*532ed618SSoby Mathew 	assert(ctx);
295*532ed618SSoby Mathew 
296*532ed618SSoby Mathew 	/* Populate EL3 state so that ERET jumps to the correct entry */
297*532ed618SSoby Mathew 	state = get_el3state_ctx(ctx);
298*532ed618SSoby Mathew 	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
299*532ed618SSoby Mathew }
300*532ed618SSoby Mathew 
301*532ed618SSoby Mathew /*******************************************************************************
302*532ed618SSoby Mathew  * This function populates ELR_EL3 and SPSR_EL3 members of 'cpu_context'
303*532ed618SSoby Mathew  * pertaining to the given security state
304*532ed618SSoby Mathew  ******************************************************************************/
305*532ed618SSoby Mathew void cm_set_elr_spsr_el3(uint32_t security_state,
306*532ed618SSoby Mathew 			uintptr_t entrypoint, uint32_t spsr)
307*532ed618SSoby Mathew {
308*532ed618SSoby Mathew 	cpu_context_t *ctx;
309*532ed618SSoby Mathew 	el3_state_t *state;
310*532ed618SSoby Mathew 
311*532ed618SSoby Mathew 	ctx = cm_get_context(security_state);
312*532ed618SSoby Mathew 	assert(ctx);
313*532ed618SSoby Mathew 
314*532ed618SSoby Mathew 	/* Populate EL3 state so that ERET jumps to the correct entry */
315*532ed618SSoby Mathew 	state = get_el3state_ctx(ctx);
316*532ed618SSoby Mathew 	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
317*532ed618SSoby Mathew 	write_ctx_reg(state, CTX_SPSR_EL3, spsr);
318*532ed618SSoby Mathew }
319*532ed618SSoby Mathew 
320*532ed618SSoby Mathew /*******************************************************************************
321*532ed618SSoby Mathew  * This function updates a single bit in the SCR_EL3 member of the 'cpu_context'
322*532ed618SSoby Mathew  * pertaining to the given security state using the value and bit position
323*532ed618SSoby Mathew  * specified in the parameters. It preserves all other bits.
324*532ed618SSoby Mathew  ******************************************************************************/
325*532ed618SSoby Mathew void cm_write_scr_el3_bit(uint32_t security_state,
326*532ed618SSoby Mathew 			  uint32_t bit_pos,
327*532ed618SSoby Mathew 			  uint32_t value)
328*532ed618SSoby Mathew {
329*532ed618SSoby Mathew 	cpu_context_t *ctx;
330*532ed618SSoby Mathew 	el3_state_t *state;
331*532ed618SSoby Mathew 	uint32_t scr_el3;
332*532ed618SSoby Mathew 
333*532ed618SSoby Mathew 	ctx = cm_get_context(security_state);
334*532ed618SSoby Mathew 	assert(ctx);
335*532ed618SSoby Mathew 
336*532ed618SSoby Mathew 	/* Ensure that the bit position is a valid one */
337*532ed618SSoby Mathew 	assert((1 << bit_pos) & SCR_VALID_BIT_MASK);
338*532ed618SSoby Mathew 
339*532ed618SSoby Mathew 	/* Ensure that the 'value' is only a bit wide */
340*532ed618SSoby Mathew 	assert(value <= 1);
341*532ed618SSoby Mathew 
342*532ed618SSoby Mathew 	/*
343*532ed618SSoby Mathew 	 * Get the SCR_EL3 value from the cpu context, clear the desired bit
344*532ed618SSoby Mathew 	 * and set it to its new value.
345*532ed618SSoby Mathew 	 */
346*532ed618SSoby Mathew 	state = get_el3state_ctx(ctx);
347*532ed618SSoby Mathew 	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
348*532ed618SSoby Mathew 	scr_el3 &= ~(1 << bit_pos);
349*532ed618SSoby Mathew 	scr_el3 |= value << bit_pos;
350*532ed618SSoby Mathew 	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
351*532ed618SSoby Mathew }
352*532ed618SSoby Mathew 
353*532ed618SSoby Mathew /*******************************************************************************
354*532ed618SSoby Mathew  * This function retrieves SCR_EL3 member of 'cpu_context' pertaining to the
355*532ed618SSoby Mathew  * given security state.
356*532ed618SSoby Mathew  ******************************************************************************/
357*532ed618SSoby Mathew uint32_t cm_get_scr_el3(uint32_t security_state)
358*532ed618SSoby Mathew {
359*532ed618SSoby Mathew 	cpu_context_t *ctx;
360*532ed618SSoby Mathew 	el3_state_t *state;
361*532ed618SSoby Mathew 
362*532ed618SSoby Mathew 	ctx = cm_get_context(security_state);
363*532ed618SSoby Mathew 	assert(ctx);
364*532ed618SSoby Mathew 
365*532ed618SSoby Mathew 	/* Populate EL3 state so that ERET jumps to the correct entry */
366*532ed618SSoby Mathew 	state = get_el3state_ctx(ctx);
367*532ed618SSoby Mathew 	return read_ctx_reg(state, CTX_SCR_EL3);
368*532ed618SSoby Mathew }
369*532ed618SSoby Mathew 
370*532ed618SSoby Mathew /*******************************************************************************
371*532ed618SSoby Mathew  * This function is used to program the context that's used for exception
372*532ed618SSoby Mathew  * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
373*532ed618SSoby Mathew  * the required security state
374*532ed618SSoby Mathew  ******************************************************************************/
375*532ed618SSoby Mathew void cm_set_next_eret_context(uint32_t security_state)
376*532ed618SSoby Mathew {
377*532ed618SSoby Mathew 	cpu_context_t *ctx;
378*532ed618SSoby Mathew 
379*532ed618SSoby Mathew 	ctx = cm_get_context(security_state);
380*532ed618SSoby Mathew 	assert(ctx);
381*532ed618SSoby Mathew 
382*532ed618SSoby Mathew 	cm_set_next_context(ctx);
383*532ed618SSoby Mathew }
384