xref: /rk3399_ARM-atf/services/spd/tspd/tspd_common.c (revision 375f538a797a89a5f49aab1be70e86df4511c05a)
/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <platform.h>
#include <bl_common.h>
#include <runtime_svc.h>
#include <context_mgmt.h>
#include <tspd_private.h>

/*******************************************************************************
 * Given a secure payload entrypoint, register width, cpu id & pointer to a
 * context data structure, this function will create a secure context ready for
 * programming an entry into the secure payload.
 ******************************************************************************/
int32_t tspd_init_secure_context(uint64_t entrypoint,
				uint32_t rw,
				uint64_t mpidr,
				tsp_context *tsp_ctx)
{
	uint32_t scr = read_scr(), sctlr = read_sctlr();
	el1_sys_regs *el1_state;
	uint32_t spsr;

	/* Passing a NULL context is a critical programming error */
	assert(tsp_ctx);

	/*
	 * We support AArch64 TSP for now.
	 * TODO: Add support for AArch32 TSP
	 */
	assert(rw == TSP_AARCH64);

	/*
	 * This might look redundant if the context was statically
	 * allocated but this function cannot make that assumption.
	 */
	memset(tsp_ctx, 0, sizeof(*tsp_ctx));

	/* Set the right security state and register width for the SP */
	scr &= ~SCR_NS_BIT;
	scr &= ~SCR_RW_BIT;
	if (rw == TSP_AARCH64)
		scr |= SCR_RW_BIT;

	/* Get a pointer to the S-EL1 context memory */
	el1_state = get_sysregs_ctx(&tsp_ctx->cpu_ctx);

	/*
	 * Program SCTLR_EL1 so that execution in S-EL1 starts with the MMU
	 * and caches disabled: only the endianness (EE) bit is inherited from
	 * the EL3 value read above, and the RES1 bits are set.
	 */
	sctlr &= SCTLR_EE_BIT;
	sctlr |= SCTLR_EL1_RES1;
	write_ctx_reg(el1_state, CTX_SCTLR_EL1, sctlr);

	/* Set this context as ready to be initialised i.e. OFF */
	tsp_ctx->state = TSP_STATE_OFF;

	/* Associate this context with the cpu specified */
	tsp_ctx->mpidr = mpidr;

	cm_set_context(mpidr, &tsp_ctx->cpu_ctx, SECURE);
	spsr = make_spsr(MODE_EL1, MODE_SP_ELX, rw);
	cm_set_el3_eret_context(SECURE, entrypoint, spsr, scr);

	cm_init_exception_stack(mpidr, SECURE);

	return 0;
}
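
/*
 * Illustrative sketch only, compiled out: one plausible way a TSPD setup
 * path could use tspd_init_secure_context() to prime a per-cpu secure
 * context before the TSP is entered for the first time. The names
 * tspd_example_sp_ctx and tspd_example_setup, the tsp_entrypoint parameter
 * and the use of PLATFORM_CORE_COUNT/platform_get_core_pos() are
 * assumptions made for this example and are not part of this file.
 */
#if 0
static tsp_context tspd_example_sp_ctx[PLATFORM_CORE_COUNT];

static int32_t tspd_example_setup(uint64_t tsp_entrypoint)
{
	uint64_t mpidr = read_mpidr();
	uint32_t linear_id = platform_get_core_pos(mpidr);

	/* Build an S-EL1 context the current cpu can use to enter the TSP */
	return tspd_init_secure_context(tsp_entrypoint,
					TSP_AARCH64,
					mpidr,
					&tspd_example_sp_ctx[linear_id]);
}
#endif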

/*******************************************************************************
 * This function takes an SP context pointer and:
 * 1. Applies the S-EL1 system register context from tsp_ctx->cpu_ctx.
 * 2. Saves the current C runtime state (callee saved registers) on the stack
 *    frame and saves a reference to this state.
 * 3. Calls el3_exit() so that the EL3 system and general purpose registers
 *    from the tsp_ctx->cpu_ctx are used to enter the secure payload image.
 ******************************************************************************/
uint64_t tspd_synchronous_sp_entry(tsp_context *tsp_ctx)
{
	uint64_t rc;

	assert(tsp_ctx->c_rt_ctx == 0);

	/* Apply the Secure EL1 system register context and switch to it */
	assert(cm_get_context(read_mpidr(), SECURE) == &tsp_ctx->cpu_ctx);
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	rc = tspd_enter_sp(&tsp_ctx->c_rt_ctx);
#if DEBUG
	tsp_ctx->c_rt_ctx = 0;
#endif

	return rc;
}
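
/*
 * Illustrative sketch only, compiled out: a caller typically blocks in
 * tspd_synchronous_sp_entry() until the TSP hands control back through
 * tspd_synchronous_sp_exit(). The function name, the treatment of the
 * return value and the TSP_STATE_ON update below are assumptions made for
 * this example.
 */
#if 0
static int32_t tspd_example_first_entry(tsp_context *tsp_ctx)
{
	uint64_t rc;

	/* Enter the TSP; this returns only when the TSP exits synchronously */
	rc = tspd_synchronous_sp_entry(tsp_ctx);

	/* Treat a non-zero result as successful TSP initialisation */
	if (rc)
		tsp_ctx->state = TSP_STATE_ON;

	return rc;
}
#endif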


/*******************************************************************************
 * This function takes an SP context pointer and:
 * 1. Saves the S-EL1 system register context to tsp_ctx->cpu_ctx.
 * 2. Restores the current C runtime state (callee saved registers) from the
 *    stack frame using the reference to this state saved in tspd_enter_sp().
 * 3. It does not need to save any general purpose or EL3 system register state
 *    as the generic SMC entry routine should have saved those.
 ******************************************************************************/
void tspd_synchronous_sp_exit(tsp_context *tsp_ctx, uint64_t ret)
{
	/* Save the Secure EL1 system register context */
	assert(cm_get_context(read_mpidr(), SECURE) == &tsp_ctx->cpu_ctx);
	cm_el1_sysregs_context_save(SECURE);

	assert(tsp_ctx->c_rt_ctx != 0);
	tspd_exit_sp(tsp_ctx->c_rt_ctx, ret);

	/* Should never reach here */
	assert(0);
}
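
/*
 * Illustrative sketch only, compiled out: the SMC handler path that receives
 * the TSP's completion message is the usual caller of
 * tspd_synchronous_sp_exit(). It forwards the TSP's result straight into the
 * tspd_synchronous_sp_entry() call that originally entered the TSP, so it
 * never returns to the handler. The function name and parameters are
 * assumptions made for this example.
 */
#if 0
static void tspd_example_handle_tsp_done(uint64_t x1, tsp_context *tsp_ctx)
{
	/*
	 * Unwind back to the C runtime context saved by
	 * tspd_synchronous_sp_entry(); x1 becomes its return value.
	 */
	tspd_synchronous_sp_exit(tsp_ctx, x1);
}
#endif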