/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context_mgmt.h>
#include <string.h>
#include "tspd_private.h"

/*******************************************************************************
 * Given a secure payload entrypoint, register width, CPU id and a pointer to
 * a context data structure, this function initialises a secure context that
 * is ready for programming an entry into the secure payload.
 ******************************************************************************/
int32_t tspd_init_secure_context(uint64_t entrypoint,
				 uint32_t rw,
				 uint64_t mpidr,
				 tsp_context_t *tsp_ctx)
{
	entry_point_info_t ep;
	uint32_t ep_attr;

	/* Passing a NULL context is a critical programming error */
	assert(tsp_ctx);

	/*
	 * We support AArch64 TSP for now.
	 * TODO: Add support for AArch32 TSP
	 */
	assert(rw == TSP_AARCH64);

	/* Associate this context with the cpu specified */
	tsp_ctx->mpidr = mpidr;
	tsp_ctx->state = 0;
	set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF);
	clr_std_smc_active_flag(tsp_ctx->state);

	cm_set_context_by_mpidr(mpidr, &tsp_ctx->cpu_ctx, SECURE);

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if (read_sctlr_el3() & SCTLR_EE_BIT)
		ep_attr |= EP_EE_BIG;
	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, ep_attr);
	ep.pc = entrypoint;
	ep.spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	memset(&ep.args, 0, sizeof(ep.args));

	cm_init_context(mpidr, &ep);

	return 0;
}
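
/*
 * Illustrative sketch, not part of this file: a dispatcher would typically
 * call tspd_init_secure_context() once per CPU while setting up the TSP.
 * The names tsp_entry and tspd_sp_context are assumptions for the example:
 *
 *	uint64_t mpidr = read_mpidr();
 *	uint32_t linear_id = platform_get_core_pos(mpidr);
 *
 *	tspd_init_secure_context(tsp_entry, TSP_AARCH64, mpidr,
 *				 &tspd_sp_context[linear_id]);
 */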

/*******************************************************************************
 * This function takes an SP context pointer and:
 * 1. Applies the S-EL1 system register context from tsp_ctx->cpu_ctx.
 * 2. Saves the current C runtime state (callee saved registers) on the stack
 *    frame and saves a reference to this state.
 * 3. Calls el3_exit() so that the EL3 system and general purpose registers
 *    from the tsp_ctx->cpu_ctx are used to enter the secure payload image.
 ******************************************************************************/
uint64_t tspd_synchronous_sp_entry(tsp_context_t *tsp_ctx)
{
	uint64_t rc;

	assert(tsp_ctx != NULL);
	assert(tsp_ctx->c_rt_ctx == 0);

	/* Apply the Secure EL1 system register context and switch to it */
	assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx);
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	rc = tspd_enter_sp(&tsp_ctx->c_rt_ctx);
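	/*
	 * In a debug build, clear the saved C runtime context pointer once
	 * the synchronous entry has returned, so the assertion at the top of
	 * this function can catch an unbalanced or nested entry.
	 */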
#if DEBUG
	tsp_ctx->c_rt_ctx = 0;
#endif

	return rc;
}
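
/*
 * Illustrative sketch (assumed surrounding code, not part of this file) of
 * the synchronous entry pattern from an SMC handler: program the address at
 * which the SP should be entered, then block in tspd_synchronous_sp_entry()
 * until the SP performs the matching tspd_synchronous_sp_exit(). The name
 * tsp_entry_fn is an assumption for the example:
 *
 *	cm_set_elr_el3(SECURE, (uint64_t) tsp_entry_fn);
 *	rc = tspd_synchronous_sp_entry(tsp_ctx);
 *
 * On return, rc holds the value the SP passed to tspd_synchronous_sp_exit().
 */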


/*******************************************************************************
 * This function takes an SP context pointer and:
 * 1. Saves the S-EL1 system register context to tsp_ctx->cpu_ctx.
 * 2. Restores the current C runtime state (callee saved registers) from the
 *    stack frame using the reference to this state saved in tspd_enter_sp().
 * 3. It does not need to save any general purpose or EL3 system register
 *    state as the generic SMC entry routine should have saved those.
 ******************************************************************************/
void tspd_synchronous_sp_exit(tsp_context_t *tsp_ctx, uint64_t ret)
{
	assert(tsp_ctx != NULL);
	/* Save the Secure EL1 system register context */
	assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx);
	cm_el1_sysregs_context_save(SECURE);

	assert(tsp_ctx->c_rt_ctx != 0);
	tspd_exit_sp(tsp_ctx->c_rt_ctx, ret);

	/* Should never reach here */
	assert(0);
}
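
/*
 * Illustrative counterpart (assumed surrounding code, not part of this
 * file): when the SP finishes servicing a request and traps back into the
 * dispatcher via an SMC, the handler unwinds the earlier synchronous entry
 * by returning the SP's result:
 *
 *	tspd_synchronous_sp_exit(tsp_ctx, x1);
 *
 * Control then resumes after the tspd_enter_sp() call inside
 * tspd_synchronous_sp_entry(), which returns x1 to its caller.
 */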