xref: /rk3399_ARM-atf/services/spd/tspd/tspd_common.c (revision 16292f54811f27bb7de28512cda74db83686cb63)
1375f538aSAchin Gupta /*
232f0d3c6SDouglas Raillard  * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
3375f538aSAchin Gupta  *
4375f538aSAchin Gupta  * Redistribution and use in source and binary forms, with or without
5375f538aSAchin Gupta  * modification, are permitted provided that the following conditions are met:
6375f538aSAchin Gupta  *
7375f538aSAchin Gupta  * Redistributions of source code must retain the above copyright notice, this
8375f538aSAchin Gupta  * list of conditions and the following disclaimer.
9375f538aSAchin Gupta  *
10375f538aSAchin Gupta  * Redistributions in binary form must reproduce the above copyright notice,
11375f538aSAchin Gupta  * this list of conditions and the following disclaimer in the documentation
12375f538aSAchin Gupta  * and/or other materials provided with the distribution.
13375f538aSAchin Gupta  *
14375f538aSAchin Gupta  * Neither the name of ARM nor the names of its contributors may be used
15375f538aSAchin Gupta  * to endorse or promote products derived from this software without specific
16375f538aSAchin Gupta  * prior written permission.
17375f538aSAchin Gupta  *
18375f538aSAchin Gupta  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19375f538aSAchin Gupta  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20375f538aSAchin Gupta  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21375f538aSAchin Gupta  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22375f538aSAchin Gupta  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23375f538aSAchin Gupta  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24375f538aSAchin Gupta  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25375f538aSAchin Gupta  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26375f538aSAchin Gupta  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27375f538aSAchin Gupta  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28375f538aSAchin Gupta  * POSSIBILITY OF SUCH DAMAGE.
29375f538aSAchin Gupta  */
30375f538aSAchin Gupta 
31375f538aSAchin Gupta #include <arch_helpers.h>
3297043ac9SDan Handley #include <assert.h>
33375f538aSAchin Gupta #include <bl_common.h>
34375f538aSAchin Gupta #include <context_mgmt.h>
353df6012aSDouglas Raillard #include <debug.h>
3697043ac9SDan Handley #include <string.h>
373df6012aSDouglas Raillard #include <tsp.h>
3832f0d3c6SDouglas Raillard #include <utils.h>
3935e98e55SDan Handley #include "tspd_private.h"
40375f538aSAchin Gupta 
41375f538aSAchin Gupta /*******************************************************************************
4250e27dadSVikram Kanigiri  * Given a secure payload entrypoint info pointer, entry point PC, register
4350e27dadSVikram Kanigiri  * width, cpu id & pointer to a context data structure, this function will
4450e27dadSVikram Kanigiri  * initialize tsp context and entry point info for the secure payload
45375f538aSAchin Gupta  ******************************************************************************/
4650e27dadSVikram Kanigiri void tspd_init_tsp_ep_state(struct entry_point_info *tsp_entry_point,
47375f538aSAchin Gupta 				uint32_t rw,
4850e27dadSVikram Kanigiri 				uint64_t pc,
49fb037bfbSDan Handley 				tsp_context_t *tsp_ctx)
50375f538aSAchin Gupta {
51167a9357SAndrew Thoelke 	uint32_t ep_attr;
52375f538aSAchin Gupta 
53375f538aSAchin Gupta 	/* Passing a NULL context is a critical programming error */
54375f538aSAchin Gupta 	assert(tsp_ctx);
5550e27dadSVikram Kanigiri 	assert(tsp_entry_point);
5650e27dadSVikram Kanigiri 	assert(pc);
57375f538aSAchin Gupta 
58375f538aSAchin Gupta 	/*
59375f538aSAchin Gupta 	 * We support AArch64 TSP for now.
60375f538aSAchin Gupta 	 * TODO: Add support for AArch32 TSP
61375f538aSAchin Gupta 	 */
62375f538aSAchin Gupta 	assert(rw == TSP_AARCH64);
63375f538aSAchin Gupta 
64375f538aSAchin Gupta 	/* Associate this context with the cpu specified */
6550e27dadSVikram Kanigiri 	tsp_ctx->mpidr = read_mpidr_el1();
66167a9357SAndrew Thoelke 	tsp_ctx->state = 0;
67167a9357SAndrew Thoelke 	set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF);
68*16292f54SDavid Cunado 	clr_yield_smc_active_flag(tsp_ctx->state);
69375f538aSAchin Gupta 
7050e27dadSVikram Kanigiri 	cm_set_context(&tsp_ctx->cpu_ctx, SECURE);
71167a9357SAndrew Thoelke 
72167a9357SAndrew Thoelke 	/* initialise an entrypoint to set up the CPU context */
73167a9357SAndrew Thoelke 	ep_attr = SECURE | EP_ST_ENABLE;
74167a9357SAndrew Thoelke 	if (read_sctlr_el3() & SCTLR_EE_BIT)
75167a9357SAndrew Thoelke 		ep_attr |= EP_EE_BIG;
7650e27dadSVikram Kanigiri 	SET_PARAM_HEAD(tsp_entry_point, PARAM_EP, VERSION_1, ep_attr);
77167a9357SAndrew Thoelke 
7850e27dadSVikram Kanigiri 	tsp_entry_point->pc = pc;
7950e27dadSVikram Kanigiri 	tsp_entry_point->spsr = SPSR_64(MODE_EL1,
8050e27dadSVikram Kanigiri 					MODE_SP_ELX,
8150e27dadSVikram Kanigiri 					DISABLE_ALL_EXCEPTIONS);
8232f0d3c6SDouglas Raillard 	zeromem(&tsp_entry_point->args, sizeof(tsp_entry_point->args));
83375f538aSAchin Gupta }
84375f538aSAchin Gupta 
85375f538aSAchin Gupta /*******************************************************************************
86375f538aSAchin Gupta  * This function takes an SP context pointer and:
87375f538aSAchin Gupta  * 1. Applies the S-EL1 system register context from tsp_ctx->cpu_ctx.
88375f538aSAchin Gupta  * 2. Saves the current C runtime state (callee saved registers) on the stack
89375f538aSAchin Gupta  *    frame and saves a reference to this state.
90375f538aSAchin Gupta  * 3. Calls el3_exit() so that the EL3 system and general purpose registers
91375f538aSAchin Gupta  *    from the tsp_ctx->cpu_ctx are used to enter the secure payload image.
92375f538aSAchin Gupta  ******************************************************************************/
93fb037bfbSDan Handley uint64_t tspd_synchronous_sp_entry(tsp_context_t *tsp_ctx)
94375f538aSAchin Gupta {
95375f538aSAchin Gupta 	uint64_t rc;
96375f538aSAchin Gupta 
97d3280bebSJuan Castillo 	assert(tsp_ctx != NULL);
98375f538aSAchin Gupta 	assert(tsp_ctx->c_rt_ctx == 0);
99375f538aSAchin Gupta 
100375f538aSAchin Gupta 	/* Apply the Secure EL1 system register context and switch to it */
10108ab89d3SAndrew Thoelke 	assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx);
102375f538aSAchin Gupta 	cm_el1_sysregs_context_restore(SECURE);
103375f538aSAchin Gupta 	cm_set_next_eret_context(SECURE);
104375f538aSAchin Gupta 
105375f538aSAchin Gupta 	rc = tspd_enter_sp(&tsp_ctx->c_rt_ctx);
106375f538aSAchin Gupta #if DEBUG
107375f538aSAchin Gupta 	tsp_ctx->c_rt_ctx = 0;
108375f538aSAchin Gupta #endif
109375f538aSAchin Gupta 
110375f538aSAchin Gupta 	return rc;
111375f538aSAchin Gupta }
112375f538aSAchin Gupta 
113375f538aSAchin Gupta 
/*******************************************************************************
 * This function takes an SP context pointer and:
 * 1. Saves the S-EL1 system register context to tsp_ctx->cpu_ctx.
 * 2. Restores the current C runtime state (callee saved registers) from the
 *    stack frame using the reference to this state saved in tspd_enter_sp().
 * 3. It does not need to save any general purpose or EL3 system register state
 *    as the generic smc entry routine should have saved those.
 * This function does not return: control resumes after the tspd_enter_sp()
 * call inside tspd_synchronous_sp_entry(), which then returns 'ret'.
 ******************************************************************************/
void tspd_synchronous_sp_exit(tsp_context_t *tsp_ctx, uint64_t ret)
{
	/* Passing a NULL context is a critical programming error */
	assert(tsp_ctx != NULL);
	/* Save the Secure EL1 system register context */
	assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx);
	cm_el1_sysregs_context_save(SECURE);

	/* There must be a C runtime context saved by tspd_enter_sp() */
	assert(tsp_ctx->c_rt_ctx != 0);
	tspd_exit_sp(tsp_ctx->c_rt_ctx, ret);

	/* Should never reach here */
	assert(0);
}
1353df6012aSDouglas Raillard 
1363df6012aSDouglas Raillard /*******************************************************************************
1373df6012aSDouglas Raillard  * This function takes an SP context pointer and abort any preempted SMC
1383df6012aSDouglas Raillard  * request.
1393df6012aSDouglas Raillard  * Return 1 if there was a preempted SMC request, 0 otherwise.
1403df6012aSDouglas Raillard  ******************************************************************************/
1413df6012aSDouglas Raillard int tspd_abort_preempted_smc(tsp_context_t *tsp_ctx)
1423df6012aSDouglas Raillard {
143*16292f54SDavid Cunado 	if (!get_yield_smc_active_flag(tsp_ctx->state))
1443df6012aSDouglas Raillard 		return 0;
1453df6012aSDouglas Raillard 
1463df6012aSDouglas Raillard 	/* Abort any preempted SMC request */
147*16292f54SDavid Cunado 	clr_yield_smc_active_flag(tsp_ctx->state);
1483df6012aSDouglas Raillard 
1493df6012aSDouglas Raillard 	/*
1503df6012aSDouglas Raillard 	 * Arrange for an entry into the test secure payload. It will
1513df6012aSDouglas Raillard 	 * be returned via TSP_ABORT_DONE case in tspd_smc_handler.
1523df6012aSDouglas Raillard 	 */
1533df6012aSDouglas Raillard 	cm_set_elr_el3(SECURE,
154*16292f54SDavid Cunado 		       (uint64_t) &tsp_vectors->abort_yield_smc_entry);
1553df6012aSDouglas Raillard 	uint64_t rc = tspd_synchronous_sp_entry(tsp_ctx);
1563df6012aSDouglas Raillard 
1573df6012aSDouglas Raillard 	if (rc != 0)
1583df6012aSDouglas Raillard 		panic();
1593df6012aSDouglas Raillard 
1603df6012aSDouglas Raillard 	return 1;
1613df6012aSDouglas Raillard }
1623df6012aSDouglas Raillard 
163