xref: /rk3399_ARM-atf/services/spd/tspd/tspd_common.c (revision fb037bfb7cbf7b404c069b4ebac5a10059d948b1)
1375f538aSAchin Gupta /*
2375f538aSAchin Gupta  * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
3375f538aSAchin Gupta  *
4375f538aSAchin Gupta  * Redistribution and use in source and binary forms, with or without
5375f538aSAchin Gupta  * modification, are permitted provided that the following conditions are met:
6375f538aSAchin Gupta  *
7375f538aSAchin Gupta  * Redistributions of source code must retain the above copyright notice, this
8375f538aSAchin Gupta  * list of conditions and the following disclaimer.
9375f538aSAchin Gupta  *
10375f538aSAchin Gupta  * Redistributions in binary form must reproduce the above copyright notice,
11375f538aSAchin Gupta  * this list of conditions and the following disclaimer in the documentation
12375f538aSAchin Gupta  * and/or other materials provided with the distribution.
13375f538aSAchin Gupta  *
14375f538aSAchin Gupta  * Neither the name of ARM nor the names of its contributors may be used
15375f538aSAchin Gupta  * to endorse or promote products derived from this software without specific
16375f538aSAchin Gupta  * prior written permission.
17375f538aSAchin Gupta  *
18375f538aSAchin Gupta  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19375f538aSAchin Gupta  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20375f538aSAchin Gupta  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21375f538aSAchin Gupta  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22375f538aSAchin Gupta  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23375f538aSAchin Gupta  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24375f538aSAchin Gupta  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25375f538aSAchin Gupta  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26375f538aSAchin Gupta  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27375f538aSAchin Gupta  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28375f538aSAchin Gupta  * POSSIBILITY OF SUCH DAMAGE.
29375f538aSAchin Gupta  */
30375f538aSAchin Gupta 
31375f538aSAchin Gupta #include <stdio.h>
32375f538aSAchin Gupta #include <errno.h>
33375f538aSAchin Gupta #include <string.h>
34375f538aSAchin Gupta #include <assert.h>
35375f538aSAchin Gupta #include <arch_helpers.h>
36375f538aSAchin Gupta #include <platform.h>
37375f538aSAchin Gupta #include <bl_common.h>
38375f538aSAchin Gupta #include <runtime_svc.h>
39375f538aSAchin Gupta #include <context_mgmt.h>
4035e98e55SDan Handley #include "tspd_private.h"
41375f538aSAchin Gupta 
42375f538aSAchin Gupta /*******************************************************************************
43375f538aSAchin Gupta  * Given a secure payload entrypoint, register width, cpu id & pointer to a
44375f538aSAchin Gupta  * context data structure, this function will create a secure context ready for
45375f538aSAchin Gupta  * programming an entry into the secure payload.
46375f538aSAchin Gupta  ******************************************************************************/
47375f538aSAchin Gupta int32_t tspd_init_secure_context(uint64_t entrypoint,
48375f538aSAchin Gupta 				uint32_t rw,
49375f538aSAchin Gupta 				uint64_t mpidr,
50*fb037bfbSDan Handley 				tsp_context_t *tsp_ctx)
51375f538aSAchin Gupta {
5231526cb0SVikram Kanigiri 	uint32_t scr, sctlr;
53*fb037bfbSDan Handley 	el1_sys_regs_t *el1_state;
54375f538aSAchin Gupta 	uint32_t spsr;
55375f538aSAchin Gupta 
56375f538aSAchin Gupta 	/* Passing a NULL context is a critical programming error */
57375f538aSAchin Gupta 	assert(tsp_ctx);
58375f538aSAchin Gupta 
59375f538aSAchin Gupta 	/*
60375f538aSAchin Gupta 	 * We support AArch64 TSP for now.
61375f538aSAchin Gupta 	 * TODO: Add support for AArch32 TSP
62375f538aSAchin Gupta 	 */
63375f538aSAchin Gupta 	assert(rw == TSP_AARCH64);
64375f538aSAchin Gupta 
65375f538aSAchin Gupta 	/*
66375f538aSAchin Gupta 	 * This might look redundant if the context was statically
67375f538aSAchin Gupta 	 * allocated but this function cannot make that assumption.
68375f538aSAchin Gupta 	 */
69375f538aSAchin Gupta 	memset(tsp_ctx, 0, sizeof(*tsp_ctx));
70375f538aSAchin Gupta 
71375f538aSAchin Gupta 	/* Set the right security state and register width for the SP */
7231526cb0SVikram Kanigiri 	scr = read_scr();
73375f538aSAchin Gupta 	scr &= ~SCR_NS_BIT;
74375f538aSAchin Gupta 	scr &= ~SCR_RW_BIT;
75375f538aSAchin Gupta 	if (rw == TSP_AARCH64)
76375f538aSAchin Gupta 		scr |= SCR_RW_BIT;
77375f538aSAchin Gupta 
78375f538aSAchin Gupta 	/* Get a pointer to the S-EL1 context memory */
79375f538aSAchin Gupta 	el1_state = get_sysregs_ctx(&tsp_ctx->cpu_ctx);
80375f538aSAchin Gupta 
81375f538aSAchin Gupta 	/*
8231526cb0SVikram Kanigiri 	 * Program the SCTLR_EL1 such that upon entry in S-EL1, caches and MMU are
8331526cb0SVikram Kanigiri 	 * disabled and exception endianess is set to be the same as EL3
84375f538aSAchin Gupta 	 */
852eb01d34SAchin Gupta 	sctlr = read_sctlr_el3();
86375f538aSAchin Gupta 	sctlr &= SCTLR_EE_BIT;
87375f538aSAchin Gupta 	sctlr |= SCTLR_EL1_RES1;
88375f538aSAchin Gupta 	write_ctx_reg(el1_state, CTX_SCTLR_EL1, sctlr);
89375f538aSAchin Gupta 
90375f538aSAchin Gupta 	/* Set this context as ready to be initialised i.e OFF */
91375f538aSAchin Gupta 	tsp_ctx->state = TSP_STATE_OFF;
92375f538aSAchin Gupta 
93375f538aSAchin Gupta 	/* Associate this context with the cpu specified */
94375f538aSAchin Gupta 	tsp_ctx->mpidr = mpidr;
95375f538aSAchin Gupta 
96375f538aSAchin Gupta 	cm_set_context(mpidr, &tsp_ctx->cpu_ctx, SECURE);
97375f538aSAchin Gupta 	spsr = make_spsr(MODE_EL1, MODE_SP_ELX, rw);
98375f538aSAchin Gupta 	cm_set_el3_eret_context(SECURE, entrypoint, spsr, scr);
99375f538aSAchin Gupta 
100375f538aSAchin Gupta 	cm_init_exception_stack(mpidr, SECURE);
101375f538aSAchin Gupta 
102375f538aSAchin Gupta 	return 0;
103375f538aSAchin Gupta }
104375f538aSAchin Gupta 
/*******************************************************************************
 * This function takes an SP context pointer and:
 * 1. Applies the S-EL1 system register context from tsp_ctx->cpu_ctx.
 * 2. Saves the current C runtime state (callee saved registers) on the stack
 *    frame and saves a reference to this state.
 * 3. Calls el3_exit() so that the EL3 system and general purpose registers
 *    from the tsp_ctx->cpu_ctx are used to enter the secure payload image.
 *
 * Returns the value passed by the SP to tspd_synchronous_sp_exit() when it
 * hands control back to EL3.
 ******************************************************************************/
uint64_t tspd_synchronous_sp_entry(tsp_context_t *tsp_ctx)
{
	uint64_t rc;

	/* No synchronous entry must already be in progress on this context */
	assert(tsp_ctx->c_rt_ctx == 0);

	/* Apply the Secure EL1 system register context and switch to it */
	assert(cm_get_context(read_mpidr(), SECURE) == &tsp_ctx->cpu_ctx);
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	/*
	 * tspd_enter_sp() stashes the C runtime state in tsp_ctx->c_rt_ctx
	 * before entering the SP. Control returns here once the SP calls
	 * tspd_synchronous_sp_exit(), with the SP's return value in 'rc'.
	 */
	rc = tspd_enter_sp(&tsp_ctx->c_rt_ctx);
#if DEBUG
	/* Invalidate the stale C runtime state reference in debug builds */
	tsp_ctx->c_rt_ctx = 0;
#endif

	return rc;
}
131375f538aSAchin Gupta 
132375f538aSAchin Gupta 
/*******************************************************************************
 * This function takes an SP context pointer and:
 * 1. Saves the S-EL1 system register context to tsp_ctx->cpu_ctx.
 * 2. Restores the current C runtime state (callee saved registers) from the
 *    stack frame using the reference to this state saved in tspd_enter_sp().
 * 3. It does not need to save any general purpose or EL3 system register state
 *    as the generic smc entry routine should have saved those.
 *
 * 'ret' becomes the return value of the matching tspd_synchronous_sp_entry()
 * call. This function does not return.
 ******************************************************************************/
void tspd_synchronous_sp_exit(tsp_context_t *tsp_ctx, uint64_t ret)
{
	/* Save the Secure EL1 system register context */
	assert(cm_get_context(read_mpidr(), SECURE) == &tsp_ctx->cpu_ctx);
	cm_el1_sysregs_context_save(SECURE);

	/* A matching tspd_synchronous_sp_entry() must have stashed this */
	assert(tsp_ctx->c_rt_ctx != 0);
	/*
	 * Restore the C runtime state saved by tspd_enter_sp() and resume
	 * execution at its call site with 'ret' as the return value.
	 */
	tspd_exit_sp(tsp_ctx->c_rt_ctx, ret);

	/* Should never reach here */
	assert(0);
}
153