1375f538aSAchin Gupta /* 2375f538aSAchin Gupta * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. 3375f538aSAchin Gupta * 4375f538aSAchin Gupta * Redistribution and use in source and binary forms, with or without 5375f538aSAchin Gupta * modification, are permitted provided that the following conditions are met: 6375f538aSAchin Gupta * 7375f538aSAchin Gupta * Redistributions of source code must retain the above copyright notice, this 8375f538aSAchin Gupta * list of conditions and the following disclaimer. 9375f538aSAchin Gupta * 10375f538aSAchin Gupta * Redistributions in binary form must reproduce the above copyright notice, 11375f538aSAchin Gupta * this list of conditions and the following disclaimer in the documentation 12375f538aSAchin Gupta * and/or other materials provided with the distribution. 13375f538aSAchin Gupta * 14375f538aSAchin Gupta * Neither the name of ARM nor the names of its contributors may be used 15375f538aSAchin Gupta * to endorse or promote products derived from this software without specific 16375f538aSAchin Gupta * prior written permission. 17375f538aSAchin Gupta * 18375f538aSAchin Gupta * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19375f538aSAchin Gupta * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20375f538aSAchin Gupta * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21375f538aSAchin Gupta * ARE DISCLAIMED. 
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context_mgmt.h>
#include <string.h>
#include "tspd_private.h"

/*******************************************************************************
 * Given a secure payload entry point info pointer, the entry point PC, the
 * register width of the TSP and a pointer to the per-cpu TSP context, this
 * function initialises the TSP context and fills in the entry point info so
 * that it can later be used to program the CPU context for entry into the
 * secure payload. (The cpu association is derived from the current MPIDR,
 * not passed in.)
 ******************************************************************************/
void tspd_init_tsp_ep_state(struct entry_point_info *tsp_entry_point,
			    uint32_t rw,
			    uint64_t pc,
			    tsp_context_t *tsp_ctx)
{
	uint32_t ep_attr;

	/* Passing a NULL context is a critical programming error */
	assert(tsp_ctx);
	assert(tsp_entry_point);
	assert(pc);

	/*
	 * We support AArch64 TSP for now.
	 * TODO: Add support for AArch32 TSP
	 */
	assert(rw == TSP_AARCH64);

	/* Associate this context with the current cpu and mark the TSP off */
	tsp_ctx->mpidr = read_mpidr_el1();
	tsp_ctx->state = 0;
	set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF);
	clr_std_smc_active_flag(tsp_ctx->state);

	/* Register this context as the secure-world context for this cpu */
	cm_set_context(&tsp_ctx->cpu_ctx, SECURE);

	/* initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	/* Propagate EL3's data endianness into the entry point attributes */
	if (read_sctlr_el3() & SCTLR_EE_BIT)
		ep_attr |= EP_EE_BIG;
	SET_PARAM_HEAD(tsp_entry_point, PARAM_EP, VERSION_1, ep_attr);

	/* Enter the TSP at S-EL1 (SP_EL1 stack) with all exceptions masked */
	tsp_entry_point->pc = pc;
	tsp_entry_point->spsr = SPSR_64(MODE_EL1,
					MODE_SP_ELX,
					DISABLE_ALL_EXCEPTIONS);
	memset(&tsp_entry_point->args, 0, sizeof(tsp_entry_point->args));
}

/*******************************************************************************
 * This function takes an SP context pointer and:
 * 1. Applies the S-EL1 system register context from tsp_ctx->cpu_ctx.
 * 2. Saves the current C runtime state (callee saved registers) on the stack
 *    frame and saves a reference to this state.
 * 3.
Calls el3_exit() so that the EL3 system and general purpose registers 88375f538aSAchin Gupta * from the tsp_ctx->cpu_ctx are used to enter the secure payload image. 89375f538aSAchin Gupta ******************************************************************************/ 90fb037bfbSDan Handley uint64_t tspd_synchronous_sp_entry(tsp_context_t *tsp_ctx) 91375f538aSAchin Gupta { 92375f538aSAchin Gupta uint64_t rc; 93375f538aSAchin Gupta 94d3280bebSJuan Castillo assert(tsp_ctx != NULL); 95375f538aSAchin Gupta assert(tsp_ctx->c_rt_ctx == 0); 96375f538aSAchin Gupta 97375f538aSAchin Gupta /* Apply the Secure EL1 system register context and switch to it */ 9808ab89d3SAndrew Thoelke assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx); 99375f538aSAchin Gupta cm_el1_sysregs_context_restore(SECURE); 100375f538aSAchin Gupta cm_set_next_eret_context(SECURE); 101375f538aSAchin Gupta 102375f538aSAchin Gupta rc = tspd_enter_sp(&tsp_ctx->c_rt_ctx); 103375f538aSAchin Gupta #if DEBUG 104375f538aSAchin Gupta tsp_ctx->c_rt_ctx = 0; 105375f538aSAchin Gupta #endif 106375f538aSAchin Gupta 107375f538aSAchin Gupta return rc; 108375f538aSAchin Gupta } 109375f538aSAchin Gupta 110375f538aSAchin Gupta 111375f538aSAchin Gupta /******************************************************************************* 112375f538aSAchin Gupta * This function takes an SP context pointer and: 113375f538aSAchin Gupta * 1. Saves the S-EL1 system register context tp tsp_ctx->cpu_ctx. 114375f538aSAchin Gupta * 2. Restores the current C runtime state (callee saved registers) from the 115375f538aSAchin Gupta * stack frame using the reference to this state saved in tspd_enter_sp(). 116375f538aSAchin Gupta * 3. It does not need to save any general purpose or EL3 system register state 117375f538aSAchin Gupta * as the generic smc entry routine should have saved those. 
118375f538aSAchin Gupta ******************************************************************************/ 119fb037bfbSDan Handley void tspd_synchronous_sp_exit(tsp_context_t *tsp_ctx, uint64_t ret) 120375f538aSAchin Gupta { 121d3280bebSJuan Castillo assert(tsp_ctx != NULL); 122375f538aSAchin Gupta /* Save the Secure EL1 system register context */ 12308ab89d3SAndrew Thoelke assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx); 124375f538aSAchin Gupta cm_el1_sysregs_context_save(SECURE); 125375f538aSAchin Gupta 126375f538aSAchin Gupta assert(tsp_ctx->c_rt_ctx != 0); 127375f538aSAchin Gupta tspd_exit_sp(tsp_ctx->c_rt_ctx, ret); 128375f538aSAchin Gupta 129375f538aSAchin Gupta /* Should never reach here */ 130375f538aSAchin Gupta assert(0); 131375f538aSAchin Gupta } 132