/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */


/*******************************************************************************
 * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
 * plug-in component to the Secure Monitor, registered as a runtime service. The
 * SPD is expected to be a functional extension of the Secure Payload (SP) that
 * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
 * the Trusted OS/Applications range to the dispatcher. The SPD will either
 * handle the request locally or delegate it to the Secure Payload. It is also
 * responsible for initialising and maintaining communication with the SP.
 ******************************************************************************/
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <console.h>
#include <platform.h>
#include <psci_private.h>
#include <context_mgmt.h>
#include <runtime_svc.h>
#include <bl31.h>
#include <tsp.h>
#include <psci.h>
#include <tspd_private.h>
#include <debug.h>

/*******************************************************************************
 * Single structure to hold information about the various entry points into the
 * Secure Payload. It is initialised once on the primary core after a cold boot.
 * Populated by the TSP_ENTRY_DONE SMC handled in tspd_smc_handler() below.
 ******************************************************************************/
entry_info *tsp_entry_info;

/*******************************************************************************
 * Array to keep track of per-cpu Secure Payload state. Indexed by the linear
 * core id returned by platform_get_core_pos().
 ******************************************************************************/
tsp_context tspd_sp_context[TSPD_CORE_COUNT];

/*******************************************************************************
 * Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type
 * (aarch32/aarch64) if not already known and initialises the context for entry
 * into the SP for its initialisation.
71375f538aSAchin Gupta ******************************************************************************/ 72375f538aSAchin Gupta int32_t tspd_setup(void) 73375f538aSAchin Gupta { 74375f538aSAchin Gupta el_change_info *image_info; 75375f538aSAchin Gupta int32_t rc; 76375f538aSAchin Gupta uint64_t mpidr = read_mpidr(); 77375f538aSAchin Gupta uint32_t linear_id; 78375f538aSAchin Gupta 79375f538aSAchin Gupta linear_id = platform_get_core_pos(mpidr); 80375f538aSAchin Gupta 81375f538aSAchin Gupta /* 82375f538aSAchin Gupta * Get information about the Secure Payload (BL32) image. Its 83375f538aSAchin Gupta * absence is a critical failure. TODO: Add support to 84375f538aSAchin Gupta * conditionally include the SPD service 85375f538aSAchin Gupta */ 86375f538aSAchin Gupta image_info = bl31_get_next_image_info(SECURE); 87375f538aSAchin Gupta assert(image_info); 88375f538aSAchin Gupta 89375f538aSAchin Gupta /* 90375f538aSAchin Gupta * We could inspect the SP image and determine it's execution 91375f538aSAchin Gupta * state i.e whether AArch32 or AArch64. Assuming it's AArch64 92375f538aSAchin Gupta * for the time being. 93375f538aSAchin Gupta */ 94375f538aSAchin Gupta rc = tspd_init_secure_context(image_info->entrypoint, 95375f538aSAchin Gupta TSP_AARCH64, 96375f538aSAchin Gupta mpidr, 97375f538aSAchin Gupta &tspd_sp_context[linear_id]); 98375f538aSAchin Gupta assert(rc == 0); 99375f538aSAchin Gupta 100375f538aSAchin Gupta return rc; 101375f538aSAchin Gupta } 102375f538aSAchin Gupta 103375f538aSAchin Gupta /******************************************************************************* 104375f538aSAchin Gupta * This function passes control to the Secure Payload image (BL32) for the first 105375f538aSAchin Gupta * time on the primary cpu after a cold boot. It assumes that a valid secure 106375f538aSAchin Gupta * context has already been created by tspd_setup() which can be directly used. 
107375f538aSAchin Gupta * It also assumes that a valid non-secure context has been initialised by PSCI 108375f538aSAchin Gupta * so it does not need to save and restore any non-secure state. This function 109375f538aSAchin Gupta * performs a synchronous entry into the Secure payload. The SP passes control 110375f538aSAchin Gupta * back to this routine through a SMC. It also passes the extents of memory made 111375f538aSAchin Gupta * available to BL32 by BL31. 112375f538aSAchin Gupta ******************************************************************************/ 113375f538aSAchin Gupta int32_t bl32_init(meminfo *bl32_meminfo) 114375f538aSAchin Gupta { 115375f538aSAchin Gupta uint64_t mpidr = read_mpidr(); 116375f538aSAchin Gupta uint32_t linear_id = platform_get_core_pos(mpidr); 117375f538aSAchin Gupta uint64_t rc; 118375f538aSAchin Gupta tsp_context *tsp_ctx = &tspd_sp_context[linear_id]; 119375f538aSAchin Gupta 120375f538aSAchin Gupta /* 121375f538aSAchin Gupta * Arrange for passing a pointer to the meminfo structure 122375f538aSAchin Gupta * describing the memory extents available to the secure 123375f538aSAchin Gupta * payload. 124375f538aSAchin Gupta * TODO: We are passing a pointer to BL31 internal memory 125375f538aSAchin Gupta * whereas this structure should be copied to a communication 126375f538aSAchin Gupta * buffer between the SP and SPD. 127375f538aSAchin Gupta */ 128375f538aSAchin Gupta write_ctx_reg(get_gpregs_ctx(&tsp_ctx->cpu_ctx), 129375f538aSAchin Gupta CTX_GPREG_X0, 130375f538aSAchin Gupta (uint64_t) bl32_meminfo); 131375f538aSAchin Gupta 132*607084eeSAchin Gupta /* 133*607084eeSAchin Gupta * Arrange for an entry into the test secure payload. 
We expect an array 134*607084eeSAchin Gupta * of vectors in return 135*607084eeSAchin Gupta */ 136375f538aSAchin Gupta rc = tspd_synchronous_sp_entry(tsp_ctx); 137375f538aSAchin Gupta assert(rc != 0); 138375f538aSAchin Gupta if (rc) 139375f538aSAchin Gupta tsp_ctx->state = TSP_STATE_ON; 140375f538aSAchin Gupta 141375f538aSAchin Gupta return rc; 142375f538aSAchin Gupta } 143375f538aSAchin Gupta 144375f538aSAchin Gupta /******************************************************************************* 145375f538aSAchin Gupta * This function is responsible for handling all SMCs in the Trusted OS/App 146375f538aSAchin Gupta * range from the non-secure state as defined in the SMC Calling Convention 147375f538aSAchin Gupta * Document. It is also responsible for communicating with the Secure payload 148375f538aSAchin Gupta * to delegate work and return results back to the non-secure state. Lastly it 149375f538aSAchin Gupta * will also return any information that the secure payload needs to do the 150375f538aSAchin Gupta * work assigned to it. 
 ******************************************************************************/
uint64_t tspd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	unsigned long mpidr = read_mpidr();
	uint32_t linear_id = platform_get_core_pos(mpidr), ns;

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);

	switch (smc_fid) {

	/*
	 * This function ID is used only by the SP to indicate it has
	 * finished initialising itself after a cold boot
	 */
	case TSP_ENTRY_DONE:
		/* Only the secure world may report TSP initialisation */
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * Stash the SP entry points information. This is done
		 * only once on the primary cpu
		 */
		assert(tsp_entry_info == NULL);
		tsp_entry_info = (entry_info *) x1;

		/*
		 * SP reports completion. The SPD must have initiated
		 * the original request through a synchronous entry
		 * into the SP. Jump back to the original C runtime
		 * context.
		 */
		tspd_synchronous_sp_exit(&tspd_sp_context[linear_id], x1);

		/*
		 * Should never reach here.
		 * NOTE(review): there is no break after this assert; control
		 * flow relies on tspd_synchronous_sp_exit() never returning —
		 * confirm that contract holds in NDEBUG builds.
		 */
		assert(0);

	/*
	 * These function IDs is used only by the SP to indicate it has
	 * finished:
	 * 1. turning itself on in response to an earlier psci
	 *    cpu_on request
	 * 2. resuming itself after an earlier psci cpu_suspend
	 *    request.
	 */
	case TSP_ON_DONE:
	case TSP_RESUME_DONE:

	/*
	 * These function IDs is used only by the SP to indicate it has
	 * finished:
	 * 1. suspending itself after an earlier psci cpu_suspend
	 *    request.
	 * 2. turning itself off in response to an earlier psci
	 *    cpu_off request.
	 */
	case TSP_OFF_DONE:
	case TSP_SUSPEND_DONE:
		/* All four completion SMCs share this secure-only path */
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * SP reports completion. The SPD must have initiated the
		 * original request through a synchronous entry into the SP.
		 * Jump back to the original C runtime context, and pass x1 as
		 * return value to the caller
		 */
		tspd_synchronous_sp_exit(&tspd_sp_context[linear_id], x1);

		/* Should never reach here */
		assert(0);

	default:
		break;
	}

	/* Unrecognised function ID: report SMC_UNK to the caller */
	SMC_RET1(handle, SMC_UNK);
}

/*
 * Define a SPD runtime service descriptor: a fast SMC handler covering the
 * whole Trusted OS owning-entity range, with tspd_setup as its init hook.
 */
DECLARE_RT_SVC(
	spd,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_FAST,
	tspd_setup,
	tspd_smc_handler
);