/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */


/*******************************************************************************
 * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
 * plug-in component to the Secure Monitor, registered as a runtime service. The
 * SPD is expected to be a functional extension of the Secure Payload (SP) that
 * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
 * the Trusted OS/Applications range to the dispatcher. The SPD will either
 * handle the request locally or delegate it to the Secure Payload. It is also
 * responsible for initialising and maintaining communication with the SP.
 ******************************************************************************/
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <bl31.h>
#include <context_mgmt.h>
#include <runtime_svc.h>
#include <stddef.h>
#include <tsp.h>
#include <uuid.h>
#include "tspd_private.h"

/*******************************************************************************
 * Single structure to hold information about the various entry points into the
 * Secure Payload. It is initialised once on the primary core after a cold boot.
 ******************************************************************************/
entry_info_t *tsp_entry_info;

/*******************************************************************************
 * Array to keep track of per-cpu Secure Payload state
 ******************************************************************************/
tsp_context_t tspd_sp_context[TSPD_CORE_COUNT];


/* TSP UID */
DEFINE_SVC_UUID(tsp_uuid,
		0x5b3056a0, 0x3291, 0x427b, 0x98, 0x11,
		0x71, 0x68, 0xca, 0x50, 0xf3, 0xfa);

int32_t tspd_init(void);


/*******************************************************************************
 * Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type
 * (aarch32/aarch64) if not already known and initialises the context for entry
 * into the SP for its initialisation.
 ******************************************************************************/
int32_t tspd_setup(void)
{
	entry_point_info_t *image_info;
	int32_t rc;
	uint64_t mpidr = read_mpidr();
	uint32_t linear_id;

	linear_id = platform_get_core_pos(mpidr);

	/*
	 * Get information about the Secure Payload (BL32) image. Its
	 * absence is a critical failure. TODO: Add support to
	 * conditionally include the SPD service
	 */
	image_info = bl31_get_next_image_info(SECURE);
	assert(image_info);

	/*
	 * If there's no valid entry point for SP, we return a non-zero value
	 * signalling failure initializing the service. We bail out without
	 * registering any handlers.
	 */
	if (!image_info->pc)
		return 1;

	/*
	 * We could inspect the SP image and determine its execution
	 * state, i.e. whether AArch32 or AArch64. Assuming it's AArch64
	 * for the time being.
	 */
	rc = tspd_init_secure_context(image_info->pc,
				      TSP_AARCH64,
				      mpidr,
				      &tspd_sp_context[linear_id]);
	assert(rc == 0);

	/*
	 * All TSPD initialization done. Now register our init function with
	 * BL31 for deferred invocation.
	 */
	bl31_register_bl32_init(&tspd_init);

	return rc;
}

/*******************************************************************************
 * This function passes control to the Secure Payload image (BL32) for the first
 * time on the primary cpu after a cold boot. It assumes that a valid secure
 * context has already been created by tspd_setup() which can be directly used.
 * It also assumes that a valid non-secure context has been initialised by PSCI
 * so it does not need to save and restore any non-secure state. This function
 * performs a synchronous entry into the Secure payload. The SP passes control
 * back to this routine through an SMC.
 ******************************************************************************/
int32_t tspd_init(void)
{
	uint64_t mpidr = read_mpidr();
	uint32_t linear_id = platform_get_core_pos(mpidr);
	uint64_t rc;
	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];

	/*
	 * Arrange for an entry into the test secure payload. We expect an array
	 * of vectors in return.
	 */
	rc = tspd_synchronous_sp_entry(tsp_ctx);
	assert(rc != 0);
	if (rc) {
		tsp_ctx->state = TSP_STATE_ON;

		/*
		 * TSP has been successfully initialized. Register power
		 * management hooks with PSCI.
		 */
		psci_register_spd_pm_hook(&tspd_pm);
	}

	return rc;
}
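
/*
 * For orientation, a rough sketch of the cold boot handshake implied by the
 * two functions above. This is informational only; the BL31 and TSP (BL32)
 * sides of the exchange live outside this file and are assumed here rather
 * than defined by it.
 *
 *   BL31 runtime service init     -> tspd_setup()
 *                                      bl31_register_bl32_init(tspd_init)
 *   BL31, before first NS entry   -> tspd_init()
 *                                      tspd_synchronous_sp_entry(tsp_ctx)
 *   TSP finishes its own init     -> issues a TSP_ENTRY_DONE SMC, handled in
 *                                    tspd_smc_handler() below, which calls
 *                                    tspd_synchronous_sp_exit() and so returns
 *                                    control to tspd_init(), which then
 *                                    registers the PSCI power management hooks.
 */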

/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the Secure payload
 * to delegate work and return results back to the non-secure state. Lastly it
 * will also return any information that the secure payload needs to do the
 * work assigned to it.
 ******************************************************************************/
uint64_t tspd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	cpu_context_t *ns_cpu_context;
	gp_regs_t *ns_gp_regs;
	unsigned long mpidr = read_mpidr();
	uint32_t linear_id = platform_get_core_pos(mpidr), ns;
	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);

	switch (smc_fid) {

	/*
	 * This function ID is used only by the SP to indicate it has
	 * finished initialising itself after a cold boot.
	 */
	case TSP_ENTRY_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * Stash the SP entry points information. This is done
		 * only once on the primary cpu.
		 */
		assert(tsp_entry_info == NULL);
		tsp_entry_info = (entry_info_t *) x1;

		/*
		 * SP reports completion. The SPD must have initiated
		 * the original request through a synchronous entry
		 * into the SP. Jump back to the original C runtime
		 * context.
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);

		/* Should never reach here */
		assert(0);

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. turning itself on in response to an earlier psci
	 *    cpu_on request
	 * 2. resuming itself after an earlier psci cpu_suspend
	 *    request.
	 */
	case TSP_ON_DONE:
	case TSP_RESUME_DONE:

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. suspending itself after an earlier psci cpu_suspend
	 *    request.
	 * 2. turning itself off in response to an earlier psci
	 *    cpu_off request.
	 */
	case TSP_OFF_DONE:
	case TSP_SUSPEND_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * SP reports completion. The SPD must have initiated the
		 * original request through a synchronous entry into the SP.
		 * Jump back to the original C runtime context, and pass x1 as
		 * return value to the caller.
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);

		/* Should never reach here */
		assert(0);

	/*
	 * Request from a non-secure client to perform an
	 * arithmetic operation or a response from the secure
	 * payload to an earlier request.
	 */
	case TSP_FID_ADD:
	case TSP_FID_SUB:
	case TSP_FID_MUL:
	case TSP_FID_DIV:
		if (ns) {
			/*
			 * This is a fresh request from the non-secure client.
			 * The parameters are in x1 and x2. Figure out which
			 * registers need to be preserved, save the non-secure
			 * state and send the request to the secure payload.
			 */
			assert(handle == cm_get_context(mpidr, NON_SECURE));
			cm_el1_sysregs_context_save(NON_SECURE);

			/* Save x1 and x2 for use by TSP_GET_ARGS call below */
			SMC_SET_GP(handle, CTX_GPREG_X1, x1);
			SMC_SET_GP(handle, CTX_GPREG_X2, x2);

			/*
			 * We are done stashing the non-secure context. Ask the
			 * secure payload to do the work now.
			 */

			/*
			 * Verify if there is a valid context to use, copy the
			 * operation type and parameters to the secure context
			 * and jump to the fast smc entry point in the secure
			 * payload. Entry into S-EL1 will take place upon exit
			 * from this function.
			 */
			assert(&tsp_ctx->cpu_ctx == cm_get_context(mpidr, SECURE));
			set_aapcs_args7(&tsp_ctx->cpu_ctx, smc_fid, x1, x2, 0, 0,
					0, 0, 0);
			cm_set_el3_elr(SECURE, (uint64_t) tsp_entry_info->fast_smc_entry);
			cm_el1_sysregs_context_restore(SECURE);
			cm_set_next_eret_context(SECURE);

			return smc_fid;
		} else {
			/*
			 * This is the result from the secure payload of an
			 * earlier request. The results are in x1-x2. Copy them
			 * into the non-secure context, save the secure state
			 * and return to the non-secure state.
			 */
			assert(handle == cm_get_context(mpidr, SECURE));
			cm_el1_sysregs_context_save(SECURE);

			/* Get a reference to the non-secure context */
			ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
			assert(ns_cpu_context);
			ns_gp_regs = get_gpregs_ctx(ns_cpu_context);

			/* Restore non-secure state */
			cm_el1_sysregs_context_restore(NON_SECURE);
			cm_set_next_eret_context(NON_SECURE);

			SMC_RET2(ns_gp_regs, x1, x2);
		}

		break;
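
	/*
	 * Illustrative sketch of a complete arithmetic request, tying together
	 * the TSP_FID_* cases above and the TSP_GET_ARGS case below. The
	 * TSP-side steps are assumptions inferred from the function IDs handled
	 * here; the authoritative flow is in the TSP (BL32) implementation.
	 *
	 *  1. A NS client issues an SMC with x0 = TSP_FID_ADD and operands in
	 *     x1/x2. The "if (ns)" branch above saves the NS context, stashes
	 *     x1/x2 and ERETs to the TSP's fast_smc_entry.
	 *  2. The TSP may issue a TSP_GET_ARGS SMC; the case below hands the
	 *     stashed x1/x2 back to it.
	 *  3. The TSP finishes and issues TSP_FID_ADD again with its results in
	 *     x1/x2. The "else" branch above restores the NS context and
	 *     SMC_RET2 passes those results back to the NS caller (x0/x1 under
	 *     the SMC calling convention).
	 */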

	/*
	 * This is a request from the secure payload for more arguments
	 * for an ongoing arithmetic operation requested by the
	 * non-secure world. Simply return the arguments from the non-
	 * secure client in the original call.
	 */
	case TSP_GET_ARGS:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
		assert(ns_cpu_context);
		ns_gp_regs = get_gpregs_ctx(ns_cpu_context);

		SMC_RET2(handle, read_ctx_reg(ns_gp_regs, CTX_GPREG_X1),
			 read_ctx_reg(ns_gp_regs, CTX_GPREG_X2));

	case TOS_CALL_COUNT:
		/*
		 * Return the number of service function IDs implemented to
		 * provide service to the non-secure world.
		 */
		SMC_RET1(handle, TSP_NUM_FID);

	case TOS_UID:
		/* Return TSP UID to the caller */
		SMC_UUID_RET(handle, tsp_uuid);

	case TOS_CALL_VERSION:
		/* Return the version of the current implementation */
		SMC_RET2(handle, TSP_VERSION_MAJOR, TSP_VERSION_MINOR);

	default:
		break;
	}

	SMC_RET1(handle, SMC_UNK);
}

/* Define an SPD runtime service descriptor */
DECLARE_RT_SVC(
	spd,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_FAST,
	tspd_setup,
	tspd_smc_handler
);
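
/*
 * Informational note on the descriptor above, based on the generic runtime
 * services framework rather than on anything defined in this file: the
 * DECLARE_RT_SVC() macro is assumed to place the descriptor in a dedicated
 * linker section that BL31 scans during boot, so that tspd_setup() is invoked
 * while runtime services are initialised and tspd_smc_handler() subsequently
 * receives every fast SMC whose owning entity number falls within
 * [OEN_TOS_START, OEN_TOS_END].
 */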