/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */


/*******************************************************************************
 * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
 * plug-in component to the Secure Monitor, registered as a runtime service. The
 * SPD is expected to be a functional extension of the Secure Payload (SP) that
 * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
 * the Trusted OS/Applications range to the dispatcher. The SPD will either
 * handle the request locally or delegate it to the Secure Payload. It is also
 * responsible for initialising and maintaining communication with the SP.
 ******************************************************************************/
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <console.h>
#include <platform.h>
#include <psci_private.h>
#include <context_mgmt.h>
#include <runtime_svc.h>
#include <bl31.h>
#include <tsp.h>
#include <psci.h>
#include <tspd_private.h>
#include <debug.h>

/*******************************************************************************
 * Single structure to hold information about the various entry points into the
 * Secure Payload. It is initialised once on the primary core after a cold boot.
 ******************************************************************************/
entry_info *tsp_entry_info;

/*******************************************************************************
 * Array to keep track of per-cpu Secure Payload state
 ******************************************************************************/
tsp_context tspd_sp_context[TSPD_CORE_COUNT];

/*******************************************************************************
 * Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type
 * (aarch32/aarch64) if not already known and initialises the context for entry
 * into the SP for its initialisation.
 ******************************************************************************/
int32_t tspd_setup(void)
{
	el_change_info *image_info;
	int32_t rc;
	uint64_t mpidr = read_mpidr();
	uint32_t linear_id;

	linear_id = platform_get_core_pos(mpidr);

	/*
	 * Get information about the Secure Payload (BL32) image. Its
	 * absence is a critical failure. TODO: Add support to
	 * conditionally include the SPD service.
	 */
	image_info = bl31_get_next_image_info(SECURE);
	assert(image_info);

	/*
	 * We could inspect the SP image and determine its execution
	 * state, i.e. whether it is AArch32 or AArch64. Assume AArch64
	 * for the time being.
	 */
	rc = tspd_init_secure_context(image_info->entrypoint,
				      TSP_AARCH64,
				      mpidr,
				      &tspd_sp_context[linear_id]);
	assert(rc == 0);

	return rc;
}

/*******************************************************************************
 * This function passes control to the Secure Payload image (BL32) for the first
 * time on the primary cpu after a cold boot. It assumes that a valid secure
 * context has already been created by tspd_setup() which can be directly used.
 * It also assumes that a valid non-secure context has been initialised by PSCI
 * so it does not need to save and restore any non-secure state. This function
 * performs a synchronous entry into the Secure Payload. The SP passes control
 * back to this routine through an SMC. It also passes the extents of memory
 * made available to BL32 by BL31.
 ******************************************************************************/
int32_t bl32_init(meminfo *bl32_meminfo)
{
	uint64_t mpidr = read_mpidr();
	uint32_t linear_id = platform_get_core_pos(mpidr);
	uint64_t rc;
	tsp_context *tsp_ctx = &tspd_sp_context[linear_id];

	/*
	 * Arrange for passing a pointer to the meminfo structure
	 * describing the memory extents available to the secure
	 * payload.
	 * TODO: We are passing a pointer to BL31 internal memory
	 * whereas this structure should be copied to a communication
	 * buffer between the SP and the SPD.
	 */
	write_ctx_reg(get_gpregs_ctx(&tsp_ctx->cpu_ctx),
		      CTX_GPREG_X0,
		      (uint64_t) bl32_meminfo);

	/*
	 * Arrange for an entry into the test secure payload. We expect an
	 * array of vectors in return.
	 */
	rc = tspd_synchronous_sp_entry(tsp_ctx);
	assert(rc != 0);
	if (rc)
		tsp_ctx->state = TSP_STATE_ON;

	return rc;
}

/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the Secure Payload
 * to delegate work and return results back to the non-secure state. Lastly it
 * will also return any information that the Secure Payload needs to do the
 * work assigned to it.
 ******************************************************************************/
uint64_t tspd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	cpu_context *ns_cpu_context;
	gp_regs *ns_gp_regs;
	unsigned long mpidr = read_mpidr();
	uint32_t linear_id = platform_get_core_pos(mpidr), ns;
	tsp_context *tsp_ctx = &tspd_sp_context[linear_id];

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);

	switch (smc_fid) {

	/*
	 * This function ID is used only by the SP to indicate it has
	 * finished initialising itself after a cold boot.
	 */
	case TSP_ENTRY_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * Stash the SP entry points information. This is done
		 * only once on the primary cpu.
		 */
		assert(tsp_entry_info == NULL);
		tsp_entry_info = (entry_info *) x1;

		/*
		 * SP reports completion. The SPD must have initiated
		 * the original request through a synchronous entry
		 * into the SP. Jump back to the original C runtime
		 * context.
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);

		/* Should never reach here */
		assert(0);

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. turning itself on in response to an earlier psci
	 *    cpu_on request
	 * 2. resuming itself after an earlier psci cpu_suspend
	 *    request.
	 */
	case TSP_ON_DONE:
	case TSP_RESUME_DONE:

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. suspending itself after an earlier psci cpu_suspend
	 *    request.
	 * 2. turning itself off in response to an earlier psci
	 *    cpu_off request.
	 */
	case TSP_OFF_DONE:
	case TSP_SUSPEND_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * SP reports completion. The SPD must have initiated the
		 * original request through a synchronous entry into the SP.
		 * Jump back to the original C runtime context, and pass x1
		 * as the return value to the caller.
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);

		/* Should never reach here */
		assert(0);

	/*
	 * Request from a non-secure client to perform an arithmetic
	 * operation, or response from the secure payload to an earlier
	 * request.
	 */
	case TSP_FID_ADD:
	case TSP_FID_SUB:
	case TSP_FID_MUL:
	case TSP_FID_DIV:
		if (ns) {
			/*
			 * This is a fresh request from the non-secure client.
			 * The parameters are in x1 and x2. Figure out which
			 * registers need to be preserved, save the non-secure
			 * state and send the request to the secure payload.
			 */
			assert(handle == cm_get_context(mpidr, NON_SECURE));
			cm_el1_sysregs_context_save(NON_SECURE);

			/* Save x1 and x2 for use by TSP_GET_ARGS call below */
			SMC_SET_GP(handle, CTX_GPREG_X1, x1);
			SMC_SET_GP(handle, CTX_GPREG_X2, x2);

			/*
			 * We are done stashing the non-secure context. Ask the
			 * secure payload to do the work now.
			 */

			/*
			 * Verify if there is a valid context to use, copy the
			 * operation type and parameters to the secure context
			 * and jump to the fast smc entry point in the secure
			 * payload. Entry into S-EL1 will take place upon exit
			 * from this function.
			 */
			assert(&tsp_ctx->cpu_ctx == cm_get_context(mpidr, SECURE));
			set_aapcs_args7(&tsp_ctx->cpu_ctx, smc_fid, x1, x2, 0, 0,
					0, 0, 0);
			cm_set_el3_elr(SECURE, (uint64_t) tsp_entry_info->fast_smc_entry);
			cm_el1_sysregs_context_restore(SECURE);
			cm_set_next_eret_context(SECURE);

			return smc_fid;
		} else {
			/*
			 * This is the result from the secure client of an
			 * earlier request. The results are in x1-x2. Copy them
			 * into the non-secure context, save the secure state
			 * and return to the non-secure state.
			 */
			assert(handle == cm_get_context(mpidr, SECURE));
			cm_el1_sysregs_context_save(SECURE);

			/* Get a reference to the non-secure context */
			ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
			assert(ns_cpu_context);
			ns_gp_regs = get_gpregs_ctx(ns_cpu_context);

			/* Restore non-secure state */
			cm_el1_sysregs_context_restore(NON_SECURE);
			cm_set_next_eret_context(NON_SECURE);

			SMC_RET2(ns_gp_regs, x1, x2);
		}

		break;

	/*
	 * This is a request from the secure payload for more arguments
	 * for an ongoing arithmetic operation requested by the
	 * non-secure world. Simply return the arguments from the
	 * non-secure client in the original call.
	 */
	case TSP_GET_ARGS:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
		assert(ns_cpu_context);
		ns_gp_regs = get_gpregs_ctx(ns_cpu_context);

		SMC_RET2(handle, read_ctx_reg(ns_gp_regs, CTX_GPREG_X1),
			 read_ctx_reg(ns_gp_regs, CTX_GPREG_X2));

	default:
		break;
	}

	SMC_RET1(handle, SMC_UNK);
}

/* Define a SPD runtime service descriptor */
DECLARE_RT_SVC(
	spd,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_FAST,
	tspd_setup,
	tspd_smc_handler
);