/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */


/*******************************************************************************
 * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
 * plug-in component to the Secure Monitor, registered as a runtime service. The
 * SPD is expected to be a functional extension of the Secure Payload (SP) that
 * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
 * the Trusted OS/Applications range to the dispatcher. The SPD will either
 * handle the request locally or delegate it to the Secure Payload. It is also
 * responsible for initialising and maintaining communication with the SP.
 ******************************************************************************/
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <bl31.h>
#include <context_mgmt.h>
#include <debug.h>
#include <errno.h>
#include <platform.h>
#include <runtime_svc.h>
#include <stddef.h>
#include <tsp.h>
#include <uuid.h>
#include "tspd_private.h"

/*******************************************************************************
 * Single structure to hold information about the various entry points into the
 * Secure Payload. It is initialised once on the primary core after a cold boot.
 ******************************************************************************/
entry_info_t *tsp_entry_info;

/*******************************************************************************
 * Array to keep track of per-cpu Secure Payload state
 ******************************************************************************/
tsp_context_t tspd_sp_context[TSPD_CORE_COUNT];


/* TSP UID */
DEFINE_SVC_UUID(tsp_uuid,
		0x5b3056a0, 0x3291, 0x427b, 0x98, 0x11,
		0x71, 0x68, 0xca, 0x50, 0xf3, 0xfa);

int32_t tspd_init(void);

/*******************************************************************************
 * This function is the handler registered for S-EL1 interrupts by the TSPD. It
 * validates the interrupt and upon success arranges entry into the TSP at
 * 'tsp_fiq_entry()' for handling the interrupt.
 ******************************************************************************/
static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
					    uint32_t flags,
					    void *handle,
					    void *cookie)
{
	uint32_t linear_id;
	uint64_t mpidr;
	tsp_context_t *tsp_ctx;

	/* Check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

#if IMF_READ_INTERRUPT_ID
	/* Check the security status of the interrupt */
	assert(ic_get_interrupt_group(id) == SECURE);
#endif

	/* Sanity check the pointer to this cpu's context */
	mpidr = read_mpidr();
	assert(handle == cm_get_context(mpidr, NON_SECURE));

	/* Save the non-secure context before entering the TSP */
	cm_el1_sysregs_context_save(NON_SECURE);

	/* Get a reference to this cpu's TSP context */
	linear_id = platform_get_core_pos(mpidr);
	tsp_ctx = &tspd_sp_context[linear_id];
	assert(&tsp_ctx->cpu_ctx == cm_get_context(mpidr, SECURE));

	/*
	 * Determine if the TSP was previously preempted. Its last known
	 * context has to be preserved in this case.
	 * The TSP should return control to the TSPD after handling this
	 * FIQ. Preserve essential EL3 context to allow entry into the
	 * TSP at the FIQ entry point using the 'cpu_context' structure.
	 * There is no need to save the secure system register context
	 * since the TSP is supposed to preserve it during S-EL1 interrupt
	 * handling.
	 */
	if (get_std_smc_active_flag(tsp_ctx->state)) {
		tsp_ctx->saved_spsr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
						      CTX_SPSR_EL3);
		tsp_ctx->saved_elr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
						     CTX_ELR_EL3);
	}

	SMC_SET_EL3(&tsp_ctx->cpu_ctx,
		    CTX_SPSR_EL3,
		    SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));
	SMC_SET_EL3(&tsp_ctx->cpu_ctx,
		    CTX_ELR_EL3,
		    (uint64_t) tsp_entry_info->fiq_entry);
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	/*
	 * Tell the TSP that it has to handle an FIQ synchronously. The address
	 * of the instruction in the normal world where the interrupt was
	 * generated is also passed for debugging purposes. It is safe to
	 * retrieve this address from ELR_EL3 as the secure context will not
	 * take effect until el3_exit().
	 */
	SMC_RET2(&tsp_ctx->cpu_ctx, TSP_HANDLE_FIQ_AND_RETURN, read_elr_el3());
}

/*******************************************************************************
 * Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type
 * (aarch32/aarch64) if not already known and initialises the context for entry
 * into the SP for its initialisation.
 ******************************************************************************/
int32_t tspd_setup(void)
{
	entry_point_info_t *image_info;
	int32_t rc;
	uint64_t mpidr = read_mpidr();
	uint32_t linear_id;

	linear_id = platform_get_core_pos(mpidr);

	/*
	 * Get information about the Secure Payload (BL32) image. Its
	 * absence is a critical failure. TODO: Add support to
	 * conditionally include the SPD service
	 */
	image_info = bl31_get_next_image_info(SECURE);
	assert(image_info);

	/*
	 * If there's no valid entry point for the SP, we return a non-zero
	 * value signalling failure to initialize the service. We bail out
	 * without registering any handlers
	 */
	if (!image_info->pc)
		return 1;

	/*
	 * We could inspect the SP image and determine its execution
	 * state, i.e. whether it is AArch32 or AArch64. Assume it is
	 * AArch64 for the time being.
	 */
	rc = tspd_init_secure_context(image_info->pc,
				      TSP_AARCH64,
				      mpidr,
				      &tspd_sp_context[linear_id]);
	assert(rc == 0);

	/*
	 * All TSPD initialization done. Now register our init function with
	 * BL31 for deferred invocation
	 */
	bl31_register_bl32_init(&tspd_init);

	return rc;
}

/*******************************************************************************
 * This function passes control to the Secure Payload image (BL32) for the first
 * time on the primary cpu after a cold boot. It assumes that a valid secure
 * context has already been created by tspd_setup() which can be directly used.
 * It also assumes that a valid non-secure context has been initialised by PSCI
 * so it does not need to save and restore any non-secure state. This function
 * performs a synchronous entry into the Secure Payload. The SP passes control
 * back to this routine through an SMC.
 ******************************************************************************/
int32_t tspd_init(void)
{
	uint64_t mpidr = read_mpidr();
	uint32_t linear_id = platform_get_core_pos(mpidr), flags;
	uint64_t rc;
	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];

	/*
	 * Arrange for an entry into the test secure payload. We expect an array
	 * of vectors in return
	 */
	rc = tspd_synchronous_sp_entry(tsp_ctx);
	assert(rc != 0);
	if (rc) {
		set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);

		/*
		 * TSP has been successfully initialized. Register power
		 * management hooks with PSCI
		 */
		psci_register_spd_pm_hook(&tspd_pm);
	}

	/*
	 * Register an interrupt handler for S-EL1 interrupts generated while
	 * executing in the non-secure state.
	 */
	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					     tspd_sel1_interrupt_handler,
					     flags);
	if (rc)
		panic();

	return rc;
}


/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the Secure Payload
 * to delegate work and return results back to the non-secure state. Lastly it
 * will also return any information that the Secure Payload needs to do the
 * work assigned to it.
 ******************************************************************************/
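/*
 * Note on the arguments below: 'smc_fid' identifies the SMC and 'x1'-'x4'
 * carry its parameters. 'handle' points to the cpu_context_t of the security
 * state the SMC originated from, and 'flags' encodes that security state
 * (queried via is_caller_non_secure()). 'cookie' is unused by this handler.
 */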
uint64_t tspd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	cpu_context_t *ns_cpu_context;
	gp_regs_t *ns_gp_regs;
	unsigned long mpidr = read_mpidr();
	uint32_t linear_id = platform_get_core_pos(mpidr), ns;
	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);

	switch (smc_fid) {

	/*
	 * This function ID is used only by the TSP to indicate that it has
	 * finished handling an S-EL1 FIQ interrupt. Execution should resume
	 * in the normal world.
	 */
	case TSP_HANDLED_S_EL1_FIQ:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		assert(handle == cm_get_context(mpidr, SECURE));

		/*
		 * Restore the relevant EL3 state which was saved to service
		 * this SMC.
		 */
		if (get_std_smc_active_flag(tsp_ctx->state)) {
			SMC_SET_EL3(&tsp_ctx->cpu_ctx,
				    CTX_SPSR_EL3,
				    tsp_ctx->saved_spsr_el3);
			SMC_SET_EL3(&tsp_ctx->cpu_ctx,
				    CTX_ELR_EL3,
				    tsp_ctx->saved_elr_el3);
		}

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
		assert(ns_cpu_context);

		/*
		 * Restore non-secure state. There is no need to save the
		 * secure system register context since the TSP was supposed
		 * to preserve it during S-EL1 interrupt handling.
		 */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		SMC_RET0((uint64_t) ns_cpu_context);


	/*
	 * This function ID is used only by the TSP to indicate that it was
	 * interrupted by an EL3 FIQ interrupt. Execution should resume
	 * in the normal world.
	 */
	case TSP_EL3_FIQ:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		assert(handle == cm_get_context(mpidr, SECURE));

		/* Assert that standard SMC execution has been preempted */
		assert(get_std_smc_active_flag(tsp_ctx->state));

		/* Save the secure system register state */
		cm_el1_sysregs_context_save(SECURE);

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
		assert(ns_cpu_context);

		/* Restore non-secure state */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		SMC_RET1(ns_cpu_context, TSP_EL3_FIQ);


	/*
	 * This function ID is used only by the SP to indicate it has
	 * finished initialising itself after a cold boot
	 */
	case TSP_ENTRY_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * Stash the SP entry points information. This is done
		 * only once on the primary cpu
		 */
		assert(tsp_entry_info == NULL);
		tsp_entry_info = (entry_info_t *) x1;

		/*
		 * SP reports completion. The SPD must have initiated
		 * the original request through a synchronous entry
		 * into the SP. Jump back to the original C runtime
		 * context.
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);

		/* Should never reach here */
		assert(0);

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. turning itself on in response to an earlier psci
	 *    cpu_on request
	 * 2. resuming itself after an earlier psci cpu_suspend
	 *    request.
	 */
	case TSP_ON_DONE:
	case TSP_RESUME_DONE:

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. suspending itself after an earlier psci cpu_suspend
	 *    request.
	 * 2. turning itself off in response to an earlier psci
	 *    cpu_off request.
	 */
	case TSP_OFF_DONE:
	case TSP_SUSPEND_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * SP reports completion. The SPD must have initiated the
		 * original request through a synchronous entry into the SP.
		 * Jump back to the original C runtime context, and pass x1 as
		 * the return value to the caller
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);

		/* Should never reach here */
		assert(0);

	/*
	 * Request from a non-secure client to perform an
	 * arithmetic operation or response from the secure
	 * payload to an earlier request.
	 */
	case TSP_FID_ADD:
	case TSP_FID_SUB:
	case TSP_FID_MUL:
	case TSP_FID_DIV:
		if (ns) {
			/*
			 * This is a fresh request from the non-secure client.
			 * The parameters are in x1 and x2. Figure out which
			 * registers need to be preserved, save the non-secure
			 * state and send the request to the secure payload.
			 */
			assert(handle == cm_get_context(mpidr, NON_SECURE));
			cm_el1_sysregs_context_save(NON_SECURE);

			/* Save x1 and x2 for use by TSP_GET_ARGS call below */
			SMC_SET_GP(handle, CTX_GPREG_X1, x1);
			SMC_SET_GP(handle, CTX_GPREG_X2, x2);

			/*
			 * We are done stashing the non-secure context. Ask the
			 * secure payload to do the work now.
			 */

			/*
			 * Verify if there is a valid context to use, copy the
			 * operation type and parameters to the secure context
			 * and jump to the fast smc entry point in the secure
			 * payload. Entry into S-EL1 will take place upon exit
			 * from this function.
			 */
			assert(&tsp_ctx->cpu_ctx == cm_get_context(mpidr, SECURE));
			set_aapcs_args7(&tsp_ctx->cpu_ctx, smc_fid, x1, x2, 0, 0,
					0, 0, 0);
			cm_set_elr_el3(SECURE, (uint64_t) tsp_entry_info->fast_smc_entry);
			cm_el1_sysregs_context_restore(SECURE);
			cm_set_next_eret_context(SECURE);

			return smc_fid;
		} else {
			/*
			 * This is the result from the secure payload of an
			 * earlier request. The results are in x1-x2. Copy them
			 * into the non-secure context, save the secure state
			 * and return to the non-secure state.
			 */
			assert(handle == cm_get_context(mpidr, SECURE));
			cm_el1_sysregs_context_save(SECURE);

			/* Get a reference to the non-secure context */
			ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
			assert(ns_cpu_context);
			ns_gp_regs = get_gpregs_ctx(ns_cpu_context);

			/* Restore non-secure state */
			cm_el1_sysregs_context_restore(NON_SECURE);
			cm_set_next_eret_context(NON_SECURE);

			SMC_RET2(ns_gp_regs, x1, x2);
		}

		break;

	/*
	 * This is a request from the secure payload for more arguments
	 * for an ongoing arithmetic operation requested by the
	 * non-secure world. Simply return the arguments from the non-
	 * secure client in the original call.
	 */
	case TSP_GET_ARGS:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
		assert(ns_cpu_context);
		ns_gp_regs = get_gpregs_ctx(ns_cpu_context);

		SMC_RET2(handle, read_ctx_reg(ns_gp_regs, CTX_GPREG_X1),
			 read_ctx_reg(ns_gp_regs, CTX_GPREG_X2));

	case TOS_CALL_COUNT:
		/*
		 * Return the number of service function IDs implemented to
		 * provide service to the non-secure world
		 */
		SMC_RET1(handle, TSP_NUM_FID);

	case TOS_UID:
		/* Return TSP UID to the caller */
		SMC_UUID_RET(handle, tsp_uuid);

	case TOS_CALL_VERSION:
		/* Return the version of the current implementation */
		SMC_RET2(handle, TSP_VERSION_MAJOR, TSP_VERSION_MINOR);

	default:
		break;
	}

	SMC_RET1(handle, SMC_UNK);
}

/* Define an SPD runtime service descriptor */
DECLARE_RT_SVC(
	spd,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_FAST,
	tspd_setup,
	tspd_smc_handler
);
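
/*
 * Illustrative sketch only (not part of the dispatcher): a hypothetical
 * non-secure world caller of the arithmetic service handled above. As set up
 * by the TSP_FID_* cases in tspd_smc_handler(), the function ID is passed in
 * x0 and the two operands in x1/x2; the results written back via SMC_RET2()
 * arrive in x0/x1 when execution returns to the caller. The helper name and
 * the use of GCC extended inline assembly are assumptions, not code from this
 * firmware.
 *
 *	static inline uint64_t tsp_arith_smc(uint64_t fid, uint64_t a,
 *					     uint64_t b)
 *	{
 *		register uint64_t x0 __asm__("x0") = fid;
 *		register uint64_t x1 __asm__("x1") = a;
 *		register uint64_t x2 __asm__("x2") = b;
 *
 *		__asm__ volatile("smc #0"
 *				 : "+r" (x0), "+r" (x1), "+r" (x2)
 *				 :
 *				 : "x3", "x4", "x5", "x6", "x7", "memory");
 *
 *		return x0;
 *	}
 */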