1 /* 2 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * Redistributions of source code must retain the above copyright notice, this 8 * list of conditions and the following disclaimer. 9 * 10 * Redistributions in binary form must reproduce the above copyright notice, 11 * this list of conditions and the following disclaimer in the documentation 12 * and/or other materials provided with the distribution. 13 * 14 * Neither the name of ARM nor the names of its contributors may be used 15 * to endorse or promote products derived from this software without specific 16 * prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 32 /******************************************************************************* 33 * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a 34 * plug-in component to the Secure Monitor, registered as a runtime service. The 35 * SPD is expected to be a functional extension of the Secure Payload (SP) that 36 * executes in Secure EL1. 
 * The Secure Monitor will delegate all SMCs targeting
 * the Trusted OS/Applications range to the dispatcher. The SPD will either
 * handle the request locally or delegate it to the Secure Payload. It is also
 * responsible for initialising and maintaining communication with the SP.
 ******************************************************************************/
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <bl31.h>
#include <context_mgmt.h>
#include <debug.h>
#include <errno.h>
#include <platform.h>
#include <runtime_svc.h>
#include <stddef.h>
#include <tsp.h>
#include <uuid.h>
#include "tspd_private.h"

/*******************************************************************************
 * Address of the entrypoint vector table in the Secure Payload. It is
 * initialised once on the primary core after a cold boot (see the
 * TSP_ENTRY_DONE handling in tspd_smc_handler(), which stashes the pointer
 * reported by the TSP).
 ******************************************************************************/
tsp_vectors_t *tsp_vectors;

/*******************************************************************************
 * Array to keep track of per-cpu Secure Payload state. Indexed by the linear
 * core id returned by platform_get_core_pos().
 ******************************************************************************/
tsp_context_t tspd_sp_context[TSPD_CORE_COUNT];


/* TSP UID, returned to normal-world callers of TOS_UID */
DEFINE_SVC_UUID(tsp_uuid,
	0x5b3056a0, 0x3291, 0x427b, 0x98, 0x11,
	0x71, 0x68, 0xca, 0x50, 0xf3, 0xfa);

/* Forward declaration: deferred TSP initialisation entry, registered below */
int32_t tspd_init(void);

/*******************************************************************************
 * This function is the handler registered for S-EL1 interrupts by the TSPD. It
 * validates the interrupt and upon success arranges entry into the TSP at
 * 'tsp_fiq_entry()' for handling the interrupt.
 ******************************************************************************/
static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
					    uint32_t flags,
					    void *handle,
					    void *cookie)
{
	uint32_t linear_id;
	uint64_t mpidr;
	tsp_context_t *tsp_ctx;

	/* Check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

#if IMF_READ_INTERRUPT_ID
	/* Check the security status of the interrupt */
	assert(plat_ic_get_interrupt_type(id) == INTR_TYPE_S_EL1);
#endif

	/* Sanity check the pointer to this cpu's context */
	mpidr = read_mpidr();
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering the TSP */
	cm_el1_sysregs_context_save(NON_SECURE);

	/* Get a reference to this cpu's TSP context */
	linear_id = platform_get_core_pos(mpidr);
	tsp_ctx = &tspd_sp_context[linear_id];
	assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));

	/*
	 * Determine if the TSP was previously preempted. Its last known
	 * context has to be preserved in this case.
	 * The TSP should return control to the TSPD after handling this
	 * FIQ. Preserve essential EL3 context to allow entry into the
	 * TSP at the FIQ entry point using the 'cpu_context' structure.
	 * There is no need to save the secure system register context
	 * since the TSP is supposed to preserve it during S-EL1 interrupt
	 * handling. The saved SPSR/ELR are put back by the
	 * TSP_HANDLED_S_EL1_FIQ case in tspd_smc_handler().
	 */
	if (get_std_smc_active_flag(tsp_ctx->state)) {
		tsp_ctx->saved_spsr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
						      CTX_SPSR_EL3);
		tsp_ctx->saved_elr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
						     CTX_ELR_EL3);
	}

	/* Arrange for the next secure-world ERET to land on the TSP's FIQ
	 * vector at S-EL1 with all exceptions masked. */
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_elr_spsr_el3(SECURE, (uint64_t) &tsp_vectors->fiq_entry,
		    SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));
	cm_set_next_eret_context(SECURE);

	/*
	 * Tell the TSP that it has to handle an FIQ synchronously. Also the
	 * instruction in normal world where the interrupt was generated is
	 * passed for debugging purposes. It is safe to retrieve this address
	 * from ELR_EL3 as the secure context will not take effect until
	 * el3_exit().
	 */
	SMC_RET2(&tsp_ctx->cpu_ctx, TSP_HANDLE_FIQ_AND_RETURN, read_elr_el3());
}

/*******************************************************************************
 * Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type
 * (aarch32/aarch64) if not already known and initialises the context for entry
 * into the SP for its initialisation.
 *
 * Returns 0 on success. Returns 1 when no usable BL32 image is available, in
 * which case the service is not initialised and TSP SMCs will get SMC_UNK.
 ******************************************************************************/
int32_t tspd_setup(void)
{
	entry_point_info_t *tsp_ep_info;
	uint64_t mpidr = read_mpidr();
	uint32_t linear_id;

	linear_id = platform_get_core_pos(mpidr);

	/*
	 * Get information about the Secure Payload (BL32) image. Its
	 * absence is a critical failure. TODO: Add support to
	 * conditionally include the SPD service
	 */
	tsp_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (!tsp_ep_info) {
		WARN("No TSP provided by BL2 boot loader, Booting device"
			" without TSP initialization. SMC`s destined for TSP"
			" will return SMC_UNK\n");
		return 1;
	}

	/*
	 * If there's no valid entry point for SP, we return a non-zero value
	 * signalling failure initializing the service. We bail out without
	 * registering any handlers
	 */
	if (!tsp_ep_info->pc)
		return 1;

	/*
	 * We could inspect the SP image and determine its execution
	 * state i.e whether AArch32 or AArch64. Assuming it's AArch64
	 * for the time being.
	 */
	tspd_init_tsp_ep_state(tsp_ep_info,
				TSP_AARCH64,
				tsp_ep_info->pc,
				&tspd_sp_context[linear_id]);

#if TSP_INIT_ASYNC
	/* TSP is initialised asynchronously: BL31 exits into BL32 directly */
	bl31_set_next_image_type(SECURE);
#else
	/*
	 * All TSPD initialization done. Now register our init function with
	 * BL31 for deferred invocation
	 */
	bl31_register_bl32_init(&tspd_init);
#endif
	return 0;
}

/*******************************************************************************
 * This function passes control to the Secure Payload image (BL32) for the first
 * time on the primary cpu after a cold boot. It assumes that a valid secure
 * context has already been created by tspd_setup() which can be directly used.
 * It also assumes that a valid non-secure context has been initialised by PSCI
 * so it does not need to save and restore any non-secure state. This function
 * performs a synchronous entry into the Secure payload. The SP passes control
 * back to this routine through a SMC.
 *
 * Returns the (non-zero) value passed back by the TSP via TSP_ENTRY_DONE.
 ******************************************************************************/
int32_t tspd_init(void)
{
	uint64_t mpidr = read_mpidr();
	uint32_t linear_id = platform_get_core_pos(mpidr);
	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
	entry_point_info_t *tsp_entry_point;
	uint64_t rc;

	/*
	 * Get information about the Secure Payload (BL32) image. Its
	 * absence is a critical failure.
	 */
	tsp_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
	assert(tsp_entry_point);

	cm_init_context(mpidr, tsp_entry_point);

	/*
	 * Arrange for an entry into the test secure payload. It will be
	 * returned via TSP_ENTRY_DONE case
	 */
	rc = tspd_synchronous_sp_entry(tsp_ctx);
	assert(rc != 0);

	return rc;
}


/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the Secure payload
 * to delegate work and return results back to the non-secure state.
 * Lastly it will also return any information that the secure payload needs to
 * do the work assigned to it.
 ******************************************************************************/
uint64_t tspd_smc_handler(uint32_t smc_fid,
			 uint64_t x1,
			 uint64_t x2,
			 uint64_t x3,
			 uint64_t x4,
			 void *cookie,
			 void *handle,
			 uint64_t flags)
{
	cpu_context_t *ns_cpu_context;
	unsigned long mpidr = read_mpidr();
	uint32_t linear_id = platform_get_core_pos(mpidr), ns;
	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
	uint64_t rc;
#if TSP_INIT_ASYNC
	entry_point_info_t *next_image_info;
#endif

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);

	switch (smc_fid) {

	/*
	 * This function ID is used by TSP to indicate that it was
	 * preempted by a normal world IRQ.
	 *
	 */
	case TSP_PREEMPTED:
		/* This FID is only valid when issued from the secure world */
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		assert(handle == cm_get_context(SECURE));
		cm_el1_sysregs_context_save(SECURE);
		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/*
		 * Restore non-secure state. There is no need to save the
		 * secure system register context since the TSP was supposed
		 * to preserve it during S-EL1 interrupt handling.
		 */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		SMC_RET1(ns_cpu_context, SMC_PREEMPTED);

	/*
	 * This function ID is used only by the TSP to indicate that it has
	 * finished handling a S-EL1 FIQ interrupt. Execution should resume
	 * in the normal world.
	 */
	case TSP_HANDLED_S_EL1_FIQ:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		assert(handle == cm_get_context(SECURE));

		/*
		 * Restore the relevant EL3 state which was saved to service
		 * this SMC (stashed by tspd_sel1_interrupt_handler() when a
		 * standard SMC was in progress).
		 */
		if (get_std_smc_active_flag(tsp_ctx->state)) {
			SMC_SET_EL3(&tsp_ctx->cpu_ctx,
				    CTX_SPSR_EL3,
				    tsp_ctx->saved_spsr_el3);
			SMC_SET_EL3(&tsp_ctx->cpu_ctx,
				    CTX_ELR_EL3,
				    tsp_ctx->saved_elr_el3);
		}

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/*
		 * Restore non-secure state. There is no need to save the
		 * secure system register context since the TSP was supposed
		 * to preserve it during S-EL1 interrupt handling.
		 */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		SMC_RET0((uint64_t) ns_cpu_context);


	/*
	 * This function ID is used only by the TSP to indicate that it was
	 * interrupted due to a EL3 FIQ interrupt. Execution should resume
	 * in the normal world.
	 */
	case TSP_EL3_FIQ:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		assert(handle == cm_get_context(SECURE));

		/* Assert that standard SMC execution has been preempted */
		assert(get_std_smc_active_flag(tsp_ctx->state));

		/* Save the secure system register state */
		cm_el1_sysregs_context_save(SECURE);

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/* Restore non-secure state */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		SMC_RET1(ns_cpu_context, TSP_EL3_FIQ);


	/*
	 * This function ID is used only by the SP to indicate it has
	 * finished initialising itself after a cold boot
	 */
	case TSP_ENTRY_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * Stash the SP entry points information. This is done
		 * only once on the primary cpu
		 */
		assert(tsp_vectors == NULL);
		tsp_vectors = (tsp_vectors_t *) x1;

		if (tsp_vectors) {
			set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);

			/*
			 * TSP has been successfully initialized. Register power
			 * management hooks with PSCI
			 */
			psci_register_spd_pm_hook(&tspd_pm);

			/*
			 * Register an interrupt handler for S-EL1 interrupts
			 * when generated during code executing in the
			 * non-secure state.
			 */
			flags = 0;
			set_interrupt_rm_flag(flags, NON_SECURE);
			rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
						tspd_sel1_interrupt_handler,
						flags);
			if (rc)
				panic();
		}


#if TSP_INIT_ASYNC
		/* Save the Secure EL1 system register context */
		assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx);
		cm_el1_sysregs_context_save(SECURE);

		/* Program EL3 registers to enable entry into the next EL */
		next_image_info = bl31_plat_get_next_image_ep_info(NON_SECURE);
		assert(next_image_info);
		assert(NON_SECURE ==
				GET_SECURITY_STATE(next_image_info->h.attr));

		cm_init_context(read_mpidr_el1(), next_image_info);
		cm_prepare_el3_exit(NON_SECURE);
		SMC_RET0(cm_get_context(NON_SECURE));
#else
		/*
		 * SP reports completion. The SPD must have initiated
		 * the original request through a synchronous entry
		 * into the SP. Jump back to the original C runtime
		 * context. (Does not return here; no break needed.)
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);
#endif

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. turning itself on in response to an earlier psci
	 *    cpu_on request
	 * 2. resuming itself after an earlier psci cpu_suspend
	 *    request.
	 */
	case TSP_ON_DONE:
	case TSP_RESUME_DONE:

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. suspending itself after an earlier psci cpu_suspend
	 *    request.
	 * 2. turning itself off in response to an earlier psci
	 *    cpu_off request.
	 */
	case TSP_OFF_DONE:
	case TSP_SUSPEND_DONE:
	case TSP_SYSTEM_OFF_DONE:
	case TSP_SYSTEM_RESET_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * SP reports completion. The SPD must have initiated the
		 * original request through a synchronous entry into the SP.
		 * Jump back to the original C runtime context, and pass x1 as
		 * return value to the caller
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);

	/*
	 * Request from non-secure client to perform an
	 * arithmetic operation or response from secure
	 * payload to an earlier request.
	 */
	case TSP_FAST_FID(TSP_ADD):
	case TSP_FAST_FID(TSP_SUB):
	case TSP_FAST_FID(TSP_MUL):
	case TSP_FAST_FID(TSP_DIV):

	case TSP_STD_FID(TSP_ADD):
	case TSP_STD_FID(TSP_SUB):
	case TSP_STD_FID(TSP_MUL):
	case TSP_STD_FID(TSP_DIV):
		if (ns) {
			/*
			 * This is a fresh request from the non-secure client.
			 * The parameters are in x1 and x2. Figure out which
			 * registers need to be preserved, save the non-secure
			 * state and send the request to the secure payload.
			 */
			assert(handle == cm_get_context(NON_SECURE));

			/* Check if we are already preempted */
			if (get_std_smc_active_flag(tsp_ctx->state))
				SMC_RET1(handle, SMC_UNK);

			cm_el1_sysregs_context_save(NON_SECURE);

			/* Save x1 and x2 for use by TSP_GET_ARGS call below */
			store_tsp_args(tsp_ctx, x1, x2);

			/*
			 * We are done stashing the non-secure context. Ask the
			 * secure payload to do the work now.
			 */

			/*
			 * Verify if there is a valid context to use, copy the
			 * operation type and parameters to the secure context
			 * and jump to the fast smc entry point in the secure
			 * payload. Entry into S-EL1 will take place upon exit
			 * from this function.
			 */
			assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));

			/* Set appropriate entry for SMC.
			 * We expect the TSP to manage the PSTATE.I and PSTATE.F
			 * flags as appropriate.
			 */
			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
				cm_set_elr_el3(SECURE, (uint64_t)
						&tsp_vectors->fast_smc_entry);
			} else {
				set_std_smc_active_flag(tsp_ctx->state);
				cm_set_elr_el3(SECURE, (uint64_t)
						&tsp_vectors->std_smc_entry);
			}

			cm_el1_sysregs_context_restore(SECURE);
			cm_set_next_eret_context(SECURE);
			SMC_RET3(&tsp_ctx->cpu_ctx, smc_fid, x1, x2);
		} else {
			/*
			 * This is the result from the secure client of an
			 * earlier request. The results are in x1-x3. Copy it
			 * into the non-secure context, save the secure state
			 * and return to the non-secure state.
			 */
			assert(handle == cm_get_context(SECURE));
			cm_el1_sysregs_context_save(SECURE);

			/* Get a reference to the non-secure context */
			ns_cpu_context = cm_get_context(NON_SECURE);
			assert(ns_cpu_context);

			/* Restore non-secure state */
			cm_el1_sysregs_context_restore(NON_SECURE);
			cm_set_next_eret_context(NON_SECURE);
			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_STD)
				clr_std_smc_active_flag(tsp_ctx->state);
			SMC_RET3(ns_cpu_context, x1, x2, x3);
		}

		break;

	/*
	 * Request from non secure world to resume the preempted
	 * Standard SMC call.
	 */
	case TSP_FID_RESUME:
		/* RESUME should be invoked only by normal world */
		if (!ns) {
			assert(0);
			break;
		}

		/*
		 * This is a resume request from the non-secure client.
		 * save the non-secure state and send the request to
		 * the secure payload.
		 */
		assert(handle == cm_get_context(NON_SECURE));

		/* Check if we are already preempted before resume */
		if (!get_std_smc_active_flag(tsp_ctx->state))
			SMC_RET1(handle, SMC_UNK);

		cm_el1_sysregs_context_save(NON_SECURE);

		/*
		 * We are done stashing the non-secure context. Ask the
		 * secure payload to do the work now.
		 */

		/* We just need to return to the preempted point in
		 * TSP and the execution will resume as normal.
		 */
		cm_el1_sysregs_context_restore(SECURE);
		cm_set_next_eret_context(SECURE);
		SMC_RET0(&tsp_ctx->cpu_ctx);

	/*
	 * This is a request from the secure payload for more arguments
	 * for an ongoing arithmetic operation requested by the
	 * non-secure world. Simply return the arguments from the non-
	 * secure client in the original call.
	 */
	case TSP_GET_ARGS:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		get_tsp_args(tsp_ctx, x1, x2);
		SMC_RET2(handle, x1, x2);

	case TOS_CALL_COUNT:
		/*
		 * Return the number of service function IDs implemented to
		 * provide service to non-secure
		 */
		SMC_RET1(handle, TSP_NUM_FID);

	case TOS_UID:
		/* Return TSP UID to the caller */
		SMC_UUID_RET(handle, tsp_uuid);

	case TOS_CALL_VERSION:
		/* Return the version of current implementation */
		SMC_RET2(handle, TSP_VERSION_MAJOR, TSP_VERSION_MINOR);

	default:
		break;
	}

	/* Unrecognised FID or wrong originating world */
	SMC_RET1(handle, SMC_UNK);
}

/* Define a SPD runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
	tspd_fast,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_FAST,
	tspd_setup,
	tspd_smc_handler
);

/* Define a SPD runtime service descriptor for standard SMC calls */
DECLARE_RT_SVC(
	tspd_std,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_STD,
	NULL,
	tspd_smc_handler
);