/*
 * Copyright (c) 2013-2024, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*******************************************************************************
 * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
 * plug-in component to the Secure Monitor, registered as a runtime service. The
 * SPD is expected to be a functional extension of the Secure Payload (SP) that
 * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
 * the Trusted OS/Applications range to the dispatcher. The SPD will either
 * handle the request locally or delegate it to the Secure Payload. It is also
 * responsible for initialising and maintaining communication with the SP.
 ******************************************************************************/
#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <string.h>

#include <arch_helpers.h>
#include <bl31/bl31.h>
#include <bl31/ehf.h>
#include <bl32/tsp/tsp.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <plat/common/platform.h>
#include <tools_share/uuid.h>

#include "tspd_private.h"

/*******************************************************************************
 * Address of the entrypoint vector table in the Secure Payload. It is
 * initialised once on the primary core after a cold boot.
 ******************************************************************************/
tsp_vectors_t *tsp_vectors;

/*******************************************************************************
 * Array to keep track of per-cpu Secure Payload state
 ******************************************************************************/
tsp_context_t tspd_sp_context[TSPD_CORE_COUNT];

/* TSP UID */
DEFINE_SVC_UUID2(tsp_uuid,
	0xa056305b, 0x9132, 0x7b42, 0x98, 0x11,
	0x71, 0x68, 0xca, 0x50, 0xf3, 0xfa);

int32_t tspd_init(void);

/*
 * This helper function handles Secure EL1 preemption. The preemption could be
 * due to Non-secure interrupts or EL3 interrupts. In both cases we context
 * switch to the normal world; in the case of EL3 interrupts, the interrupt
 * will again be routed to EL3 and handled at the exception vectors.
 */
uint64_t tspd_handle_sp_preemption(void *handle)
{
	cpu_context_t *ns_cpu_context;

	assert(handle == cm_get_context(SECURE));
	cm_el1_sysregs_context_save(SECURE);
	/* Get a reference to the non-secure context */
	ns_cpu_context = cm_get_context(NON_SECURE);
	assert(ns_cpu_context);

	/*
	 * To allow the Secure EL1 interrupt handler to re-enter the TSP while
	 * the TSP is preempted, the secure system register context which will
	 * get overwritten must be additionally saved. This is currently done
	 * by the TSPD S-EL1 interrupt handler.
	 */

	/*
	 * Restore non-secure state.
	 */
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	/*
	 * The TSP was preempted during execution of a Yielding SMC Call.
	 * Return to the normal world with SMC_PREEMPTED as the error
	 * code in x0.
	 */
	SMC_RET1(ns_cpu_context, SMC_PREEMPTED);
}

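/*
 * Illustrative sketch (hypothetical normal world code, not part of this
 * file): a client that issued a Yielding SMC Call is expected to check x0
 * for SMC_PREEMPTED and either resume or abort the call, along the lines
 * of:
 *
 *	ret = smc(TSP_YIELD_FID(TSP_ADD), x1, x2, ...);
 *	while (ret == SMC_PREEMPTED)
 *		ret = smc(TSP_FID_RESUME, ...);
 *
 * where smc() stands in for an assumed normal world SMC wrapper; the exact
 * client code depends on the normal world software stack.
 */
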
/*******************************************************************************
 * This function is the handler registered for S-EL1 interrupts by the TSPD. It
 * validates the interrupt and upon success arranges entry into the TSP at
 * 'tsp_sel1_intr_entry()' for handling the interrupt.
 * Typically, interrupts for a specific security state get handled in the same
 * security exception level if the execution is in the same security state. For
 * example, if a non-secure interrupt gets fired when the CPU is executing in
 * NS-EL2, it gets handled in the non-secure world.
 * However, interrupts belonging to the opposite security state typically demand
 * a world (context) switch. This is in line with the security principle which
 * states that a secure interrupt has to be handled in the secure world.
 * Hence, the TSPD in EL3 expects the context (handle) for a secure interrupt to
 * be non-secure and vice versa.
 * However, a race condition between non-secure and secure interrupts can lead
 * to a scenario where the above assumptions do not hold true. This is
 * demonstrated below in Note 1.
 ******************************************************************************/
static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
					    uint32_t flags,
					    void *handle,
					    void *cookie)
{
	uint32_t linear_id;
	tsp_context_t *tsp_ctx;

	/* Get a reference to this cpu's TSP context */
	linear_id = plat_my_core_pos();
	tsp_ctx = &tspd_sp_context[linear_id];

#if TSP_NS_INTR_ASYNC_PREEMPT

	/*
	 * Note 1:
	 * Under the current interrupt routing model, interrupts from the other
	 * world are routed to EL3 when TSP_NS_INTR_ASYNC_PREEMPT is enabled.
	 * Consider the following scenario:
	 * 1/ A non-secure payload (like tftf) requests a secure service from
	 *    the TSP by invoking a yielding SMC call.
	 * 2/ Later, execution jumps to the TSP in S-EL1 with the help of the
	 *    TSP Dispatcher in the Secure Monitor (EL3).
	 * 3/ While the CPU is executing the TSP, a non-secure interrupt gets
	 *    fired. This demands a context switch to the non-secure world
	 *    through the secure monitor.
	 * 4/ Consequently, the TSP in S-EL1 gets asynchronously preempted and
	 *    execution switches to the secure monitor (EL3).
	 * 5/ EL3 tries to triage the (non-secure) interrupt based on the
	 *    highest pending interrupt.
	 * 6/ However, while the NS interrupt was pending, the secure timer
	 *    fires, which makes an S-EL1 interrupt pending as well.
	 * 7/ Hence, execution jumps to this companion handler of the S-EL1
	 *    interrupt (i.e. tspd_sel1_interrupt_handler) even though the TSP
	 *    was preempted due to a non-secure interrupt.
	 * 8/ The above sequence of events explains how the TSP was preempted
	 *    by an S-EL1 interrupt indirectly, in an asynchronous way.
	 * 9/ Hence, we track TSP preemption by S-EL1 interrupt using a
	 *    per-core boolean variable.
	 * 10/ This helps us indicate that the SMC call for a TSP service was
	 *    preempted when execution resumes in the non-secure world.
	 */
	/* Check the security state when the exception was generated */
	if (get_interrupt_src_ss(flags) == NON_SECURE) {
		/* Sanity check the pointer to this cpu's context */
		assert(handle == cm_get_context(NON_SECURE));

		/* Save the non-secure context before entering the TSP */
		cm_el1_sysregs_context_save(NON_SECURE);
		tsp_ctx->preempted_by_sel1_intr = false;
	} else {
		/* Sanity check the pointer to this cpu's context */
		assert(handle == cm_get_context(SECURE));

		/*
		 * Save the secure context before entering the TSP for S-EL1
		 * interrupt handling.
		 */
		cm_el1_sysregs_context_save(SECURE);
		tsp_ctx->preempted_by_sel1_intr = true;
	}
#else
	/* Check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering the TSP */
	cm_el1_sysregs_context_save(NON_SECURE);
#endif

	assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));

	/*
	 * Determine if the TSP was previously preempted. Its last known
	 * context has to be preserved in this case.
	 * The TSP should return control to the TSPD after handling this
	 * S-EL1 interrupt. Preserve essential EL3 context to allow entry into
	 * the TSP at the S-EL1 interrupt entry point using the 'cpu_context'
	 * structure. There is no need to save the secure system register
	 * context since the TSP is supposed to preserve it during S-EL1
	 * interrupt handling.
	 */
	if (get_yield_smc_active_flag(tsp_ctx->state)) {
		tsp_ctx->saved_spsr_el3 = (uint32_t)SMC_GET_EL3(&tsp_ctx->cpu_ctx,
								CTX_SPSR_EL3);
		tsp_ctx->saved_elr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
						     CTX_ELR_EL3);
#if TSP_NS_INTR_ASYNC_PREEMPT
		memcpy(&tsp_ctx->sp_ctx, &tsp_ctx->cpu_ctx, TSPD_SP_CTX_SIZE);
#endif
	}

	cm_el1_sysregs_context_restore(SECURE);
	cm_set_elr_spsr_el3(SECURE, (uint64_t) &tsp_vectors->sel1_intr_entry,
			    SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

	cm_set_next_eret_context(SECURE);

	/*
	 * Tell the TSP that it has to handle an S-EL1 interrupt synchronously.
	 * Also the instruction in the normal world where the interrupt was
	 * generated is passed for debugging purposes. It is safe to retrieve
	 * this address from ELR_EL3 as the secure context will not take effect
	 * until el3_exit().
	 */
	SMC_RET2(&tsp_ctx->cpu_ctx, TSP_HANDLE_SEL1_INTR_AND_RETURN, read_elr_el3());
}

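/*
 * Sketch of the handshake set up above (descriptive only): the TSP enters at
 * tsp_sel1_intr_entry() with x0 == TSP_HANDLE_SEL1_INTR_AND_RETURN and x1
 * holding the preempted ELR_EL3 value for debugging. Once it has handled the
 * interrupt, the TSP issues an SMC with function ID TSP_HANDLED_S_EL1_INTR,
 * which is serviced by tspd_smc_handler() below.
 */
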
#if TSP_NS_INTR_ASYNC_PREEMPT
/*******************************************************************************
 * This function is the handler registered for Non-secure interrupts by the
 * TSPD. It validates the interrupt and upon success arranges entry into the
 * normal world for handling the interrupt.
 ******************************************************************************/
static uint64_t tspd_ns_interrupt_handler(uint32_t id,
					  uint32_t flags,
					  void *handle,
					  void *cookie)
{
	/* Check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == SECURE);

	/*
	 * Disable the routing of NS interrupts from the secure world to EL3
	 * while interrupted on this core.
	 */
	disable_intr_rm_local(INTR_TYPE_NS, SECURE);

	return tspd_handle_sp_preemption(handle);
}
#endif

/*******************************************************************************
 * Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type
 * (aarch32/aarch64) if not already known and initialises the context for entry
 * into the SP for its initialisation.
 ******************************************************************************/
static int32_t tspd_setup(void)
{
	entry_point_info_t *tsp_ep_info;
	uint32_t linear_id;

	linear_id = plat_my_core_pos();

	/*
	 * Get information about the Secure Payload (BL32) image. Its
	 * absence is a critical failure. TODO: Add support to
	 * conditionally include the SPD service
	 */
	tsp_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (!tsp_ep_info) {
		WARN("No TSP provided by BL2 boot loader, booting device"
		     " without TSP initialization. SMCs destined for TSP"
		     " will return SMC_UNK\n");
		return 1;
	}

	/*
	 * If there's no valid entry point for the SP, we return a non-zero
	 * value signalling failure to initialize the service. We bail out
	 * without registering any handlers.
	 */
	if (!tsp_ep_info->pc)
		return 1;

	/*
	 * We could inspect the SP image and determine its execution
	 * state, i.e. whether AArch32 or AArch64. Assuming it's AArch64
	 * for the time being.
	 */
	tspd_init_tsp_ep_state(tsp_ep_info,
			       TSP_AARCH64,
			       tsp_ep_info->pc,
			       &tspd_sp_context[linear_id]);

#if TSP_INIT_ASYNC
	bl31_set_next_image_type(SECURE);
#else
	/*
	 * All TSPD initialization done. Now register our init function with
	 * BL31 for deferred invocation.
	 */
	bl31_register_bl32_init(&tspd_init);
#endif
	return 0;
}

/*******************************************************************************
 * This function passes control to the Secure Payload image (BL32) for the first
 * time on the primary cpu after a cold boot. It assumes that a valid secure
 * context has already been created by tspd_setup() which can be directly used.
 * It also assumes that a valid non-secure context has been initialised by PSCI
 * so it does not need to save and restore any non-secure state. This function
 * performs a synchronous entry into the Secure Payload. The SP passes control
 * back to this routine through an SMC.
 ******************************************************************************/
int32_t tspd_init(void)
{
	uint32_t linear_id = plat_my_core_pos();
	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
	entry_point_info_t *tsp_entry_point;
	uint64_t rc;

	/*
	 * Get information about the Secure Payload (BL32) image. Its
	 * absence is a critical failure.
	 */
	tsp_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
	assert(tsp_entry_point);

	cm_init_my_context(tsp_entry_point);

	/*
	 * Arrange for an entry into the test secure payload. It will be
	 * returned via the TSP_ENTRY_DONE case.
	 */
	rc = tspd_synchronous_sp_entry(tsp_ctx);
	assert(rc != 0);

	return rc;
}

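/*
 * Control flow note (descriptive only): tspd_synchronous_sp_entry() above does
 * not return until the TSP issues an SMC with function ID TSP_ENTRY_DONE. The
 * TSP_ENTRY_DONE case in tspd_smc_handler() then calls
 * tspd_synchronous_sp_exit(tsp_ctx, x1), which resumes the C runtime context
 * saved here, so 'rc' ends up holding the value the TSP passed in x1.
 */
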
/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the Secure Payload
 * to delegate work and return results back to the non-secure state. Lastly it
 * will also return any information that the Secure Payload needs to do the
 * work assigned to it.
 ******************************************************************************/
static uintptr_t tspd_smc_handler(uint32_t smc_fid,
				  u_register_t x1,
				  u_register_t x2,
				  u_register_t x3,
				  u_register_t x4,
				  void *cookie,
				  void *handle,
				  u_register_t flags)
{
	cpu_context_t *ns_cpu_context;
	uint32_t linear_id = plat_my_core_pos(), ns;
	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
	uint64_t rc;
#if TSP_INIT_ASYNC
	entry_point_info_t *next_image_info;
#endif

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);

	switch (smc_fid) {

	/*
	 * This function ID is used by the TSP to indicate that it was
	 * preempted by a normal world IRQ.
	 */
	case TSP_PREEMPTED:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		return tspd_handle_sp_preemption(handle);

	/*
	 * This function ID is used only by the TSP to indicate that it has
	 * finished handling an S-EL1 interrupt or was preempted by a higher
	 * priority pending EL3 interrupt. Execution should resume
	 * in the normal world.
	 */
	case TSP_HANDLED_S_EL1_INTR:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		assert(handle == cm_get_context(SECURE));

		/*
		 * Restore the relevant EL3 state which was saved to service
		 * this SMC.
		 */
		if (get_yield_smc_active_flag(tsp_ctx->state)) {
			SMC_SET_EL3(&tsp_ctx->cpu_ctx,
				    CTX_SPSR_EL3,
				    tsp_ctx->saved_spsr_el3);
			SMC_SET_EL3(&tsp_ctx->cpu_ctx,
				    CTX_ELR_EL3,
				    tsp_ctx->saved_elr_el3);
#if TSP_NS_INTR_ASYNC_PREEMPT
			/*
			 * Need to restore the previously interrupted
			 * secure context.
			 */
			memcpy(&tsp_ctx->cpu_ctx, &tsp_ctx->sp_ctx,
			       TSPD_SP_CTX_SIZE);
#endif
		}

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/*
		 * Restore non-secure state. There is no need to save the
		 * secure system register context since the TSP was supposed
		 * to preserve it during S-EL1 interrupt handling.
		 */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		/* Refer to Note 1 in function tspd_sel1_interrupt_handler() */
#if TSP_NS_INTR_ASYNC_PREEMPT
		if (tsp_ctx->preempted_by_sel1_intr) {
			/* Reset the flag */
			tsp_ctx->preempted_by_sel1_intr = false;

			SMC_RET1(ns_cpu_context, SMC_PREEMPTED);
		} else {
			SMC_RET0((uint64_t) ns_cpu_context);
		}
#else
		SMC_RET0((uint64_t) ns_cpu_context);
#endif

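	/*
	 * Aside (a sketch of the rationale, not taken from the original
	 * comments): SMC_RET0 is used on the path above because, when the
	 * S-EL1 interrupt originally preempted normal world execution, the
	 * eventual ERET returns to the interrupted instruction rather than
	 * to an SMC call site, so no general purpose register may be
	 * clobbered with a return value.
	 */
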
	/*
	 * This function ID is used only by the SP to indicate it has
	 * finished initialising itself after a cold boot.
	 */
	case TSP_ENTRY_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * Stash the SP entry point information. This is done
		 * only once on the primary cpu.
		 */
		assert(tsp_vectors == NULL);
		tsp_vectors = (tsp_vectors_t *) x1;

		if (tsp_vectors) {
			set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);

			/*
			 * TSP has been successfully initialized. Register power
			 * management hooks with PSCI.
			 */
			psci_register_spd_pm_hook(&tspd_pm);

			/*
			 * Register an interrupt handler for S-EL1 interrupts
			 * generated during code execution in the non-secure
			 * state.
			 */
			flags = 0;
			set_interrupt_rm_flag(flags, NON_SECURE);
			rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
							     tspd_sel1_interrupt_handler,
							     flags);
			if (rc)
				panic();

#if TSP_NS_INTR_ASYNC_PREEMPT
			/*
			 * Register an interrupt handler for NS interrupts
			 * which, when generated during code execution in the
			 * secure state, are routed to EL3.
			 */
			flags = 0;
			set_interrupt_rm_flag(flags, SECURE);

			rc = register_interrupt_type_handler(INTR_TYPE_NS,
							     tspd_ns_interrupt_handler,
							     flags);
			if (rc)
				panic();

			/*
			 * Disable the NS interrupt locally.
			 */
			disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
		}

#if TSP_INIT_ASYNC
		/* Save the Secure EL1 system register context */
		assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx);
		cm_el1_sysregs_context_save(SECURE);

		/* Program EL3 registers to enable entry into the next EL */
		next_image_info = bl31_plat_get_next_image_ep_info(NON_SECURE);
		assert(next_image_info);
		assert(NON_SECURE ==
		       GET_SECURITY_STATE(next_image_info->h.attr));

		cm_init_my_context(next_image_info);
		cm_prepare_el3_exit(NON_SECURE);
		SMC_RET0(cm_get_context(NON_SECURE));
#else
		/*
		 * SP reports completion. The SPD must have initiated
		 * the original request through a synchronous entry
		 * into the SP. Jump back to the original C runtime
		 * context.
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);
		break;
#endif
	/*
	 * This function ID is used only by the SP to indicate it has finished
	 * aborting a preempted Yielding SMC Call.
	 */
	case TSP_ABORT_DONE:

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. turning itself on in response to an earlier psci
	 *    cpu_on request
	 * 2. resuming itself after an earlier psci cpu_suspend
	 *    request.
	 */
	case TSP_ON_DONE:
	case TSP_RESUME_DONE:

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. suspending itself after an earlier psci cpu_suspend
	 *    request.
	 * 2. turning itself off in response to an earlier psci
	 *    cpu_off request.
	 */
	case TSP_OFF_DONE:
	case TSP_SUSPEND_DONE:
	case TSP_SYSTEM_OFF_DONE:
	case TSP_SYSTEM_RESET_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * SP reports completion. The SPD must have initiated the
		 * original request through a synchronous entry into the SP.
		 * Jump back to the original C runtime context, and pass x1 as
		 * the return value to the caller.
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);
		break;

	/*
	 * Request from a non-secure client to perform an
	 * arithmetic operation or response from the Secure
	 * Payload to an earlier request.
	 */
	case TSP_FAST_FID(TSP_ADD):
	case TSP_FAST_FID(TSP_SUB):
	case TSP_FAST_FID(TSP_MUL):
	case TSP_FAST_FID(TSP_DIV):

	case TSP_YIELD_FID(TSP_ADD):
	case TSP_YIELD_FID(TSP_SUB):
	case TSP_YIELD_FID(TSP_MUL):
	case TSP_YIELD_FID(TSP_DIV):
	/*
	 * Request from a non-secure client to perform a check
	 * of the DIT PSTATE bit.
	 */
	case TSP_YIELD_FID(TSP_CHECK_DIT):
	/*
	 * Request from a non-secure client to modify the EL1
	 * context registers.
	 */
	case TSP_YIELD_FID(TSP_MODIFY_EL1_CTX):
		if (ns) {
			/*
			 * This is a fresh request from the non-secure client.
			 * The parameters are in x1 and x2. Figure out which
			 * registers need to be preserved, save the non-secure
			 * state and send the request to the Secure Payload.
			 */
			assert(handle == cm_get_context(NON_SECURE));

			/* Check if we are already preempted */
			if (get_yield_smc_active_flag(tsp_ctx->state))
				SMC_RET1(handle, SMC_UNK);

			cm_el1_sysregs_context_save(NON_SECURE);

			/* Save x1 and x2 for use by the TSP_GET_ARGS call below */
			store_tsp_args(tsp_ctx, x1, x2);

			/*
			 * We are done stashing the non-secure context. Ask the
			 * Secure Payload to do the work now.
			 */

			/*
			 * Verify if there is a valid context to use, copy the
			 * operation type and parameters to the secure context
			 * and jump to the fast smc entry point in the Secure
			 * Payload. Entry into S-EL1 will take place upon exit
			 * from this function.
			 */
			assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));

			/*
			 * Set the appropriate entry for the SMC.
			 * We expect the TSP to manage the PSTATE.I and PSTATE.F
			 * flags as appropriate.
			 */
			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
				cm_set_elr_el3(SECURE, (uint64_t)
						&tsp_vectors->fast_smc_entry);
			} else {
				set_yield_smc_active_flag(tsp_ctx->state);
				cm_set_elr_el3(SECURE, (uint64_t)
						&tsp_vectors->yield_smc_entry);
#if TSP_NS_INTR_ASYNC_PREEMPT
				/*
				 * Enable the routing of NS interrupts to EL3
				 * during processing of a Yielding SMC Call on
				 * this core.
				 */
				enable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif

#if EL3_EXCEPTION_HANDLING
				/*
				 * With EL3 exception handling, while an SMC is
				 * being processed, Non-secure interrupts can't
				 * preempt Secure execution. However, for
				 * yielding SMCs, we want preemption to happen;
				 * so explicitly allow NS preemption in this
				 * case, and supply the preemption return code
				 * for the TSP.
				 */
				ehf_allow_ns_preemption(TSP_PREEMPTED);
#endif
			}

			cm_el1_sysregs_context_restore(SECURE);
			cm_set_next_eret_context(SECURE);
			SMC_RET3(&tsp_ctx->cpu_ctx, smc_fid, x1, x2);
		} else {
			/*
			 * This is the result from the Secure Payload of an
			 * earlier request. The results are in x1-x3. Copy them
			 * into the non-secure context, save the secure state
			 * and return to the non-secure state.
			 */
			assert(handle == cm_get_context(SECURE));
			cm_el1_sysregs_context_save(SECURE);

			/* Get a reference to the non-secure context */
			ns_cpu_context = cm_get_context(NON_SECURE);
			assert(ns_cpu_context);

			/* Restore non-secure state */
			cm_el1_sysregs_context_restore(NON_SECURE);
			cm_set_next_eret_context(NON_SECURE);
			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_YIELD) {
				clr_yield_smc_active_flag(tsp_ctx->state);
#if TSP_NS_INTR_ASYNC_PREEMPT
				/*
				 * Disable the routing of NS interrupts to EL3
				 * after processing of a Yielding SMC Call on
				 * this core is finished.
				 */
				disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
			}

			SMC_RET3(ns_cpu_context, x1, x2, x3);
		}
		assert(0); /* Unreachable */

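	/*
	 * Illustrative summary of the exchange above (a sketch, not
	 * normative): for a yielding arithmetic request the sequence of
	 * SMCs seen by the dispatcher is roughly:
	 *
	 *	NS client: TSP_YIELD_FID(TSP_ADD), arguments in x1/x2
	 *	TSP:       TSP_GET_ARGS to fetch the stashed x1/x2
	 *	TSP:       TSP_YIELD_FID(TSP_ADD) with results in x1-x3,
	 *	           which are copied back to the NS client above.
	 *
	 * A preemption in the middle of this sequence surfaces to the NS
	 * client as SMC_PREEMPTED, to be followed by TSP_FID_RESUME or
	 * TSP_FID_ABORT, both handled below.
	 */
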
	/*
	 * Request from the non-secure world to abort a preempted Yielding SMC
	 * Call.
	 */
	case TSP_FID_ABORT:
		/* ABORT should only be invoked by the normal world */
		if (!ns) {
			assert(0);
			break;
		}

		assert(handle == cm_get_context(NON_SECURE));
		cm_el1_sysregs_context_save(NON_SECURE);

		/* Abort the preempted SMC request */
		if (!tspd_abort_preempted_smc(tsp_ctx)) {
			/*
			 * If there was no preempted SMC to abort, return
			 * SMC_UNK.
			 *
			 * Restoring the NON_SECURE context is not necessary as
			 * the synchronous entry did not take place if the
			 * return code of tspd_abort_preempted_smc is zero.
			 */
			cm_set_next_eret_context(NON_SECURE);
			break;
		}

		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);
		SMC_RET1(handle, SMC_OK);

	/*
	 * Request from the non-secure world to resume the preempted
	 * Yielding SMC Call.
	 */
	case TSP_FID_RESUME:
		/* RESUME should be invoked only by the normal world */
		if (!ns) {
			assert(0);
			break;
		}

		/*
		 * This is a resume request from the non-secure client.
		 * Save the non-secure state and send the request to
		 * the Secure Payload.
		 */
		assert(handle == cm_get_context(NON_SECURE));

		/* Check if we are already preempted before resume */
		if (!get_yield_smc_active_flag(tsp_ctx->state))
			SMC_RET1(handle, SMC_UNK);

		cm_el1_sysregs_context_save(NON_SECURE);

		/*
		 * We are done stashing the non-secure context. Ask the
		 * Secure Payload to do the work now.
		 */
#if TSP_NS_INTR_ASYNC_PREEMPT
		/*
		 * Enable the routing of NS interrupts to EL3 during resumption
		 * of a Yielding SMC Call on this core.
		 */
		enable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif

#if EL3_EXCEPTION_HANDLING
		/*
		 * Allow the resumed yielding SMC processing to be preempted by
		 * Non-secure interrupts. Also, supply the preemption return
		 * code for the TSP.
		 */
		ehf_allow_ns_preemption(TSP_PREEMPTED);
#endif

		/*
		 * We just need to return to the preempted point in
		 * the TSP and the execution will resume as normal.
		 */
		cm_el1_sysregs_context_restore(SECURE);
		cm_set_next_eret_context(SECURE);
		SMC_RET0(&tsp_ctx->cpu_ctx);

	/*
	 * This is a request from the Secure Payload for more arguments
	 * for an ongoing arithmetic operation requested by the
	 * non-secure world. Simply return the arguments from the non-
	 * secure client in the original call.
	 */
	case TSP_GET_ARGS:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		get_tsp_args(tsp_ctx, x1, x2);
		SMC_RET2(handle, x1, x2);

	case TOS_CALL_COUNT:
		/*
		 * Return the number of service function IDs implemented to
		 * provide service to the non-secure world.
		 */
		SMC_RET1(handle, TSP_NUM_FID);

	case TOS_UID:
		/* Return TSP UID to the caller */
		SMC_UUID_RET(handle, tsp_uuid);

	case TOS_CALL_VERSION:
		/* Return the version of the current implementation */
		SMC_RET2(handle, TSP_VERSION_MAJOR, TSP_VERSION_MINOR);

	default:
		break;
	}

	SMC_RET1(handle, SMC_UNK);
}

/* Define a SPD runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
	tspd_fast,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_FAST,
	tspd_setup,
	tspd_smc_handler
);

/* Define a SPD runtime service descriptor for Yielding SMC Calls */
DECLARE_RT_SVC(
	tspd_std,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_YIELD,
	NULL,
	tspd_smc_handler
);
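
/*
 * Note (descriptive only): both descriptors above cover the Trusted OS
 * function ID range (OEN_TOS_START to OEN_TOS_END) and share
 * tspd_smc_handler(). The runtime service framework selects between them
 * using the call type encoded in bit 31 of the SMC function ID (1 for fast
 * calls, 0 for yielding calls, per the SMC Calling Convention). Only the
 * fast descriptor supplies a setup function, so tspd_setup() runs exactly
 * once.
 */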