/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*******************************************************************************
 * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
 * plug-in component to the Secure Monitor, registered as a runtime service. The
 * SPD is expected to be a functional extension of the Secure Payload (SP) that
 * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
 * the Trusted OS/Applications range to the dispatcher. The SPD will either
 * handle the request locally or delegate it to the Secure Payload. It is also
 * responsible for initialising and maintaining communication with the SP.
 ******************************************************************************/
#include <arch_helpers.h>
#include <assert.h>
#include <bl31.h>
#include <bl_common.h>
#include <context_mgmt.h>
#include <debug.h>
#include <ehf.h>
#include <errno.h>
#include <platform.h>
#include <runtime_svc.h>
#include <stddef.h>
#include <string.h>
#include <tsp.h>
#include <uuid.h>
#include "tspd_private.h"

/*******************************************************************************
 * Address of the entrypoint vector table in the Secure Payload. It is
 * initialised once on the primary core after a cold boot.
 ******************************************************************************/
tsp_vectors_t *tsp_vectors;

/*******************************************************************************
 * Array to keep track of per-cpu Secure Payload state
 ******************************************************************************/
tsp_context_t tspd_sp_context[TSPD_CORE_COUNT];

/* TSP UID */
DEFINE_SVC_UUID(tsp_uuid,
		0x5b3056a0, 0x3291, 0x427b, 0x98, 0x11,
		0x71, 0x68, 0xca, 0x50, 0xf3, 0xfa);

int32_t tspd_init(void);

/*
 * This helper function handles Secure EL1 preemption. The preemption could be
 * due to Non-secure interrupts or EL3 interrupts. In both cases we context
 * switch to the normal world; in the case of EL3 interrupts, the interrupt
 * will again be routed to EL3 and handled at the exception vectors.
 */
uint64_t tspd_handle_sp_preemption(void *handle)
{
	cpu_context_t *ns_cpu_context;

	assert(handle == cm_get_context(SECURE));
	cm_el1_sysregs_context_save(SECURE);

	/* Get a reference to the non-secure context */
	ns_cpu_context = cm_get_context(NON_SECURE);
	assert(ns_cpu_context);

	/*
	 * To allow the Secure EL1 interrupt handler to re-enter the TSP while
	 * the TSP is preempted, the secure system register context which will
	 * get overwritten must be additionally saved. This is currently done
	 * by the TSPD S-EL1 interrupt handler.
	 */

	/*
	 * Restore non-secure state.
	 */
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	/*
	 * The TSP was preempted during execution of a Yielding SMC Call.
	 * Return to the normal world with SMC_PREEMPTED as the error code
	 * in x0.
	 */
	SMC_RET1(ns_cpu_context, SMC_PREEMPTED);
}
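
/*
 * Note: on receiving SMC_PREEMPTED in x0, the normal world is expected to
 * either resume the preempted Yielding SMC Call with TSP_FID_RESUME or abort
 * it with TSP_FID_ABORT; both requests are handled in tspd_smc_handler()
 * below.
 */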

/*******************************************************************************
 * This function is the handler registered for S-EL1 interrupts by the TSPD. It
 * validates the interrupt and upon success arranges entry into the TSP at
 * 'tsp_sel1_intr_entry()' for handling the interrupt.
 ******************************************************************************/
static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
					    uint32_t flags,
					    void *handle,
					    void *cookie)
{
	uint32_t linear_id;
	tsp_context_t *tsp_ctx;

	/* Check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering the TSP */
	cm_el1_sysregs_context_save(NON_SECURE);

	/* Get a reference to this cpu's TSP context */
	linear_id = plat_my_core_pos();
	tsp_ctx = &tspd_sp_context[linear_id];
	assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));

	/*
	 * Determine if the TSP was previously preempted. Its last known
	 * context has to be preserved in this case.
	 * The TSP should return control to the TSPD after handling this
	 * S-EL1 interrupt. Preserve essential EL3 context to allow entry into
	 * the TSP at the S-EL1 interrupt entry point using the 'cpu_context'
	 * structure. There is no need to save the secure system register
	 * context since the TSP is supposed to preserve it during S-EL1
	 * interrupt handling.
	 */
	if (get_yield_smc_active_flag(tsp_ctx->state)) {
		tsp_ctx->saved_spsr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
						      CTX_SPSR_EL3);
		tsp_ctx->saved_elr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
						     CTX_ELR_EL3);
#if TSP_NS_INTR_ASYNC_PREEMPT
		/* Need to save the previously interrupted secure context */
		memcpy(&tsp_ctx->sp_ctx, &tsp_ctx->cpu_ctx, TSPD_SP_CTX_SIZE);
#endif
	}

	cm_el1_sysregs_context_restore(SECURE);
	cm_set_elr_spsr_el3(SECURE, (uint64_t) &tsp_vectors->sel1_intr_entry,
			    SPSR_64(MODE_EL1, MODE_SP_ELX,
				    DISABLE_ALL_EXCEPTIONS));

	cm_set_next_eret_context(SECURE);

	/*
	 * Tell the TSP that it has to handle a S-EL1 interrupt synchronously.
	 * The address of the instruction in the normal world at which the
	 * interrupt was generated is also passed for debugging purposes. It is
	 * safe to retrieve this address from ELR_EL3 as the secure context
	 * will not take effect until el3_exit().
	 */
	SMC_RET2(&tsp_ctx->cpu_ctx, TSP_HANDLE_SEL1_INTR_AND_RETURN,
		 read_elr_el3());
}

#if TSP_NS_INTR_ASYNC_PREEMPT
/*******************************************************************************
 * This function is the handler registered for non-secure interrupts by the
 * TSPD. It validates the interrupt and upon success arranges entry into the
 * normal world for handling the interrupt.
 ******************************************************************************/
static uint64_t tspd_ns_interrupt_handler(uint32_t id,
					  uint32_t flags,
					  void *handle,
					  void *cookie)
{
	/* Check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == SECURE);

	/*
	 * Disable the routing of NS interrupts from the secure world to EL3
	 * while interrupted on this core.
	 */
	disable_intr_rm_local(INTR_TYPE_NS, SECURE);

	return tspd_handle_sp_preemption(handle);
}
#endif
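
/*
 * Summary note: the TSP advertises its entry points through the tsp_vectors_t
 * table it passes back in x1 of TSP_ENTRY_DONE. The dispatcher then picks the
 * entry point that matches the event, e.g.:
 *
 *   S-EL1 interrupt      -> tsp_vectors->sel1_intr_entry
 *   Fast SMC request     -> tsp_vectors->fast_smc_entry
 *   Yielding SMC request -> tsp_vectors->yield_smc_entry
 */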

/*******************************************************************************
 * Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type
 * (aarch32/aarch64) if not already known and initialises the context for entry
 * into the SP for its initialisation.
 ******************************************************************************/
static int32_t tspd_setup(void)
{
	entry_point_info_t *tsp_ep_info;
	uint32_t linear_id;

	linear_id = plat_my_core_pos();

	/*
	 * Get information about the Secure Payload (BL32) image. Its
	 * absence is a critical failure. TODO: Add support to
	 * conditionally include the SPD service
	 */
	tsp_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (!tsp_ep_info) {
		WARN("No TSP provided by BL2 boot loader, booting device"
		     " without TSP initialization. SMCs destined for the TSP"
		     " will return SMC_UNK\n");
		return 1;
	}

	/*
	 * If there's no valid entry point for the SP, we return a non-zero
	 * value signalling failure to initialize the service. We bail out
	 * without registering any handlers.
	 */
	if (!tsp_ep_info->pc)
		return 1;

	/*
	 * We could inspect the SP image and determine its execution
	 * state, i.e. whether it is AArch32 or AArch64. Assume AArch64
	 * for the time being.
	 */
	tspd_init_tsp_ep_state(tsp_ep_info,
			       TSP_AARCH64,
			       tsp_ep_info->pc,
			       &tspd_sp_context[linear_id]);

#if TSP_INIT_ASYNC
	bl31_set_next_image_type(SECURE);
#else
	/*
	 * All TSPD initialization done. Now register our init function with
	 * BL31 for deferred invocation
	 */
	bl31_register_bl32_init(&tspd_init);
#endif
	return 0;
}

/*******************************************************************************
 * This function passes control to the Secure Payload image (BL32) for the first
 * time on the primary cpu after a cold boot. It assumes that a valid secure
 * context has already been created by tspd_setup() which can be directly used.
 * It also assumes that a valid non-secure context has been initialised by PSCI
 * so it does not need to save and restore any non-secure state. This function
 * performs a synchronous entry into the Secure Payload. The SP passes control
 * back to this routine through an SMC.
 ******************************************************************************/
int32_t tspd_init(void)
{
	uint32_t linear_id = plat_my_core_pos();
	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
	entry_point_info_t *tsp_entry_point;
	uint64_t rc;

	/*
	 * Get information about the Secure Payload (BL32) image. Its
	 * absence is a critical failure.
	 */
	tsp_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
	assert(tsp_entry_point);

	cm_init_my_context(tsp_entry_point);

	/*
	 * Arrange for an entry into the test secure payload. Control will be
	 * returned via the TSP_ENTRY_DONE case.
	 */
	rc = tspd_synchronous_sp_entry(tsp_ctx);
	assert(rc != 0);

	return rc;
}
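
/*
 * Note on the synchronous entry/exit pattern used above and in the *_DONE
 * cases below: tspd_synchronous_sp_entry() stashes the EL3 C runtime context
 * and ERETs into the TSP; when the TSP later issues the corresponding *_DONE
 * SMC, the handler calls tspd_synchronous_sp_exit(tsp_ctx, x1), which makes
 * the original tspd_synchronous_sp_entry() call return with the value the
 * TSP passed in x1.
 */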

/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the Secure Payload
 * to delegate work and return results back to the non-secure state. Lastly it
 * will also return any information that the secure payload needs to do the
 * work assigned to it.
 ******************************************************************************/
static uintptr_t tspd_smc_handler(uint32_t smc_fid,
				  u_register_t x1,
				  u_register_t x2,
				  u_register_t x3,
				  u_register_t x4,
				  void *cookie,
				  void *handle,
				  u_register_t flags)
{
	cpu_context_t *ns_cpu_context;
	uint32_t linear_id = plat_my_core_pos(), ns;
	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
	uint64_t rc;
#if TSP_INIT_ASYNC
	entry_point_info_t *next_image_info;
#endif

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);

	switch (smc_fid) {

	/*
	 * This function ID is used by the TSP to indicate that it was
	 * preempted by a normal world IRQ.
	 */
	case TSP_PREEMPTED:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		return tspd_handle_sp_preemption(handle);

	/*
	 * This function ID is used only by the TSP to indicate that it has
	 * finished handling a S-EL1 interrupt or was preempted by a higher
	 * priority pending EL3 interrupt. Execution should resume
	 * in the normal world.
	 */
	case TSP_HANDLED_S_EL1_INTR:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		assert(handle == cm_get_context(SECURE));

		/*
		 * Restore the relevant EL3 state which was saved to service
		 * this SMC.
		 */
		if (get_yield_smc_active_flag(tsp_ctx->state)) {
			SMC_SET_EL3(&tsp_ctx->cpu_ctx,
				    CTX_SPSR_EL3,
				    tsp_ctx->saved_spsr_el3);
			SMC_SET_EL3(&tsp_ctx->cpu_ctx,
				    CTX_ELR_EL3,
				    tsp_ctx->saved_elr_el3);
#if TSP_NS_INTR_ASYNC_PREEMPT
			/*
			 * Need to restore the previously interrupted
			 * secure context.
			 */
			memcpy(&tsp_ctx->cpu_ctx, &tsp_ctx->sp_ctx,
			       TSPD_SP_CTX_SIZE);
#endif
		}

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/*
		 * Restore non-secure state. There is no need to save the
		 * secure system register context since the TSP was supposed
		 * to preserve it during S-EL1 interrupt handling.
		 */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		SMC_RET0((uint64_t) ns_cpu_context);

	/*
	 * This function ID is used only by the SP to indicate it has
	 * finished initialising itself after a cold boot.
	 */
	case TSP_ENTRY_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * Stash the SP entry points information. This is done
		 * only once on the primary cpu.
		 */
		assert(tsp_vectors == NULL);
		tsp_vectors = (tsp_vectors_t *) x1;

		if (tsp_vectors) {
			set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);

			/*
			 * TSP has been successfully initialized. Register
			 * power management hooks with PSCI.
			 */
			psci_register_spd_pm_hook(&tspd_pm);

			/*
			 * Register an interrupt handler for S-EL1 interrupts
			 * generated while executing in the non-secure state.
			 */
			flags = 0;
			set_interrupt_rm_flag(flags, NON_SECURE);
			rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
						tspd_sel1_interrupt_handler,
						flags);
			if (rc)
				panic();
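
			/*
			 * With the routing model set above, S-EL1 interrupts
			 * that fire while the normal world is running are
			 * taken to EL3 and delivered to the
			 * tspd_sel1_interrupt_handler() registered here.
			 */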

#if TSP_NS_INTR_ASYNC_PREEMPT
			/*
			 * Register an interrupt handler for NS interrupts
			 * generated while executing in the secure state;
			 * these are routed to EL3.
			 */
			flags = 0;
			set_interrupt_rm_flag(flags, SECURE);

			rc = register_interrupt_type_handler(INTR_TYPE_NS,
						tspd_ns_interrupt_handler,
						flags);
			if (rc)
				panic();

			/*
			 * Disable the NS interrupt locally.
			 */
			disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
		}

#if TSP_INIT_ASYNC
		/* Save the Secure EL1 system register context */
		assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx);
		cm_el1_sysregs_context_save(SECURE);

		/* Program EL3 registers to enable entry into the next EL */
		next_image_info = bl31_plat_get_next_image_ep_info(NON_SECURE);
		assert(next_image_info);
		assert(NON_SECURE ==
		       GET_SECURITY_STATE(next_image_info->h.attr));

		cm_init_my_context(next_image_info);
		cm_prepare_el3_exit(NON_SECURE);
		SMC_RET0(cm_get_context(NON_SECURE));
#else
		/*
		 * SP reports completion. The SPD must have initiated
		 * the original request through a synchronous entry
		 * into the SP. Jump back to the original C runtime
		 * context.
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);
		break;
#endif

	/*
	 * This function ID is used only by the SP to indicate it has finished
	 * aborting a preempted Yielding SMC Call.
	 */
	case TSP_ABORT_DONE:

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. turning itself on in response to an earlier psci
	 *    cpu_on request
	 * 2. resuming itself after an earlier psci cpu_suspend
	 *    request.
	 */
	case TSP_ON_DONE:
	case TSP_RESUME_DONE:

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. suspending itself after an earlier psci cpu_suspend
	 *    request.
	 * 2. turning itself off in response to an earlier psci
	 *    cpu_off request.
	 */
	case TSP_OFF_DONE:
	case TSP_SUSPEND_DONE:
	case TSP_SYSTEM_OFF_DONE:
	case TSP_SYSTEM_RESET_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * SP reports completion. The SPD must have initiated the
		 * original request through a synchronous entry into the SP.
		 * Jump back to the original C runtime context, and pass x1 as
		 * the return value to the caller.
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);
		break;

	/*
	 * Request from a non-secure client to perform an arithmetic
	 * operation, or a response from the secure payload to an
	 * earlier request.
	 */
	case TSP_FAST_FID(TSP_ADD):
	case TSP_FAST_FID(TSP_SUB):
	case TSP_FAST_FID(TSP_MUL):
	case TSP_FAST_FID(TSP_DIV):

	case TSP_YIELD_FID(TSP_ADD):
	case TSP_YIELD_FID(TSP_SUB):
	case TSP_YIELD_FID(TSP_MUL):
	case TSP_YIELD_FID(TSP_DIV):
		if (ns) {
			/*
			 * This is a fresh request from the non-secure client.
			 * The parameters are in x1 and x2. Figure out which
			 * registers need to be preserved, save the non-secure
			 * state and send the request to the secure payload.
			 */
			assert(handle == cm_get_context(NON_SECURE));

			/* Check if we are already preempted */
			if (get_yield_smc_active_flag(tsp_ctx->state))
				SMC_RET1(handle, SMC_UNK);

			cm_el1_sysregs_context_save(NON_SECURE);

			/* Save x1 and x2 for the TSP_GET_ARGS call below */
			store_tsp_args(tsp_ctx, x1, x2);
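
			/*
			 * The stashed arguments are handed back to the TSP
			 * when it issues TSP_GET_ARGS (handled below), so the
			 * operands do not have to survive the world switch in
			 * general purpose registers.
			 */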

			/*
			 * We are done stashing the non-secure context. Ask the
			 * secure payload to do the work now.
			 */

			/*
			 * Verify that there is a valid context to use, copy
			 * the operation type and parameters to the secure
			 * context and jump to the fast SMC entry point in the
			 * secure payload. Entry into S-EL1 will take place
			 * upon exit from this function.
			 */
			assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));

			/*
			 * Set the appropriate entry point for the SMC. We
			 * expect the TSP to manage the PSTATE.I and PSTATE.F
			 * flags as appropriate.
			 */
			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
				cm_set_elr_el3(SECURE, (uint64_t)
						&tsp_vectors->fast_smc_entry);
			} else {
				set_yield_smc_active_flag(tsp_ctx->state);
				cm_set_elr_el3(SECURE, (uint64_t)
						&tsp_vectors->yield_smc_entry);
#if TSP_NS_INTR_ASYNC_PREEMPT
				/*
				 * Enable the routing of NS interrupts to EL3
				 * during processing of a Yielding SMC Call on
				 * this core.
				 */
				enable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif

#if EL3_EXCEPTION_HANDLING
				/*
				 * With EL3 exception handling, while an SMC is
				 * being processed, Non-secure interrupts can't
				 * preempt Secure execution. However, for
				 * yielding SMCs, we want preemption to happen;
				 * so explicitly allow NS preemption in this
				 * case, and supply the preemption return code
				 * for the TSP.
				 */
				ehf_allow_ns_preemption(TSP_PREEMPTED);
#endif
			}

			cm_el1_sysregs_context_restore(SECURE);
			cm_set_next_eret_context(SECURE);
			SMC_RET3(&tsp_ctx->cpu_ctx, smc_fid, x1, x2);
		} else {
			/*
			 * This is the result from the secure client of an
			 * earlier request. The results are in x1-x3. Copy
			 * them into the non-secure context, save the secure
			 * state and return to the non-secure state.
			 */
			assert(handle == cm_get_context(SECURE));
			cm_el1_sysregs_context_save(SECURE);

			/* Get a reference to the non-secure context */
			ns_cpu_context = cm_get_context(NON_SECURE);
			assert(ns_cpu_context);

			/* Restore non-secure state */
			cm_el1_sysregs_context_restore(NON_SECURE);
			cm_set_next_eret_context(NON_SECURE);
			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_YIELD) {
				clr_yield_smc_active_flag(tsp_ctx->state);
#if TSP_NS_INTR_ASYNC_PREEMPT
				/*
				 * Disable the routing of NS interrupts to EL3
				 * after processing of a Yielding SMC Call on
				 * this core is finished.
				 */
				disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
			}

			SMC_RET3(ns_cpu_context, x1, x2, x3);
		}

	/*
	 * Request from the non-secure world to abort a preempted Yielding SMC
	 * Call.
	 */
	case TSP_FID_ABORT:
		/* ABORT should only be invoked by the normal world */
		if (!ns) {
			assert(0);
			break;
		}

		assert(handle == cm_get_context(NON_SECURE));
		cm_el1_sysregs_context_save(NON_SECURE);

		/* Abort the preempted SMC request */
		if (!tspd_abort_preempted_smc(tsp_ctx)) {
			/*
			 * If there was no preempted SMC to abort, return
			 * SMC_UNK.
			 *
			 * Restoring the NON_SECURE context is not necessary as
			 * the synchronous entry did not take place if the
			 * return code of tspd_abort_preempted_smc is zero.
			 */
			cm_set_next_eret_context(NON_SECURE);
			break;
		}

		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);
		SMC_RET1(handle, SMC_OK);
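
	/*
	 * Illustrative sketch of the normal-world side of the abort/resume
	 * protocol, assuming a hypothetical smc() helper that issues the SMC
	 * instruction with its arguments in x0-x2:
	 *
	 *   ret = smc(TSP_YIELD_FID(TSP_ADD), a, b);
	 *   while (ret == SMC_PREEMPTED) {
	 *           // Either give up: ret = smc(TSP_FID_ABORT);
	 *           // or let the TSP continue where it was preempted:
	 *           ret = smc(TSP_FID_RESUME);
	 *   }
	 */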

	/*
	 * Request from the non-secure world to resume a preempted Yielding
	 * SMC Call.
	 */
	case TSP_FID_RESUME:
		/* RESUME should be invoked only by the normal world */
		if (!ns) {
			assert(0);
			break;
		}

		/*
		 * This is a resume request from the non-secure client.
		 * Save the non-secure state and send the request to
		 * the secure payload.
		 */
		assert(handle == cm_get_context(NON_SECURE));

		/* Check that there is a preempted Yielding SMC Call to resume */
		if (!get_yield_smc_active_flag(tsp_ctx->state))
			SMC_RET1(handle, SMC_UNK);

		cm_el1_sysregs_context_save(NON_SECURE);

		/*
		 * We are done stashing the non-secure context. Ask the
		 * secure payload to do the work now.
		 */
#if TSP_NS_INTR_ASYNC_PREEMPT
		/*
		 * Enable the routing of NS interrupts to EL3 during the
		 * resumption of a Yielding SMC Call on this core.
		 */
		enable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif

#if EL3_EXCEPTION_HANDLING
		/*
		 * Allow the resumed yielding SMC processing to be preempted by
		 * Non-secure interrupts. Also, supply the preemption return
		 * code for the TSP.
		 */
		ehf_allow_ns_preemption(TSP_PREEMPTED);
#endif

		/*
		 * We just need to return to the preempted point in the TSP
		 * and execution will resume as normal.
		 */
		cm_el1_sysregs_context_restore(SECURE);
		cm_set_next_eret_context(SECURE);
		SMC_RET0(&tsp_ctx->cpu_ctx);

	/*
	 * This is a request from the secure payload for more arguments
	 * for an ongoing arithmetic operation requested by the
	 * non-secure world. Simply return the arguments from the non-
	 * secure client in the original call.
	 */
	case TSP_GET_ARGS:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		get_tsp_args(tsp_ctx, x1, x2);
		SMC_RET2(handle, x1, x2);

	case TOS_CALL_COUNT:
		/*
		 * Return the number of service function IDs implemented to
		 * provide service to the non-secure world.
		 */
		SMC_RET1(handle, TSP_NUM_FID);

	case TOS_UID:
		/* Return the TSP UID to the caller */
		SMC_UUID_RET(handle, tsp_uuid);

	case TOS_CALL_VERSION:
		/* Return the version of the current implementation */
		SMC_RET2(handle, TSP_VERSION_MAJOR, TSP_VERSION_MINOR);

	default:
		break;
	}

	SMC_RET1(handle, SMC_UNK);
}

/* Define an SPD runtime service descriptor for Fast SMC Calls */
DECLARE_RT_SVC(
	tspd_fast,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_FAST,
	tspd_setup,
	tspd_smc_handler
);

/* Define an SPD runtime service descriptor for Yielding SMC Calls */
DECLARE_RT_SVC(
	tspd_std,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_YIELD,
	NULL,
	tspd_smc_handler
);