1 /* 2 * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved. 3 * 4 * SPDX-License-Identifier: BSD-3-Clause 5 */ 6 7 8 /******************************************************************************* 9 * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a 10 * plug-in component to the Secure Monitor, registered as a runtime service. The 11 * SPD is expected to be a functional extension of the Secure Payload (SP) that 12 * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting 13 * the Trusted OS/Applications range to the dispatcher. The SPD will either 14 * handle the request locally or delegate it to the Secure Payload. It is also 15 * responsible for initialising and maintaining communication with the SP. 16 ******************************************************************************/ 17 #include <assert.h> 18 #include <errno.h> 19 #include <inttypes.h> 20 #include <stddef.h> 21 22 #include <arch_helpers.h> 23 #include <bl31/bl31.h> 24 #include <common/bl_common.h> 25 #include <common/debug.h> 26 #include <common/runtime_svc.h> 27 #include <lib/coreboot.h> 28 #include <lib/el3_runtime/context_mgmt.h> 29 #include <lib/optee_utils.h> 30 #if TRANSFER_LIST 31 #include <transfer_list.h> 32 #endif 33 #include <lib/xlat_tables/xlat_tables_v2.h> 34 #if OPTEE_ALLOW_SMC_LOAD 35 #include <libfdt.h> 36 #endif /* OPTEE_ALLOW_SMC_LOAD */ 37 #include <plat/common/platform.h> 38 #include <services/oem/chromeos/widevine_smc_handlers.h> 39 #include <tools_share/uuid.h> 40 41 #include "opteed_private.h" 42 #include "teesmc_opteed.h" 43 44 #if OPTEE_ALLOW_SMC_LOAD 45 static struct transfer_list_header __maybe_unused *bl31_tl; 46 #endif 47 48 /******************************************************************************* 49 * Address of the entrypoint vector table in OPTEE. It is 50 * initialised once on the primary core after a cold boot. 
 ******************************************************************************/
struct optee_vectors *optee_vector_table;

/*******************************************************************************
 * Array to keep track of per-cpu OPTEE state
 ******************************************************************************/
optee_context_t opteed_sp_context[OPTEED_CORE_COUNT];

/* Execution state of the OP-TEE image: OPTEE_AARCH32 or OPTEE_AARCH64 */
uint32_t opteed_rw;

#if OPTEE_ALLOW_SMC_LOAD
/* True until the one-shot image-load SMC has been consumed (or refused) */
static bool opteed_allow_load;
/* OP-TEE image loading service UUID */
DEFINE_SVC_UUID2(optee_image_load_uuid,
	0xb1eafba3, 0x5d31, 0x4612, 0xb9, 0x06,
	0xc4, 0xc7, 0xa4, 0xbe, 0x3c, 0xc0);

/* Size of the device tree buffer handed to OP-TEE when loaded via SMC */
#define OPTEED_FDT_SIZE 1024
/* Cache-line aligned so the buffer can be cleanly flushed before handoff */
static uint8_t fdt_buf[OPTEED_FDT_SIZE] __aligned(CACHE_WRITEBACK_GRANULE);

#else
static int32_t opteed_init(void);
#endif

/*
 * Combine two 32-bit halves into one 64-bit value: 'high' supplies bits
 * [63:32] and 'low' supplies bits [31:0].
 */
uint64_t dual32to64(uint32_t high, uint32_t low)
{
	return ((uint64_t)high << 32) | low;
}

/*******************************************************************************
 * This function is the handler registered for S-EL1 interrupts by the
 * OPTEED. It validates the interrupt and upon success arranges entry into
 * the OPTEE at 'optee_fiq_entry()' for handling the interrupt.
 ******************************************************************************/
static uint64_t opteed_sel1_interrupt_handler(uint32_t id,
					    uint32_t flags,
					    void *handle,
					    void *cookie)
{
	uint32_t linear_id;
	optee_context_t *optee_ctx;

#if OPTEE_ALLOW_SMC_LOAD
	if (optee_vector_table == NULL) {
		/* OPTEE is not loaded yet, ignore this interrupt */
		SMC_RET0(handle);
	}
#endif

	/* Check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering the OPTEE */
	cm_el1_sysregs_context_save(NON_SECURE);

	/* Get a reference to this cpu's OPTEE context */
	linear_id = plat_my_core_pos();
	optee_ctx = &opteed_sp_context[linear_id];
	assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE));

	/* Route the next secure-world ERET to OP-TEE's FIQ entry vector */
	cm_set_elr_el3(SECURE, (uint64_t)&optee_vector_table->fiq_entry);
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	/*
	 * Tell the OPTEE that it has to handle an FIQ (synchronously).
	 * Also the instruction in normal world where the interrupt was
	 * generated is passed for debugging purposes. It is safe to
	 * retrieve this address from ELR_EL3 as the secure context will
	 * not take effect until el3_exit().
	 */
	SMC_RET1(&optee_ctx->cpu_ctx, read_elr_el3());
}

/*
 * Registers an interrupt handler for S-EL1 interrupts when generated during
 * code executing in the non-secure state. Panics if it fails to do so.
 */
static void register_opteed_interrupt_handler(void)
{
	u_register_t flags;
	uint64_t rc;

	/* Route S-EL1 interrupts taken from the non-secure state to EL3 */
	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					     opteed_sel1_interrupt_handler,
					     flags);
	if (rc)
		panic();
}

/*******************************************************************************
 * OPTEE Dispatcher setup. The OPTEED finds out the OPTEE entrypoint and type
 * (aarch32/aarch64) if not already known and initialises the context for entry
 * into OPTEE for its initialization.
 ******************************************************************************/
static int32_t opteed_setup(void)
{
#if OPTEE_ALLOW_SMC_LOAD
	opteed_allow_load = true;
	INFO("Delaying OP-TEE setup until we receive an SMC call to load it\n");
	/*
	 * We must register the interrupt handler now so that the interrupt
	 * priorities are not changed after starting the linux kernel.
	 */
	register_opteed_interrupt_handler();
	return 0;
#else
	entry_point_info_t *optee_ep_info;
	uint32_t linear_id;
	uint64_t arg0;
	uint64_t arg1;
	uint64_t arg2;
	uint64_t arg3;
	struct transfer_list_header __maybe_unused *tl = NULL;
	struct transfer_list_entry __maybe_unused *te = NULL;
	void __maybe_unused *dt = NULL;

	linear_id = plat_my_core_pos();

	/*
	 * Get information about the Secure Payload (BL32) image. Its
	 * absence is a critical failure. TODO: Add support to
	 * conditionally include the SPD service
	 */
	optee_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (!optee_ep_info) {
		WARN("No OPTEE provided by BL2 boot loader, Booting device"
			" without OPTEE initialization. SMC`s destined for OPTEE"
			" will return SMC_UNK\n");
		return 1;
	}

	/*
	 * If there's no valid entry point for SP, we return a non-zero value
	 * signalling failure initializing the service. We bail out without
	 * registering any handlers
	 */
	if (!optee_ep_info->pc)
		return 1;

#if TRANSFER_LIST
	/* BL2 may hand over a transfer list in arg3 instead of raw args */
	tl = (void *)optee_ep_info->args.arg3;

	if (transfer_list_check_header(tl)) {
		te = transfer_list_find(tl, TL_TAG_FDT);
		dt = transfer_list_entry_data(te);

		/* Execution state is taken from the SPSR set up by BL2 */
		opteed_rw = GET_RW(optee_ep_info->spsr);
		if (opteed_rw == OPTEE_AARCH64) {
			/*
			 * Per the handoff register convention, x1 must carry
			 * the expected signature/version value.
			 */
			if (optee_ep_info->args.arg1 !=
			    TRANSFER_LIST_HANDOFF_X1_VALUE(
				    REGISTER_CONVENTION_VERSION))
				return 1;

			/* AArch64 convention: DT pointer goes in arg0 */
			arg0 = (uint64_t)dt;
			arg2 = 0;
		} else {
			if (optee_ep_info->args.arg1 !=
			    TRANSFER_LIST_HANDOFF_R1_VALUE(
				    REGISTER_CONVENTION_VERSION))
				return 1;

			/* AArch32 convention: DT pointer goes in arg2 */
			arg0 = 0;
			arg2 = (uint64_t)dt;
		}

		/* Propagate the signature and transfer list address */
		arg1 = optee_ep_info->args.arg1;
		arg3 = optee_ep_info->args.arg3;

	} else
#endif /* TRANSFER_LIST */
	{
		/* Default handoff arguments */
		opteed_rw = optee_ep_info->args.arg0;
		arg0 = optee_ep_info->args.arg1; /* opteed_pageable_part */
		arg1 = optee_ep_info->args.arg2; /* opteed_mem_limit */
		arg2 = optee_ep_info->args.arg3; /* dt_addr */
		arg3 = 0;
	}

	opteed_init_optee_ep_state(optee_ep_info, opteed_rw,
				   optee_ep_info->pc, arg0, arg1, arg2,
				   arg3, &opteed_sp_context[linear_id]);

	/*
	 * All OPTEED initialization done. Now register our init function with
	 * BL31 for deferred invocation
	 */
	bl31_register_bl32_init(&opteed_init);

	return 0;
#endif /* OPTEE_ALLOW_SMC_LOAD */
}

/*******************************************************************************
 * This function passes control to the OPTEE image (BL32) for the first time
 * on the primary cpu after a cold boot.
It assumes that a valid secure 252 * context has already been created by opteed_setup() which can be directly 253 * used. It also assumes that a valid non-secure context has been 254 * initialised by PSCI so it does not need to save and restore any 255 * non-secure state. This function performs a synchronous entry into 256 * OPTEE. OPTEE passes control back to this routine through a SMC. This returns 257 * a non-zero value on success and zero on failure. 258 ******************************************************************************/ 259 static int32_t 260 opteed_init_with_entry_point(entry_point_info_t *optee_entry_point) 261 { 262 uint32_t linear_id = plat_my_core_pos(); 263 optee_context_t *optee_ctx = &opteed_sp_context[linear_id]; 264 uint64_t rc; 265 assert(optee_entry_point); 266 267 cm_init_my_context(optee_entry_point); 268 269 /* 270 * Arrange for an entry into OPTEE. It will be returned via 271 * OPTEE_ENTRY_DONE case 272 */ 273 rc = opteed_synchronous_sp_entry(optee_ctx); 274 assert(rc != 0); 275 276 return rc; 277 } 278 279 #if !OPTEE_ALLOW_SMC_LOAD 280 static int32_t opteed_init(void) 281 { 282 entry_point_info_t *optee_entry_point; 283 /* 284 * Get information about the OP-TEE (BL32) image. Its 285 * absence is a critical failure. 286 */ 287 optee_entry_point = bl31_plat_get_next_image_ep_info(SECURE); 288 return opteed_init_with_entry_point(optee_entry_point); 289 } 290 #endif /* !OPTEE_ALLOW_SMC_LOAD */ 291 292 #if OPTEE_ALLOW_SMC_LOAD 293 #if COREBOOT 294 /* 295 * Adds a firmware/coreboot node with the coreboot table information to a device 296 * tree. Returns zero on success or if there is no coreboot table information; 297 * failure code otherwise. 
298 */ 299 static int add_coreboot_node(void *fdt) 300 { 301 int ret; 302 uint64_t coreboot_table_addr; 303 uint32_t coreboot_table_size; 304 struct { 305 uint64_t addr; 306 uint32_t size; 307 } reg_node; 308 coreboot_get_table_location(&coreboot_table_addr, &coreboot_table_size); 309 if (!coreboot_table_addr || !coreboot_table_size) { 310 WARN("Unable to get coreboot table location for device tree"); 311 return 0; 312 } 313 ret = fdt_begin_node(fdt, "firmware"); 314 if (ret) 315 return ret; 316 317 ret = fdt_property(fdt, "ranges", NULL, 0); 318 if (ret) 319 return ret; 320 321 ret = fdt_begin_node(fdt, "coreboot"); 322 if (ret) 323 return ret; 324 325 ret = fdt_property_string(fdt, "compatible", "coreboot"); 326 if (ret) 327 return ret; 328 329 reg_node.addr = cpu_to_fdt64(coreboot_table_addr); 330 reg_node.size = cpu_to_fdt32(coreboot_table_size); 331 ret = fdt_property(fdt, "reg", ®_node, 332 sizeof(uint64_t) + sizeof(uint32_t)); 333 if (ret) 334 return ret; 335 336 ret = fdt_end_node(fdt); 337 if (ret) 338 return ret; 339 340 return fdt_end_node(fdt); 341 } 342 #endif /* COREBOOT */ 343 344 #if CROS_WIDEVINE_SMC 345 /* 346 * Adds a options/widevine node with the widevine table information to a device 347 * tree. Returns zero on success or if there is no widevine table information; 348 * failure code otherwise. 
349 */ 350 static int add_options_widevine_node(void *fdt) 351 { 352 int ret; 353 354 ret = fdt_begin_node(fdt, "options"); 355 if (ret) 356 return ret; 357 358 ret = fdt_begin_node(fdt, "op-tee"); 359 if (ret) 360 return ret; 361 362 ret = fdt_begin_node(fdt, "widevine"); 363 if (ret) 364 return ret; 365 366 if (cros_oem_tpm_auth_pk.length) { 367 ret = fdt_property(fdt, "tcg,tpm-auth-public-key", 368 cros_oem_tpm_auth_pk.buffer, 369 cros_oem_tpm_auth_pk.length); 370 if (ret) 371 return ret; 372 } 373 374 if (cros_oem_huk.length) { 375 ret = fdt_property(fdt, "op-tee,hardware-unique-key", 376 cros_oem_huk.buffer, cros_oem_huk.length); 377 if (ret) 378 return ret; 379 } 380 381 if (cros_oem_rot.length) { 382 ret = fdt_property(fdt, "google,widevine-root-of-trust-ecc-p256", 383 cros_oem_rot.buffer, cros_oem_rot.length); 384 if (ret) 385 return ret; 386 } 387 388 ret = fdt_end_node(fdt); 389 if (ret) 390 return ret; 391 392 ret = fdt_end_node(fdt); 393 if (ret) 394 return ret; 395 396 return fdt_end_node(fdt); 397 } 398 #endif /* CROS_WIDEVINE_SMC */ 399 400 /* 401 * Creates a device tree for passing into OP-TEE. Currently is populated with 402 * the coreboot table address. 403 * Returns 0 on success, error code otherwise. 
 */
static int create_opteed_dt(void)
{
	int ret;

	/* Build the FDT incrementally in the static fdt_buf */
	ret = fdt_create(fdt_buf, OPTEED_FDT_SIZE);
	if (ret)
		return ret;

	ret = fdt_finish_reservemap(fdt_buf);
	if (ret)
		return ret;

	/* Root node */
	ret = fdt_begin_node(fdt_buf, "");
	if (ret)
		return ret;

#if COREBOOT
	ret = add_coreboot_node(fdt_buf);
	if (ret)
		return ret;
#endif /* COREBOOT */

#if CROS_WIDEVINE_SMC
	ret = add_options_widevine_node(fdt_buf);
	if (ret)
		return ret;
#endif /* CROS_WIDEVINE_SMC */

	ret = fdt_end_node(fdt_buf);
	if (ret)
		return ret;

	return fdt_finish(fdt_buf);
}

#if TRANSFER_LIST
/*
 * Initialises the BL31 transfer list at FW_HANDOFF_BASE and copies the given
 * device tree into it as a TL_TAG_FDT entry. Returns 0 on success, -1 on
 * failure.
 */
static int32_t create_smc_tl(const void *fdt, uint32_t fdt_sz)
{
	bl31_tl = transfer_list_init((void *)(uintptr_t)FW_HANDOFF_BASE,
				     FW_HANDOFF_SIZE);
	if (!bl31_tl) {
		ERROR("Failed to initialize Transfer List at 0x%lx\n",
		      (unsigned long)FW_HANDOFF_BASE);
		return -1;
	}

	if (!transfer_list_add(bl31_tl, TL_TAG_FDT, fdt_sz, fdt)) {
		return -1;
	}
	return 0;
}
#endif

/*******************************************************************************
 * This function is responsible for handling the SMC that loads the OP-TEE
 * binary image via a non-secure SMC call. It takes the size and physical
 * address of the payload as parameters.
 ******************************************************************************/
static int32_t opteed_handle_smc_load(uint64_t data_size, uint64_t data_pa)
{
	uintptr_t data_va = data_pa;
	uint64_t mapped_data_pa;
	uintptr_t mapped_data_va;
	uint64_t data_map_size;
	int32_t rc;
	optee_header_t *image_header;
	uint8_t *image_ptr;
	uint64_t target_pa;
	uint64_t target_end_pa;
	uint64_t image_pa;
	uintptr_t image_va;
	optee_image_t *curr_image;
	uintptr_t target_va;
	uint64_t target_size;
	entry_point_info_t optee_ep_info;
	uint32_t linear_id = plat_my_core_pos();
	uint64_t dt_addr = 0;
	uint64_t arg0 = 0;
	uint64_t arg1 = 0;
	uint64_t arg2 = 0;
	uint64_t arg3 = 0;

	/* Page-align the mapping and widen the size to cover the payload */
	mapped_data_pa = page_align(data_pa, DOWN);
	mapped_data_va = mapped_data_pa;
	data_map_size = page_align(data_size + (mapped_data_pa - data_pa), UP);

	/*
	 * We do not validate the passed in address because we are trusting the
	 * non-secure world at this point still.
	 */
	rc = mmap_add_dynamic_region(mapped_data_pa, mapped_data_va,
				     data_map_size, MT_MEMORY | MT_RO | MT_NS);
	if (rc != 0) {
		return rc;
	}

	/* Sanity-check the OP-TEE header: magic, version and image count */
	image_header = (optee_header_t *)data_va;
	if (image_header->magic != TEE_MAGIC_NUM_OPTEE ||
	    image_header->version != 2 || image_header->nb_images != 1) {
		mmap_remove_dynamic_region(mapped_data_va, data_map_size);
		return -EINVAL;
	}

	/* Payload follows the header and the single image descriptor */
	image_ptr = (uint8_t *)data_va + sizeof(optee_header_t) +
		    sizeof(optee_image_t);
	if (image_header->arch == 1) {
		opteed_rw = OPTEE_AARCH64;
	} else {
		opteed_rw = OPTEE_AARCH32;
	}

	curr_image = &image_header->optee_image_list[0];
	image_pa = dual32to64(curr_image->load_addr_hi,
			      curr_image->load_addr_lo);
	image_va = image_pa;
	target_end_pa = image_pa + curr_image->size;

	/* Now also map the memory we want to copy it to. */
	target_pa = page_align(image_pa, DOWN);
	target_va = target_pa;
	target_size = page_align(target_end_pa, UP) - target_pa;

	rc = mmap_add_dynamic_region(target_pa, target_va, target_size,
				     MT_MEMORY | MT_RW | MT_SECURE);
	if (rc != 0) {
		/* Undo the source mapping before bailing out */
		mmap_remove_dynamic_region(mapped_data_va, data_map_size);
		return rc;
	}

	INFO("Loaded OP-TEE via SMC: size %d addr 0x%" PRIx64 "\n",
	     curr_image->size, image_va);

	/* Copy the image into place and make it visible to OP-TEE */
	memcpy((void *)image_va, image_ptr, curr_image->size);
	flush_dcache_range(target_pa, target_size);

	/* Both dynamic mappings are only needed for the copy above */
	mmap_remove_dynamic_region(mapped_data_va, data_map_size);
	mmap_remove_dynamic_region(target_va, target_size);

	/* Save the non-secure state */
	cm_el1_sysregs_context_save(NON_SECURE);

	rc = create_opteed_dt();
	if (rc) {
		ERROR("Failed device tree creation %d\n", rc);
		return rc;
	}
	dt_addr = (uint64_t)fdt_buf;
	flush_dcache_range(dt_addr, OPTEED_FDT_SIZE);

#if TRANSFER_LIST
	/* Prefer the transfer-list handoff; fall back to raw DT on failure */
	if (!create_smc_tl((void *)dt_addr, OPTEED_FDT_SIZE)) {
		struct transfer_list_entry *te = NULL;
		void *dt = NULL;

		te = transfer_list_find(bl31_tl, TL_TAG_FDT);
		dt = transfer_list_entry_data(te);

		if (opteed_rw == OPTEE_AARCH64) {
			/* AArch64 convention: DT pointer in arg0 */
			arg0 = (uint64_t)dt;
			arg1 = TRANSFER_LIST_HANDOFF_X1_VALUE(REGISTER_CONVENTION_VERSION);
			arg2 = 0;
		} else {
			/* AArch32 convention: DT pointer in arg2 */
			arg0 = 0;
			arg1 = TRANSFER_LIST_HANDOFF_R1_VALUE(REGISTER_CONVENTION_VERSION);
			arg2 = (uint64_t)dt;
		}

		arg3 = (uint64_t)bl31_tl;
	} else
#endif /* TRANSFER_LIST */
	{
		/* Default handoff arguments */
		arg2 = dt_addr;
	}

	opteed_init_optee_ep_state(&optee_ep_info,
				   opteed_rw,
				   image_pa,
				   arg0,
				   arg1,
				   arg2,
				   arg3,
				   &opteed_sp_context[linear_id]);
	/* Zero return from the synchronous entry means OP-TEE init failed */
	if (opteed_init_with_entry_point(&optee_ep_info) == 0) {
		rc = -EFAULT;
	}

	/* Restore non-secure state */
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	return rc;
}
#endif /* OPTEE_ALLOW_SMC_LOAD */

/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the Secure
 * payload to delegate work and return results back to the non-secure
 * state. Lastly it will also return any information that OPTEE needs to do
 * the work assigned to it.
 ******************************************************************************/
static uintptr_t opteed_smc_handler(uint32_t smc_fid,
			 u_register_t x1,
			 u_register_t x2,
			 u_register_t x3,
			 u_register_t x4,
			 void *cookie,
			 void *handle,
			 u_register_t flags)
{
	cpu_context_t *ns_cpu_context;
	uint32_t linear_id = plat_my_core_pos();
	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];

	/*
	 * Determine which security state this SMC originated from
	 */

	if (is_caller_non_secure(flags)) {
#if OPTEE_ALLOW_SMC_LOAD
		if (opteed_allow_load && smc_fid == NSSMC_OPTEED_CALL_UID) {
			/* Provide the UUID of the image loading service. */
			SMC_UUID_RET(handle, optee_image_load_uuid);
		}
		if (smc_fid == NSSMC_OPTEED_CALL_LOAD_IMAGE) {
			/*
			 * TODO: Consider wiping the code for SMC loading from
			 * memory after it has been invoked similar to what is
			 * done under RECLAIM_INIT, but extended to happen
			 * later.
			 */
			if (!opteed_allow_load) {
				SMC_RET1(handle, -EPERM);
			}

			/* One-shot: refuse any further load attempts */
			opteed_allow_load = false;
			uint64_t data_size = dual32to64(x1, x2);
			uint64_t data_pa = dual32to64(x3, x4);
			if (!data_size || !data_pa) {
				/*
				 * This is invoked when the OP-TEE image didn't
				 * load correctly in the kernel but we want to
				 * block off loading of it later for security
				 * reasons.
				 */
				SMC_RET1(handle, -EINVAL);
			}
			SMC_RET1(handle, opteed_handle_smc_load(
					data_size, data_pa));
		}
#endif /* OPTEE_ALLOW_SMC_LOAD */
		/*
		 * This is a fresh request from the non-secure client.
		 * The parameters are in x1 and x2. Figure out which
		 * registers need to be preserved, save the non-secure
		 * state and send the request to the secure payload.
		 */
		assert(handle == cm_get_context(NON_SECURE));

		cm_el1_sysregs_context_save(NON_SECURE);

		/*
		 * We are done stashing the non-secure context. Ask the
		 * OP-TEE to do the work now. If we are loading via an SMC,
		 * then we also need to init this CPU context if not done
		 * already.
		 */
		if (optee_vector_table == NULL) {
			SMC_RET1(handle, -EINVAL);
		}

		if (get_optee_pstate(optee_ctx->state) ==
		    OPTEE_PSTATE_UNKNOWN) {
			opteed_cpu_on_finish_handler(0);
		}

		/*
		 * Verify if there is a valid context to use, copy the
		 * operation type and parameters to the secure context
		 * and jump to the fast smc entry point in the secure
		 * payload. Entry into S-EL1 will take place upon exit
		 * from this function.
		 */
		assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE));

		/* Set appropriate entry for SMC.
		 * We expect OPTEE to manage the PSTATE.I and PSTATE.F
		 * flags as appropriate.
		 */
		if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
			cm_set_elr_el3(SECURE, (uint64_t)
					&optee_vector_table->fast_smc_entry);
		} else {
			cm_set_elr_el3(SECURE, (uint64_t)
					&optee_vector_table->yield_smc_entry);
		}

		cm_el1_sysregs_context_restore(SECURE);
		cm_set_next_eret_context(SECURE);

		/* Forward x4-x6 of the request into the secure context */
		write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
			      CTX_GPREG_X4,
			      read_ctx_reg(get_gpregs_ctx(handle),
					   CTX_GPREG_X4));
		write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
			      CTX_GPREG_X5,
			      read_ctx_reg(get_gpregs_ctx(handle),
					   CTX_GPREG_X5));
		write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
			      CTX_GPREG_X6,
			      read_ctx_reg(get_gpregs_ctx(handle),
					   CTX_GPREG_X6));
		/* Propagate hypervisor client ID */
		write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
			      CTX_GPREG_X7,
			      read_ctx_reg(get_gpregs_ctx(handle),
					   CTX_GPREG_X7));

		SMC_RET4(&optee_ctx->cpu_ctx, smc_fid, x1, x2, x3);
	}

	/*
	 * Returning from OPTEE
	 */

	switch (smc_fid) {
	/*
	 * OPTEE has finished initialising itself after a cold boot
	 */
	case TEESMC_OPTEED_RETURN_ENTRY_DONE:
		/*
		 * Stash the OPTEE entry points information. This is done
		 * only once on the primary cpu
		 */
		assert(optee_vector_table == NULL);
		optee_vector_table = (optee_vectors_t *) x1;

		if (optee_vector_table) {
			set_optee_pstate(optee_ctx->state, OPTEE_PSTATE_ON);

			/*
			 * OPTEE has been successfully initialized.
			 * Register power management hooks with PSCI
			 */
			psci_register_spd_pm_hook(&opteed_pm);

#if !OPTEE_ALLOW_SMC_LOAD
			register_opteed_interrupt_handler();
#endif
		}

		/*
		 * OPTEE reports completion. The OPTEED must have initiated
		 * the original request through a synchronous entry into
		 * OPTEE. Jump back to the original C runtime context.
		 */
		opteed_synchronous_sp_exit(optee_ctx, x1);
		break;


	/*
	 * These function IDs are used only by OP-TEE to indicate it has
	 * finished:
	 * 1. turning itself on in response to an earlier psci
	 *    cpu_on request
	 * 2. resuming itself after an earlier psci cpu_suspend
	 *    request.
	 */
	case TEESMC_OPTEED_RETURN_ON_DONE:
	case TEESMC_OPTEED_RETURN_RESUME_DONE:


	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. suspending itself after an earlier psci cpu_suspend
	 *    request.
	 * 2. turning itself off in response to an earlier psci
	 *    cpu_off request.
	 */
	case TEESMC_OPTEED_RETURN_OFF_DONE:
	case TEESMC_OPTEED_RETURN_SUSPEND_DONE:
	case TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE:
	case TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE:

		/*
		 * OPTEE reports completion. The OPTEED must have initiated the
		 * original request through a synchronous entry into OPTEE.
		 * Jump back to the original C runtime context, and pass x1 as
		 * return value to the caller
		 */
		opteed_synchronous_sp_exit(optee_ctx, x1);
		break;

	/*
	 * OPTEE is returning from a call or being preempted from a call, in
	 * either case execution should resume in the normal world.
	 */
	case TEESMC_OPTEED_RETURN_CALL_DONE:
		/*
		 * This is the result from the secure client of an
		 * earlier request. The results are in x0-x3. Copy it
		 * into the non-secure context, save the secure state
		 * and return to the non-secure state.
		 */
		assert(handle == cm_get_context(SECURE));
		cm_el1_sysregs_context_save(SECURE);

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/* Restore non-secure state */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		SMC_RET4(ns_cpu_context, x1, x2, x3, x4);

	/*
	 * OPTEE has finished handling a S-EL1 FIQ interrupt. Execution
	 * should resume in the normal world.
	 */
	case TEESMC_OPTEED_RETURN_FIQ_DONE:
		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/*
		 * Restore non-secure state. There is no need to save the
		 * secure system register context since OPTEE was supposed
		 * to preserve it during S-EL1 interrupt handling.
		 */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		SMC_RET0((uint64_t) ns_cpu_context);

	default:
		panic();
	}
}

/* Define an OPTEED runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
	opteed_fast,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_FAST,
	opteed_setup,
	opteed_smc_handler
);

/* Define an OPTEED runtime service descriptor for yielding SMC calls */
DECLARE_RT_SVC(
	opteed_std,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_YIELD,
	NULL,
	opteed_smc_handler
);