/*
 * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */


/*******************************************************************************
 * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
 * plug-in component to the Secure Monitor, registered as a runtime service. The
 * SPD is expected to be a functional extension of the Secure Payload (SP) that
 * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
 * the Trusted OS/Applications range to the dispatcher. The SPD will either
 * handle the request locally or delegate it to the Secure Payload. It is also
 * responsible for initialising and maintaining communication with the SP.
 ******************************************************************************/
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stddef.h>

#include <arch_helpers.h>
#include <bl31/bl31.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/coreboot.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/optee_utils.h>
#if TRANSFER_LIST
#include <transfer_list.h>
#endif
#include <lib/xlat_tables/xlat_tables_v2.h>
#if OPTEE_ALLOW_SMC_LOAD
#include <libfdt.h>
#endif /* OPTEE_ALLOW_SMC_LOAD */
#include <plat/common/platform.h>
#include <services/oem/chromeos/widevine_smc_handlers.h>
#include <tools_share/uuid.h>

#include "opteed_private.h"
#include "teesmc_opteed.h"

#if OPTEE_ALLOW_SMC_LOAD
static struct transfer_list_header __maybe_unused *bl31_tl;
#endif

/*******************************************************************************
 * Address of the entrypoint vector table in OPTEE. It is
 * initialised once on the primary core after a cold boot.
 ******************************************************************************/
struct optee_vectors *optee_vector_table;

/*******************************************************************************
 * Array to keep track of per-cpu OPTEE state
 ******************************************************************************/
optee_context_t opteed_sp_context[OPTEED_CORE_COUNT];
uint32_t opteed_rw;

#if OPTEE_ALLOW_SMC_LOAD
static bool opteed_allow_load;
/* OP-TEE image loading service UUID */
DEFINE_SVC_UUID2(optee_image_load_uuid,
	0xb1eafba3, 0x5d31, 0x4612, 0xb9, 0x06,
	0xc4, 0xc7, 0xa4, 0xbe, 0x3c, 0xc0);

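/*
 * Helper to rebuild a 64-bit value from the two 32-bit halves (high word
 * first) in which the image load SMC passes its size and address arguments.
 */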
static uint64_t dual32to64(uint32_t high, uint32_t low)
{
	return ((uint64_t)high << 32) | low;
}

#define OPTEED_FDT_SIZE 1024
static uint8_t fdt_buf[OPTEED_FDT_SIZE] __aligned(CACHE_WRITEBACK_GRANULE);

#else
static int32_t opteed_init(void);
#endif

/*******************************************************************************
 * This function is the handler registered for S-EL1 interrupts by the
 * OPTEED. It validates the interrupt and upon success arranges entry into
 * OPTEE at 'optee_fiq_entry()' for handling the interrupt.
 ******************************************************************************/
static uint64_t opteed_sel1_interrupt_handler(uint32_t id,
					      uint32_t flags,
					      void *handle,
					      void *cookie)
{
	uint32_t linear_id;
	optee_context_t *optee_ctx;

#if OPTEE_ALLOW_SMC_LOAD
	if (optee_vector_table == NULL) {
		/* OPTEE is not loaded yet, ignore this interrupt */
		SMC_RET0(handle);
	}
#endif

	/* Check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering OPTEE */
	cm_el1_sysregs_context_save(NON_SECURE);

	/* Get a reference to this cpu's OPTEE context */
	linear_id = plat_my_core_pos();
	optee_ctx = &opteed_sp_context[linear_id];
	assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE));

	cm_set_elr_el3(SECURE, (uint64_t)&optee_vector_table->fiq_entry);
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	/*
	 * Tell OPTEE that it has to handle an FIQ (synchronously). The
	 * address of the normal-world instruction at which the interrupt
	 * was taken is also passed for debugging purposes. It is safe to
	 * retrieve this address from ELR_EL3 because the secure context
	 * will not take effect until el3_exit().
	 */
	SMC_RET1(&optee_ctx->cpu_ctx, read_elr_el3());
}

/*
 * Registers an interrupt handler for S-EL1 interrupts generated while code
 * is executing in the non-secure state. Panics if it fails to do so.
 */
static void register_opteed_interrupt_handler(void)
{
	u_register_t flags;
	uint64_t rc;

	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					     opteed_sel1_interrupt_handler,
					     flags);
	if (rc)
		panic();
}

/*******************************************************************************
 * OPTEE Dispatcher setup. The OPTEED finds out the OPTEE entrypoint and type
 * (aarch32/aarch64) if not already known and initialises the context for entry
 * into OPTEE for its initialisation.
 ******************************************************************************/
static int32_t opteed_setup(void)
{
#if OPTEE_ALLOW_SMC_LOAD
	opteed_allow_load = true;
	INFO("Delaying OP-TEE setup until we receive an SMC call to load it\n");
	/*
	 * We must register the interrupt handler now so that the interrupt
	 * priorities are not changed after starting the linux kernel.
	 */
	register_opteed_interrupt_handler();
	return 0;
#else
	entry_point_info_t *optee_ep_info;
	uint32_t linear_id;
	uint64_t arg0;
	uint64_t arg1;
	uint64_t arg2;
	uint64_t arg3;
	struct transfer_list_header __maybe_unused *tl = NULL;
	struct transfer_list_entry __maybe_unused *te = NULL;
	void __maybe_unused *dt = NULL;

	linear_id = plat_my_core_pos();

	/*
	 * Get information about the Secure Payload (BL32) image. Its
	 * absence is a critical failure. TODO: Add support to
	 * conditionally include the SPD service.
	 */
	optee_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (optee_ep_info == NULL) {
		WARN("No OPTEE provided by BL2 boot loader, booting device"
		     " without OPTEE initialization. SMCs destined for OPTEE"
		     " will return SMC_UNK\n");
		return 1;
	}

	/*
	 * If there's no valid entry point for the SP, we return a non-zero
	 * value signalling failure to initialize the service. We bail out
	 * without registering any handlers.
	 */
	if (optee_ep_info->pc == 0U)
		return 1;

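	/*
	 * Select the handoff arguments for OP-TEE. With a valid transfer
	 * list from BL2 (checked below), the DT address is passed in arg0
	 * for AArch64 or arg2 for AArch32, the handoff signature/version
	 * value in arg1 and the transfer list address in arg3. Otherwise
	 * the legacy arg0..arg3 layout provided by BL2 is used as-is.
	 */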
#if TRANSFER_LIST
	tl = (void *)optee_ep_info->args.arg3;

	if (transfer_list_check_header(tl)) {
		te = transfer_list_find(tl, TL_TAG_FDT);
		dt = transfer_list_entry_data(te);

		opteed_rw = GET_RW(optee_ep_info->spsr);
		if (opteed_rw == OPTEE_AARCH64) {
			if (optee_ep_info->args.arg1 !=
			    TRANSFER_LIST_HANDOFF_X1_VALUE(
				    REGISTER_CONVENTION_VERSION))
				return 1;

			arg0 = (uint64_t)dt;
			arg2 = 0;
		} else {
			if (optee_ep_info->args.arg1 !=
			    TRANSFER_LIST_HANDOFF_R1_VALUE(
				    REGISTER_CONVENTION_VERSION))
				return 1;

			arg0 = 0;
			arg2 = (uint64_t)dt;
		}

		arg1 = optee_ep_info->args.arg1;
		arg3 = optee_ep_info->args.arg3;

	} else
#endif /* TRANSFER_LIST */
	{
		/* Default handoff arguments */
		opteed_rw = optee_ep_info->args.arg0;
		arg0 = optee_ep_info->args.arg1; /* opteed_pageable_part */
		arg1 = optee_ep_info->args.arg2; /* opteed_mem_limit */
		arg2 = optee_ep_info->args.arg3; /* dt_addr */
		arg3 = 0;
	}

	opteed_init_optee_ep_state(optee_ep_info, opteed_rw,
				   optee_ep_info->pc, arg0, arg1, arg2,
				   arg3, &opteed_sp_context[linear_id]);

	/*
	 * All OPTEED initialization done. Now register our init function
	 * with BL31 for deferred invocation.
	 */
	bl31_register_bl32_init(&opteed_init);

	return 0;
#endif /* OPTEE_ALLOW_SMC_LOAD */
}

/*******************************************************************************
 * This function passes control to the OPTEE image (BL32) for the first time
 * on the primary cpu after a cold boot. It assumes that a valid secure
 * context has already been created by opteed_setup() which can be directly
 * used. It also assumes that a valid non-secure context has been
 * initialised by PSCI so it does not need to save and restore any
 * non-secure state. This function performs a synchronous entry into
 * OPTEE. OPTEE passes control back to this routine through an SMC. It
 * returns a non-zero value on success and zero on failure.
 ******************************************************************************/
static int32_t
opteed_init_with_entry_point(entry_point_info_t *optee_entry_point)
{
	uint32_t linear_id = plat_my_core_pos();
	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
	uint64_t rc;
	assert(optee_entry_point);

	cm_init_my_context(optee_entry_point);

	/*
	 * Arrange for an entry into OPTEE. It will be returned via the
	 * OPTEE_ENTRY_DONE case.
	 */
	rc = opteed_synchronous_sp_entry(optee_ctx);
	assert(rc != 0);

	return rc;
}

#if !OPTEE_ALLOW_SMC_LOAD
static int32_t opteed_init(void)
{
	entry_point_info_t *optee_entry_point;
	/*
	 * Get information about the OP-TEE (BL32) image. Its
	 * absence is a critical failure.
	 */
	optee_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
	return opteed_init_with_entry_point(optee_entry_point);
}
#endif /* !OPTEE_ALLOW_SMC_LOAD */

#if OPTEE_ALLOW_SMC_LOAD
#if COREBOOT
/*
 * Adds a firmware/coreboot node with the coreboot table information to a
 * device tree. Returns zero on success or if there is no coreboot table
 * information; failure code otherwise.
 */
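/*
 * Illustrative sketch of the fragment produced below (the reg value is the
 * 64-bit coreboot table address followed by its 32-bit size):
 *
 *	firmware {
 *		ranges;
 *		coreboot {
 *			compatible = "coreboot";
 *			reg = <table-address table-size>;
 *		};
 *	};
 */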
static int add_coreboot_node(void *fdt)
{
	int ret;
	uint64_t coreboot_table_addr;
	uint32_t coreboot_table_size;
	struct {
		uint64_t addr;
		uint32_t size;
	} reg_node;
	coreboot_get_table_location(&coreboot_table_addr, &coreboot_table_size);
	if (!coreboot_table_addr || !coreboot_table_size) {
		WARN("Unable to get coreboot table location for device tree");
		return 0;
	}
	ret = fdt_begin_node(fdt, "firmware");
	if (ret)
		return ret;

	ret = fdt_property(fdt, "ranges", NULL, 0);
	if (ret)
		return ret;

	ret = fdt_begin_node(fdt, "coreboot");
	if (ret)
		return ret;

	ret = fdt_property_string(fdt, "compatible", "coreboot");
	if (ret)
		return ret;

	reg_node.addr = cpu_to_fdt64(coreboot_table_addr);
	reg_node.size = cpu_to_fdt32(coreboot_table_size);
	ret = fdt_property(fdt, "reg", &reg_node,
			   sizeof(uint64_t) + sizeof(uint32_t));
	if (ret)
		return ret;

	ret = fdt_end_node(fdt);
	if (ret)
		return ret;

	return fdt_end_node(fdt);
}
#endif /* COREBOOT */

#if CROS_WIDEVINE_SMC
/*
 * Adds an options/widevine node with the widevine table information to a
 * device tree. Returns zero on success or if there is no widevine table
 * information; failure code otherwise.
 */
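/*
 * Illustrative sketch of the fragment produced below; each property is only
 * emitted when the corresponding OEM table entry is non-empty:
 *
 *	options {
 *		op-tee {
 *			widevine {
 *				tcg,tpm-auth-public-key = [..];
 *				op-tee,hardware-unique-key = [..];
 *				google,widevine-root-of-trust-ecc-p256 = [..];
 *			};
 *		};
 *	};
 */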
static int add_options_widevine_node(void *fdt)
{
	int ret;

	ret = fdt_begin_node(fdt, "options");
	if (ret)
		return ret;

	ret = fdt_begin_node(fdt, "op-tee");
	if (ret)
		return ret;

	ret = fdt_begin_node(fdt, "widevine");
	if (ret)
		return ret;

	if (cros_oem_tpm_auth_pk.length) {
		ret = fdt_property(fdt, "tcg,tpm-auth-public-key",
				   cros_oem_tpm_auth_pk.buffer,
				   cros_oem_tpm_auth_pk.length);
		if (ret)
			return ret;
	}

	if (cros_oem_huk.length) {
		ret = fdt_property(fdt, "op-tee,hardware-unique-key",
				   cros_oem_huk.buffer, cros_oem_huk.length);
		if (ret)
			return ret;
	}

	if (cros_oem_rot.length) {
		ret = fdt_property(fdt, "google,widevine-root-of-trust-ecc-p256",
				   cros_oem_rot.buffer, cros_oem_rot.length);
		if (ret)
			return ret;
	}

	ret = fdt_end_node(fdt);
	if (ret)
		return ret;

	ret = fdt_end_node(fdt);
	if (ret)
		return ret;

	return fdt_end_node(fdt);
}
#endif /* CROS_WIDEVINE_SMC */

/*
 * Creates a device tree for passing into OP-TEE. It is currently populated
 * with the coreboot table address.
 * Returns 0 on success, error code otherwise.
 */
static int create_opteed_dt(void)
{
	int ret;

	ret = fdt_create(fdt_buf, OPTEED_FDT_SIZE);
	if (ret)
		return ret;

	ret = fdt_finish_reservemap(fdt_buf);
	if (ret)
		return ret;

	ret = fdt_begin_node(fdt_buf, "");
	if (ret)
		return ret;

#if COREBOOT
	ret = add_coreboot_node(fdt_buf);
	if (ret)
		return ret;
#endif /* COREBOOT */

#if CROS_WIDEVINE_SMC
	ret = add_options_widevine_node(fdt_buf);
	if (ret)
		return ret;
#endif /* CROS_WIDEVINE_SMC */

	ret = fdt_end_node(fdt_buf);
	if (ret)
		return ret;

	return fdt_finish(fdt_buf);
}

#if TRANSFER_LIST
static int32_t create_smc_tl(const void *fdt, uint32_t fdt_sz)
{
	bl31_tl = transfer_list_init((void *)(uintptr_t)FW_HANDOFF_BASE,
				     FW_HANDOFF_SIZE);
	if (!bl31_tl) {
		ERROR("Failed to initialize Transfer List at 0x%lx\n",
		      (unsigned long)FW_HANDOFF_BASE);
		return -1;
	}

	if (!transfer_list_add(bl31_tl, TL_TAG_FDT, fdt_sz, fdt)) {
		return -1;
	}
	return 0;
}
#endif

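/*
 * Illustrative sketch (not a normative definition) of the non-secure call
 * that reaches opteed_handle_smc_load() below: the caller splits the image
 * size and its physical address into 32-bit halves, high word first:
 *
 *	x0 = NSSMC_OPTEED_CALL_LOAD_IMAGE
 *	x1 = data_size >> 32,	x2 = data_size & 0xffffffff
 *	x3 = data_pa >> 32,	x4 = data_pa & 0xffffffff
 *
 * A return value of 0 in x0 indicates that OP-TEE was loaded and
 * initialized; further load requests are refused once one has been
 * attempted.
 */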
/*******************************************************************************
 * This function is responsible for handling the SMC that loads the OP-TEE
 * binary image via a non-secure SMC call. It takes the size and physical
 * address of the payload as parameters.
 ******************************************************************************/
static int32_t opteed_handle_smc_load(uint64_t data_size, uint64_t data_pa)
{
	uintptr_t data_va = data_pa;
	uint64_t mapped_data_pa;
	uintptr_t mapped_data_va;
	uint64_t data_map_size;
	int32_t rc;
	optee_header_t *image_header;
	uint8_t *image_ptr;
	uint64_t target_pa;
	uint64_t target_end_pa;
	uint64_t image_pa;
	uintptr_t image_va;
	optee_image_t *curr_image;
	uintptr_t target_va;
	uint64_t target_size;
	entry_point_info_t optee_ep_info;
	uint32_t linear_id = plat_my_core_pos();
	uint64_t dt_addr = 0;
	uint64_t arg0 = 0;
	uint64_t arg1 = 0;
	uint64_t arg2 = 0;
	uint64_t arg3 = 0;

	mapped_data_pa = page_align(data_pa, DOWN);
	mapped_data_va = mapped_data_pa;
	data_map_size = page_align(data_size + (data_pa - mapped_data_pa), UP);

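	/*
	 * Worked example (illustrative, assuming 4KB pages): for
	 * data_pa = 0x80001200 and data_size = 0x3000 this yields
	 * mapped_data_pa = 0x80001000 and data_map_size = 0x4000, i.e. the
	 * smallest page-aligned window covering [data_pa, data_pa + data_size).
	 */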
	/*
	 * We do not validate the passed in address because we are trusting the
	 * non-secure world at this point still.
	 */
	rc = mmap_add_dynamic_region(mapped_data_pa, mapped_data_va,
				     data_map_size, MT_MEMORY | MT_RO | MT_NS);
	if (rc != 0) {
		return rc;
	}

	image_header = (optee_header_t *)data_va;
	if (image_header->magic != TEE_MAGIC_NUM_OPTEE ||
	    image_header->version != 2 || image_header->nb_images != 1) {
		mmap_remove_dynamic_region(mapped_data_va, data_map_size);
		return -EINVAL;
	}

	image_ptr = (uint8_t *)data_va + sizeof(optee_header_t) +
		    sizeof(optee_image_t);
	if (image_header->arch == 1) {
		opteed_rw = OPTEE_AARCH64;
	} else {
		opteed_rw = OPTEE_AARCH32;
	}

	curr_image = &image_header->optee_image_list[0];
	image_pa = dual32to64(curr_image->load_addr_hi,
			      curr_image->load_addr_lo);
	image_va = image_pa;
	target_end_pa = image_pa + curr_image->size;

	/* Now also map the memory we want to copy it to. */
	target_pa = page_align(image_pa, DOWN);
	target_va = target_pa;
	target_size = page_align(target_end_pa, UP) - target_pa;

	rc = mmap_add_dynamic_region(target_pa, target_va, target_size,
				     MT_MEMORY | MT_RW | MT_SECURE);
	if (rc != 0) {
		mmap_remove_dynamic_region(mapped_data_va, data_map_size);
		return rc;
	}

	INFO("Loaded OP-TEE via SMC: size %d addr 0x%" PRIx64 "\n",
	     curr_image->size, image_va);

	memcpy((void *)image_va, image_ptr, curr_image->size);
	flush_dcache_range(target_pa, target_size);

	mmap_remove_dynamic_region(mapped_data_va, data_map_size);
	mmap_remove_dynamic_region(target_va, target_size);

	/* Save the non-secure state */
	cm_el1_sysregs_context_save(NON_SECURE);

	rc = create_opteed_dt();
	if (rc) {
		ERROR("Failed device tree creation %d\n", rc);
		return rc;
	}
	dt_addr = (uint64_t)fdt_buf;
	flush_dcache_range(dt_addr, OPTEED_FDT_SIZE);

#if TRANSFER_LIST
	if (!create_smc_tl((void *)dt_addr, OPTEED_FDT_SIZE)) {
		struct transfer_list_entry *te = NULL;
		void *dt = NULL;

		te = transfer_list_find(bl31_tl, TL_TAG_FDT);
		dt = transfer_list_entry_data(te);

		if (opteed_rw == OPTEE_AARCH64) {
			arg0 = (uint64_t)dt;
			arg1 = TRANSFER_LIST_HANDOFF_X1_VALUE(REGISTER_CONVENTION_VERSION);
			arg2 = 0;
		} else {
			arg0 = 0;
			arg1 = TRANSFER_LIST_HANDOFF_R1_VALUE(REGISTER_CONVENTION_VERSION);
			arg2 = (uint64_t)dt;
		}

		arg3 = (uint64_t)bl31_tl;
	} else
#endif /* TRANSFER_LIST */
	{
		/* Default handoff arguments */
		arg2 = dt_addr;
	}

	opteed_init_optee_ep_state(&optee_ep_info,
				   opteed_rw,
				   image_pa,
				   arg0,
				   arg1,
				   arg2,
				   arg3,
				   &opteed_sp_context[linear_id]);
	if (opteed_init_with_entry_point(&optee_ep_info) == 0) {
		rc = -EFAULT;
	}

	/* Restore non-secure state */
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	return rc;
}
#endif /* OPTEE_ALLOW_SMC_LOAD */

/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the Secure
 * payload to delegate work and return results back to the non-secure
 * state. Lastly it will also return any information that OPTEE needs to do
 * the work assigned to it.
 ******************************************************************************/
static uintptr_t opteed_smc_handler(uint32_t smc_fid,
				    u_register_t x1,
				    u_register_t x2,
				    u_register_t x3,
				    u_register_t x4,
				    void *cookie,
				    void *handle,
				    u_register_t flags)
{
	cpu_context_t *ns_cpu_context;
	uint32_t linear_id = plat_my_core_pos();
	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];

	/*
	 * Determine which security state this SMC originated from
	 */

	if (is_caller_non_secure(flags)) {
#if OPTEE_ALLOW_SMC_LOAD
		if (opteed_allow_load && smc_fid == NSSMC_OPTEED_CALL_UID) {
			/* Provide the UUID of the image loading service. */
			SMC_UUID_RET(handle, optee_image_load_uuid);
		}
		if (smc_fid == NSSMC_OPTEED_CALL_LOAD_IMAGE) {
			/*
			 * TODO: Consider wiping the code for SMC loading from
			 * memory after it has been invoked, similar to what is
			 * done under RECLAIM_INIT, but extended to happen
			 * later.
			 */
			if (!opteed_allow_load) {
				SMC_RET1(handle, -EPERM);
			}

			opteed_allow_load = false;
			uint64_t data_size = dual32to64(x1, x2);
			uint64_t data_pa = dual32to64(x3, x4);
			if (!data_size || !data_pa) {
				/*
				 * This is invoked when the OP-TEE image didn't
				 * load correctly in the kernel but we want to
				 * block off loading of it later for security
				 * reasons.
				 */
				SMC_RET1(handle, -EINVAL);
			}
			SMC_RET1(handle, opteed_handle_smc_load(
					data_size, data_pa));
		}
#endif /* OPTEE_ALLOW_SMC_LOAD */
		/*
		 * This is a fresh request from the non-secure client.
		 * The parameters are in x1 and x2. Figure out which
		 * registers need to be preserved, save the non-secure
		 * state and send the request to the secure payload.
		 */
		assert(handle == cm_get_context(NON_SECURE));

		cm_el1_sysregs_context_save(NON_SECURE);

		/*
		 * We are done stashing the non-secure context. Ask
		 * OP-TEE to do the work now. If we are loading via an SMC,
		 * then we also need to init this CPU context if not done
		 * already.
		 */
		if (optee_vector_table == NULL) {
			SMC_RET1(handle, -EINVAL);
		}

		if (get_optee_pstate(optee_ctx->state) ==
		    OPTEE_PSTATE_UNKNOWN) {
			opteed_cpu_on_finish_handler(0);
		}

		/*
		 * Verify if there is a valid context to use, copy the
		 * operation type and parameters to the secure context
		 * and jump to the fast smc entry point in the secure
		 * payload. Entry into S-EL1 will take place upon exit
		 * from this function.
		 */
		assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE));

		/* Set appropriate entry for SMC.
		 * We expect OPTEE to manage the PSTATE.I and PSTATE.F
		 * flags as appropriate.
		 */
		if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
			cm_set_elr_el3(SECURE, (uint64_t)
				       &optee_vector_table->fast_smc_entry);
		} else {
			cm_set_elr_el3(SECURE, (uint64_t)
				       &optee_vector_table->yield_smc_entry);
		}

		cm_el1_sysregs_context_restore(SECURE);
		cm_set_next_eret_context(SECURE);

		write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
			      CTX_GPREG_X4,
			      read_ctx_reg(get_gpregs_ctx(handle),
					   CTX_GPREG_X4));
		write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
			      CTX_GPREG_X5,
			      read_ctx_reg(get_gpregs_ctx(handle),
					   CTX_GPREG_X5));
		write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
			      CTX_GPREG_X6,
			      read_ctx_reg(get_gpregs_ctx(handle),
					   CTX_GPREG_X6));
		/* Propagate hypervisor client ID */
		write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
			      CTX_GPREG_X7,
			      read_ctx_reg(get_gpregs_ctx(handle),
					   CTX_GPREG_X7));

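		/*
		 * With the context set up above, OP-TEE enters at the fast
		 * or yielding SMC vector with the original function ID in
		 * x0, the parameters in x1-x3 and the values copied above
		 * in x4-x7 (x7 carrying the hypervisor client ID).
		 */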
		SMC_RET4(&optee_ctx->cpu_ctx, smc_fid, x1, x2, x3);
	}

	/*
	 * Returning from OPTEE
	 */

	switch (smc_fid) {
	/*
	 * OPTEE has finished initialising itself after a cold boot
	 */
	case TEESMC_OPTEED_RETURN_ENTRY_DONE:
		/*
		 * Stash the OPTEE entry points information. This is done
		 * only once on the primary cpu.
		 */
		assert(optee_vector_table == NULL);
		optee_vector_table = (optee_vectors_t *) x1;

		if (optee_vector_table != NULL) {
			set_optee_pstate(optee_ctx->state, OPTEE_PSTATE_ON);

			/*
			 * OPTEE has been successfully initialized.
			 * Register power management hooks with PSCI.
			 */
			psci_register_spd_pm_hook(&opteed_pm);

#if !OPTEE_ALLOW_SMC_LOAD
			register_opteed_interrupt_handler();
#endif
		}

		/*
		 * OPTEE reports completion. The OPTEED must have initiated
		 * the original request through a synchronous entry into
		 * OPTEE. Jump back to the original C runtime context.
		 */
		opteed_synchronous_sp_exit(optee_ctx, x1);
		break;


	/*
	 * These function IDs are used only by OP-TEE to indicate it has
	 * finished:
	 * 1. turning itself on in response to an earlier psci
	 *    cpu_on request
	 * 2. resuming itself after an earlier psci cpu_suspend
	 *    request.
	 */
	case TEESMC_OPTEED_RETURN_ON_DONE:
	case TEESMC_OPTEED_RETURN_RESUME_DONE:


	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. suspending itself after an earlier psci cpu_suspend
	 *    request.
	 * 2. turning itself off in response to an earlier psci
	 *    cpu_off request.
	 */
	case TEESMC_OPTEED_RETURN_OFF_DONE:
	case TEESMC_OPTEED_RETURN_SUSPEND_DONE:
	case TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE:
	case TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE:

		/*
		 * OPTEE reports completion. The OPTEED must have initiated the
		 * original request through a synchronous entry into OPTEE.
		 * Jump back to the original C runtime context, and pass x1 as
		 * return value to the caller.
		 */
		opteed_synchronous_sp_exit(optee_ctx, x1);
		break;

	/*
	 * OPTEE is returning from a call or being preempted from a call, in
	 * either case execution should resume in the normal world.
	 */
	case TEESMC_OPTEED_RETURN_CALL_DONE:
		/*
		 * This is the result from the secure client of an
		 * earlier request. The results are in x0-x3. Copy it
		 * into the non-secure context, save the secure state
		 * and return to the non-secure state.
		 */
		assert(handle == cm_get_context(SECURE));
		cm_el1_sysregs_context_save(SECURE);

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/* Restore non-secure state */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

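		/*
		 * The values OP-TEE returned in x1-x4 become the non-secure
		 * caller's x0-x3 results below.
		 */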
		SMC_RET4(ns_cpu_context, x1, x2, x3, x4);

	/*
	 * OPTEE has finished handling an S-EL1 FIQ interrupt. Execution
	 * should resume in the normal world.
	 */
	case TEESMC_OPTEED_RETURN_FIQ_DONE:
		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/*
		 * Restore non-secure state. There is no need to save the
		 * secure system register context since OPTEE was supposed
		 * to preserve it during S-EL1 interrupt handling.
		 */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		SMC_RET0((uint64_t) ns_cpu_context);

	default:
		panic();
	}
}

/* Define an OPTEED runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
	opteed_fast,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_FAST,
	opteed_setup,
	opteed_smc_handler
);

/* Define an OPTEED runtime service descriptor for yielding SMC calls */
DECLARE_RT_SVC(
	opteed_std,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_YIELD,
	NULL,
	opteed_smc_handler
);