1 /* 2 * Copyright (c) 2020-2025, Arm Limited and Contributors. All rights reserved. 3 * 4 * SPDX-License-Identifier: BSD-3-Clause 5 */ 6 7 #include <assert.h> 8 #include <errno.h> 9 #include <inttypes.h> 10 #include <stdint.h> 11 #include <string.h> 12 13 #include <arch_helpers.h> 14 #include <arch/aarch64/arch_features.h> 15 #include <bl31/bl31.h> 16 #include <bl31/interrupt_mgmt.h> 17 #include <common/debug.h> 18 #include <common/runtime_svc.h> 19 #include <common/tbbr/tbbr_img_def.h> 20 #include <lib/el3_runtime/context_mgmt.h> 21 #include <lib/fconf/fconf.h> 22 #include <lib/fconf/fconf_dyn_cfg_getter.h> 23 #include <lib/smccc.h> 24 #include <lib/spinlock.h> 25 #include <lib/utils.h> 26 #include <lib/xlat_tables/xlat_tables_v2.h> 27 #include <plat/common/common_def.h> 28 #include <plat/common/platform.h> 29 #include <platform_def.h> 30 #include <services/el3_spmd_logical_sp.h> 31 #include <services/ffa_svc.h> 32 #include <services/spmc_svc.h> 33 #include <services/spmd_svc.h> 34 #include <smccc_helpers.h> 35 #include "spmd_private.h" 36 #if TRANSFER_LIST 37 #include <transfer_list.h> 38 #endif 39 40 /******************************************************************************* 41 * SPM Core context information. 42 ******************************************************************************/ 43 static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT]; 44 45 /******************************************************************************* 46 * SPM Core attribute information is read from its manifest if the SPMC is not 47 * at EL3. Else, it is populated from the SPMC directly. 48 ******************************************************************************/ 49 static spmc_manifest_attribute_t spmc_attrs; 50 51 /******************************************************************************* 52 * FFA version used by nonsecure endpoint. 53 ******************************************************************************/ 54 static uint32_t nonsecure_ffa_version; 55 56 /******************************************************************************* 57 * Whether the normal world finished negotiating its version. 58 ******************************************************************************/ 59 static bool nonsecure_version_negotiated; 60 61 /******************************************************************************* 62 * FFA version used by SPMC, as seen by the normal world. 63 ******************************************************************************/ 64 static uint32_t spmc_nwd_ffa_version; 65 66 /******************************************************************************* 67 * SPM Core entry point information. Discovered on the primary core and reused 68 * on secondary cores. 69 ******************************************************************************/ 70 static entry_point_info_t *spmc_ep_info; 71 72 /******************************************************************************* 73 * SPM Core context on current CPU get helper. 74 ******************************************************************************/ 75 spmd_spm_core_context_t *spmd_get_context(void) 76 { 77 return &spm_core_context[plat_my_core_pos()]; 78 } 79 80 /******************************************************************************* 81 * SPM Core ID getter. 82 ******************************************************************************/ 83 uint16_t spmd_spmc_id_get(void) 84 { 85 return spmc_attrs.spmc_id; 86 } 87 88 /******************************************************************************* 89 * Static function declaration. 90 ******************************************************************************/ 91 static int32_t spmd_init(void); 92 static int spmd_spmc_init(void *pm_addr); 93 94 static uint64_t spmd_smc_forward(uint32_t smc_fid, 95 bool secure_origin, 96 uint64_t x1, 97 uint64_t x2, 98 uint64_t x3, 99 uint64_t x4, 100 void *cookie, 101 void *handle, 102 uint64_t flags, 103 uint32_t secure_ffa_version); 104 105 /****************************************************************************** 106 * Builds an SPMD to SPMC direct message request. 107 *****************************************************************************/ 108 void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target_func, 109 unsigned long long message) 110 { 111 write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32); 112 write_ctx_reg(gpregs, CTX_GPREG_X1, 113 (SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) | 114 spmd_spmc_id_get()); 115 write_ctx_reg(gpregs, CTX_GPREG_X2, BIT(31) | target_func); 116 write_ctx_reg(gpregs, CTX_GPREG_X3, message); 117 118 /* Zero out x4-x7 for the direct request emitted towards the SPMC. */ 119 write_ctx_reg(gpregs, CTX_GPREG_X4, 0); 120 write_ctx_reg(gpregs, CTX_GPREG_X5, 0); 121 write_ctx_reg(gpregs, CTX_GPREG_X6, 0); 122 write_ctx_reg(gpregs, CTX_GPREG_X7, 0); 123 } 124 125 126 /******************************************************************************* 127 * This function takes an SPMC context pointer and performs a synchronous 128 * SPMC entry. 129 ******************************************************************************/ 130 uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx) 131 { 132 uint64_t rc; 133 134 assert(spmc_ctx != NULL); 135 136 cm_set_context(&(spmc_ctx->cpu_ctx), SECURE); 137 138 /* Restore the context assigned above */ 139 #if SPMD_SPM_AT_SEL2 140 cm_el2_sysregs_context_restore(SECURE); 141 #else 142 cm_el1_sysregs_context_restore(SECURE); 143 #endif 144 cm_set_next_eret_context(SECURE); 145 146 /* Enter SPMC */ 147 rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx); 148 149 /* Save secure state */ 150 #if SPMD_SPM_AT_SEL2 151 cm_el2_sysregs_context_save(SECURE); 152 #else 153 cm_el1_sysregs_context_save(SECURE); 154 #endif 155 156 return rc; 157 } 158 159 /******************************************************************************* 160 * This function returns to the place where spmd_spm_core_sync_entry() was 161 * called originally. 162 ******************************************************************************/ 163 __dead2 void spmd_spm_core_sync_exit(uint64_t rc) 164 { 165 spmd_spm_core_context_t *ctx = spmd_get_context(); 166 167 /* Get current CPU context from SPMC context */ 168 assert(cm_get_context(SECURE) == &(ctx->cpu_ctx)); 169 170 /* 171 * The SPMD must have initiated the original request through a 172 * synchronous entry into SPMC. Jump back to the original C runtime 173 * context with the value of rc in x0; 174 */ 175 spmd_spm_core_exit(ctx->c_rt_ctx, rc); 176 177 panic(); 178 } 179 180 /******************************************************************************* 181 * Jump to the SPM Core for the first time. 182 ******************************************************************************/ 183 static int32_t spmd_init(void) 184 { 185 spmd_spm_core_context_t *ctx = spmd_get_context(); 186 uint64_t rc; 187 188 VERBOSE("SPM Core init start.\n"); 189 190 /* Primary boot core enters the SPMC for initialization. */ 191 ctx->state = SPMC_STATE_ON_PENDING; 192 193 rc = spmd_spm_core_sync_entry(ctx); 194 if (rc != 0ULL) { 195 ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc); 196 return 0; 197 } 198 199 ctx->state = SPMC_STATE_ON; 200 201 VERBOSE("SPM Core init end.\n"); 202 203 spmd_logical_sp_set_spmc_initialized(); 204 rc = spmd_logical_sp_init(); 205 if (rc != 0) { 206 WARN("SPMD Logical partitions failed init.\n"); 207 } 208 209 return 1; 210 } 211 212 /******************************************************************************* 213 * spmd_secure_interrupt_handler 214 * Enter the SPMC for further handling of the secure interrupt by the SPMC 215 * itself or a Secure Partition. 216 ******************************************************************************/ 217 static uint64_t spmd_secure_interrupt_handler(uint32_t id, 218 uint32_t flags, 219 void *handle, 220 void *cookie) 221 { 222 spmd_spm_core_context_t *ctx = spmd_get_context(); 223 gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx); 224 int64_t rc; 225 226 /* Sanity check the security state when the exception was generated */ 227 assert(get_interrupt_src_ss(flags) == NON_SECURE); 228 229 /* Sanity check the pointer to this cpu's context */ 230 assert(handle == cm_get_context(NON_SECURE)); 231 232 /* Save the non-secure context before entering SPMC */ 233 #if SPMD_SPM_AT_SEL2 234 cm_el2_sysregs_context_save(NON_SECURE); 235 #else 236 cm_el1_sysregs_context_save(NON_SECURE); 237 238 #if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS 239 /* 240 * The hint bit denoting absence of SVE live state is effectively false 241 * in this scenario where execution was trapped to EL3 due to FIQ. 242 */ 243 simd_ctx_save(NON_SECURE, false); 244 simd_ctx_restore(SECURE); 245 #endif 246 #endif 247 248 /* Convey the event to the SPMC through the FFA_INTERRUPT interface. */ 249 write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT); 250 write_ctx_reg(gpregs, CTX_GPREG_X1, 0); 251 write_ctx_reg(gpregs, CTX_GPREG_X2, 0); 252 write_ctx_reg(gpregs, CTX_GPREG_X3, 0); 253 write_ctx_reg(gpregs, CTX_GPREG_X4, 0); 254 write_ctx_reg(gpregs, CTX_GPREG_X5, 0); 255 write_ctx_reg(gpregs, CTX_GPREG_X6, 0); 256 write_ctx_reg(gpregs, CTX_GPREG_X7, 0); 257 258 /* Mark current core as handling a secure interrupt. */ 259 ctx->secure_interrupt_ongoing = true; 260 261 rc = spmd_spm_core_sync_entry(ctx); 262 263 if (rc != 0ULL) { 264 ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc, plat_my_core_pos()); 265 } 266 267 ctx->secure_interrupt_ongoing = false; 268 269 #if SPMD_SPM_AT_SEL2 270 cm_el2_sysregs_context_restore(NON_SECURE); 271 #else 272 cm_el1_sysregs_context_restore(NON_SECURE); 273 274 #if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS 275 simd_ctx_save(SECURE, false); 276 simd_ctx_restore(NON_SECURE); 277 #endif 278 #endif 279 cm_set_next_eret_context(NON_SECURE); 280 281 SMC_RET0(&ctx->cpu_ctx); 282 } 283 284 #if (EL3_EXCEPTION_HANDLING == 0) 285 /******************************************************************************* 286 * spmd_group0_interrupt_handler_nwd 287 * Group0 secure interrupt in the normal world are trapped to EL3. Delegate the 288 * handling of the interrupt to the platform handler, and return only upon 289 * successfully handling the Group0 interrupt. 290 ******************************************************************************/ 291 static uint64_t spmd_group0_interrupt_handler_nwd(uint32_t id, 292 uint32_t flags, 293 void *handle, 294 void *cookie) 295 { 296 uint32_t intid, intr_raw; 297 298 /* Sanity check the security state when the exception was generated. */ 299 assert(get_interrupt_src_ss(flags) == NON_SECURE); 300 301 /* Sanity check the pointer to this cpu's context. */ 302 assert(handle == cm_get_context(NON_SECURE)); 303 304 assert(id == INTR_ID_UNAVAILABLE); 305 306 assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3); 307 308 intr_raw = plat_ic_acknowledge_interrupt(); 309 intid = plat_ic_get_interrupt_id(intr_raw); 310 311 if (intid == INTR_ID_UNAVAILABLE) { 312 return 0U; 313 } 314 315 if (plat_spmd_handle_group0_interrupt(intid) < 0) { 316 ERROR("Group0 interrupt %u not handled\n", intid); 317 panic(); 318 } 319 320 /* Deactivate the corresponding Group0 interrupt. */ 321 plat_ic_end_of_interrupt(intid); 322 323 return 0U; 324 } 325 #endif 326 327 /******************************************************************************* 328 * spmd_handle_group0_intr_swd 329 * SPMC delegates handling of Group0 secure interrupt to EL3 firmware using 330 * FFA_EL3_INTR_HANDLE SMC call. Further, SPMD delegates the handling of the 331 * interrupt to the platform handler, and returns only upon successfully 332 * handling the Group0 interrupt. 333 ******************************************************************************/ 334 static uint64_t spmd_handle_group0_intr_swd(void *handle) 335 { 336 uint32_t intid, intr_raw; 337 338 /* Sanity check the pointer to this cpu's context */ 339 assert(handle == cm_get_context(SECURE)); 340 341 assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3); 342 343 intr_raw = plat_ic_acknowledge_interrupt(); 344 intid = plat_ic_get_interrupt_id(intr_raw); 345 346 if (intid == INTR_ID_UNAVAILABLE) { 347 return 0U; 348 } 349 350 /* 351 * TODO: Currently due to a limitation in SPMD implementation, the 352 * platform handler is expected to not delegate handling to NWd while 353 * processing Group0 secure interrupt. 354 */ 355 if (plat_spmd_handle_group0_interrupt(intid) < 0) { 356 /* Group0 interrupt was not handled by the platform. */ 357 ERROR("Group0 interrupt %u not handled\n", intid); 358 panic(); 359 } 360 361 /* Deactivate the corresponding Group0 interrupt. */ 362 plat_ic_end_of_interrupt(intid); 363 364 /* Return success. */ 365 SMC_RET8(handle, FFA_SUCCESS_SMC32, FFA_PARAM_MBZ, FFA_PARAM_MBZ, 366 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, 367 FFA_PARAM_MBZ); 368 } 369 370 #if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 371 static int spmd_dynamic_map_mem(uintptr_t base_addr, size_t size, 372 unsigned int attr, uintptr_t *align_addr, 373 size_t *align_size) 374 { 375 uintptr_t base_addr_align; 376 size_t mapped_size_align; 377 int rc; 378 379 /* Page aligned address and size if necessary */ 380 base_addr_align = page_align(base_addr, DOWN); 381 mapped_size_align = page_align(size, UP); 382 383 if ((base_addr != base_addr_align) && 384 (size == mapped_size_align)) { 385 mapped_size_align += PAGE_SIZE; 386 } 387 388 /* 389 * Map dynamically given region with its aligned base address and 390 * size 391 */ 392 rc = mmap_add_dynamic_region((unsigned long long)base_addr_align, 393 base_addr_align, 394 mapped_size_align, 395 attr); 396 if (rc == 0) { 397 *align_addr = base_addr_align; 398 *align_size = mapped_size_align; 399 } 400 401 return rc; 402 } 403 404 static void spmd_do_sec_cpy(uintptr_t root_base_addr, uintptr_t sec_base_addr, 405 size_t size) 406 { 407 uintptr_t root_base_addr_align, sec_base_addr_align; 408 size_t root_mapped_size_align, sec_mapped_size_align; 409 int rc; 410 411 assert(root_base_addr != 0UL); 412 assert(sec_base_addr != 0UL); 413 assert(size != 0UL); 414 415 /* Map the memory with required attributes */ 416 rc = spmd_dynamic_map_mem(root_base_addr, size, MT_RO_DATA | MT_ROOT, 417 &root_base_addr_align, 418 &root_mapped_size_align); 419 if (rc != 0) { 420 ERROR("%s %s %lu (%d)\n", "Error while mapping", "root region", 421 root_base_addr, rc); 422 panic(); 423 } 424 425 rc = spmd_dynamic_map_mem(sec_base_addr, size, MT_RW_DATA | MT_SECURE, 426 &sec_base_addr_align, &sec_mapped_size_align); 427 if (rc != 0) { 428 ERROR("%s %s %lu (%d)\n", "Error while mapping", 429 "secure region", sec_base_addr, rc); 430 panic(); 431 } 432 433 /* Do copy operation */ 434 (void)memcpy((void *)sec_base_addr, (void *)root_base_addr, size); 435 436 /* Unmap root memory region */ 437 rc = mmap_remove_dynamic_region(root_base_addr_align, 438 root_mapped_size_align); 439 if (rc != 0) { 440 ERROR("%s %s %lu (%d)\n", "Error while unmapping", 441 "root region", root_base_addr_align, rc); 442 panic(); 443 } 444 445 /* Unmap secure memory region */ 446 rc = mmap_remove_dynamic_region(sec_base_addr_align, 447 sec_mapped_size_align); 448 if (rc != 0) { 449 ERROR("%s %s %lu (%d)\n", "Error while unmapping", 450 "secure region", sec_base_addr_align, rc); 451 panic(); 452 } 453 } 454 #endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */ 455 456 /******************************************************************************* 457 * Loads SPMC manifest and inits SPMC. 458 ******************************************************************************/ 459 static int spmd_spmc_init(void *pm_addr) 460 { 461 cpu_context_t *cpu_ctx; 462 unsigned int core_id; 463 uint32_t ep_attr, flags; 464 int rc; 465 const struct dyn_cfg_dtb_info_t *image_info __unused; 466 467 /* Load the SPM Core manifest */ 468 rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr); 469 if (rc != 0) { 470 WARN("No or invalid SPM Core manifest image provided by BL2\n"); 471 return rc; 472 } 473 474 /* 475 * Ensure that the SPM Core version is compatible with the SPM 476 * Dispatcher version. 477 */ 478 if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) || 479 (spmc_attrs.minor_version > FFA_VERSION_MINOR)) { 480 WARN("Unsupported FFA version (%u.%u)\n", 481 spmc_attrs.major_version, spmc_attrs.minor_version); 482 return -EINVAL; 483 } 484 485 VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version, 486 spmc_attrs.minor_version); 487 488 VERBOSE("SPM Core run time EL%x.\n", 489 SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1); 490 491 /* Validate the SPMC ID, Ensure high bit is set */ 492 if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) & 493 SPMC_SECURE_ID_MASK) == 0U) { 494 WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id); 495 return -EINVAL; 496 } 497 498 /* Validate the SPM Core execution state */ 499 if ((spmc_attrs.exec_state != MODE_RW_64) && 500 (spmc_attrs.exec_state != MODE_RW_32)) { 501 WARN("Unsupported %s%x.\n", "SPM Core execution state 0x", 502 spmc_attrs.exec_state); 503 return -EINVAL; 504 } 505 506 VERBOSE("%s%x.\n", "SPM Core execution state 0x", 507 spmc_attrs.exec_state); 508 509 #if SPMD_SPM_AT_SEL2 510 /* Ensure manifest has not requested AArch32 state in S-EL2 */ 511 if (spmc_attrs.exec_state == MODE_RW_32) { 512 WARN("AArch32 state at S-EL2 is not supported.\n"); 513 return -EINVAL; 514 } 515 516 /* 517 * Check if S-EL2 is supported on this system if S-EL2 518 * is required for SPM 519 */ 520 if (!is_feat_sel2_supported()) { 521 WARN("SPM Core run time S-EL2 is not supported.\n"); 522 return -EINVAL; 523 } 524 #endif /* SPMD_SPM_AT_SEL2 */ 525 526 /* Initialise an entrypoint to set up the CPU context */ 527 ep_attr = SECURE | EP_ST_ENABLE; 528 if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) { 529 ep_attr |= EP_EE_BIG; 530 } 531 532 SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr); 533 534 /* 535 * Populate SPSR for SPM Core based upon validated parameters from the 536 * manifest. 537 */ 538 if (spmc_attrs.exec_state == MODE_RW_32) { 539 spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM, 540 SPSR_E_LITTLE, 541 DAIF_FIQ_BIT | 542 DAIF_IRQ_BIT | 543 DAIF_ABT_BIT); 544 } else { 545 546 #if SPMD_SPM_AT_SEL2 547 static const uint32_t runtime_el = MODE_EL2; 548 #else 549 static const uint32_t runtime_el = MODE_EL1; 550 #endif 551 spmc_ep_info->spsr = SPSR_64(runtime_el, 552 MODE_SP_ELX, 553 DISABLE_ALL_EXCEPTIONS); 554 } 555 556 #if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 557 image_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TOS_FW_CONFIG_ID); 558 assert(image_info != NULL); 559 560 if ((image_info->config_addr == 0UL) || 561 (image_info->secondary_config_addr == 0UL) || 562 (image_info->config_max_size == 0UL)) { 563 return -EINVAL; 564 } 565 566 /* Copy manifest from root->secure region */ 567 spmd_do_sec_cpy(image_info->config_addr, 568 image_info->secondary_config_addr, 569 image_info->config_max_size); 570 571 /* Update ep info of BL32 */ 572 assert(spmc_ep_info != NULL); 573 spmc_ep_info->args.arg0 = image_info->secondary_config_addr; 574 #endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */ 575 576 /* Set an initial SPMC context state for all cores. */ 577 for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) { 578 spm_core_context[core_id].state = SPMC_STATE_OFF; 579 580 /* Setup an initial cpu context for the SPMC. */ 581 cpu_ctx = &spm_core_context[core_id].cpu_ctx; 582 cm_setup_context(cpu_ctx, spmc_ep_info); 583 584 /* 585 * Pass the core linear ID to the SPMC through x4. 586 * (TF-A implementation defined behavior helping 587 * a legacy TOS migration to adopt FF-A). 588 */ 589 write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id); 590 } 591 592 /* Register power management hooks with PSCI */ 593 psci_register_spd_pm_hook(&spmd_pm); 594 595 /* Register init function for deferred init. */ 596 bl31_register_bl32_init(&spmd_init); 597 598 INFO("SPM Core setup done.\n"); 599 600 /* 601 * Register an interrupt handler routing secure interrupts to SPMD 602 * while the NWd is running. 603 */ 604 flags = 0; 605 set_interrupt_rm_flag(flags, NON_SECURE); 606 rc = register_interrupt_type_handler(INTR_TYPE_S_EL1, 607 spmd_secure_interrupt_handler, 608 flags); 609 if (rc != 0) { 610 panic(); 611 } 612 613 /* 614 * Permit configurations where the SPM resides at S-EL1/2 and upon a 615 * Group0 interrupt triggering while the normal world runs, the 616 * interrupt is routed either through the EHF or directly to the SPMD: 617 * 618 * EL3_EXCEPTION_HANDLING=0: the Group0 interrupt is routed to the SPMD 619 * for handling by spmd_group0_interrupt_handler_nwd. 620 * 621 * EL3_EXCEPTION_HANDLING=1: the Group0 interrupt is routed to the EHF. 622 * 623 */ 624 #if (EL3_EXCEPTION_HANDLING == 0) 625 /* 626 * If EL3 interrupts are supported by the platform, register an 627 * interrupt handler routing Group0 interrupts to SPMD while the NWd is 628 * running. 629 */ 630 if (plat_ic_has_interrupt_type(INTR_TYPE_EL3)) { 631 rc = register_interrupt_type_handler(INTR_TYPE_EL3, 632 spmd_group0_interrupt_handler_nwd, 633 flags); 634 if (rc != 0) { 635 panic(); 636 } 637 } 638 #endif 639 640 return 0; 641 } 642 643 /******************************************************************************* 644 * Initialize context of SPM Core. 645 ******************************************************************************/ 646 int spmd_setup(void) 647 { 648 int rc; 649 void *spmc_manifest; 650 struct transfer_list_header *tl __maybe_unused; 651 struct transfer_list_entry *te __maybe_unused; 652 653 /* 654 * If the SPMC is at EL3, then just initialise it directly. The 655 * shenanigans of when it is at a lower EL are not needed. 656 */ 657 if (is_spmc_at_el3()) { 658 /* Allow the SPMC to populate its attributes directly. */ 659 spmc_populate_attrs(&spmc_attrs); 660 661 rc = spmc_setup(); 662 if (rc != 0) { 663 WARN("SPMC initialisation failed 0x%x.\n", rc); 664 } 665 return 0; 666 } 667 668 spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE); 669 if (spmc_ep_info == NULL) { 670 WARN("No SPM Core image provided by BL2 boot loader.\n"); 671 return 0; 672 } 673 674 /* Under no circumstances will this parameter be 0 */ 675 assert(spmc_ep_info->pc != 0ULL); 676 677 678 #if TRANSFER_LIST && !RESET_TO_BL31 679 tl = (struct transfer_list_header *)spmc_ep_info->args.arg3; 680 te = transfer_list_find(tl, TL_TAG_DT_SPMC_MANIFEST); 681 if (te == NULL) { 682 WARN("SPM Core manifest absent in TRANSFER_LIST.\n"); 683 return -ENOENT; 684 } 685 686 spmc_manifest = (void *)transfer_list_entry_data(te); 687 688 /* Change the DT in the handoff */ 689 if (sizeof(spmc_ep_info->args.arg0) == sizeof(uint64_t)) { 690 spmc_ep_info->args.arg0 = (uintptr_t)spmc_manifest; 691 } else { 692 spmc_ep_info->args.arg3 = (uintptr_t)spmc_manifest; 693 } 694 #else 695 /* 696 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will 697 * be used as a manifest for the SPM Core at the next lower EL/mode. 698 */ 699 spmc_manifest = (void *)spmc_ep_info->args.arg0; 700 #endif 701 702 if (spmc_manifest == NULL) { 703 WARN("Invalid or absent SPM Core manifest.\n"); 704 return 0; 705 } 706 707 /* Load manifest, init SPMC */ 708 rc = spmd_spmc_init(spmc_manifest); 709 if (rc != 0) { 710 WARN("Booting device without SPM initialization.\n"); 711 } 712 713 return 0; 714 } 715 716 /******************************************************************************* 717 * Forward FF-A SMCs to the other security state. 718 ******************************************************************************/ 719 uint64_t spmd_smc_switch_state(uint32_t smc_fid, 720 bool secure_origin, 721 uint64_t x1, 722 uint64_t x2, 723 uint64_t x3, 724 uint64_t x4, 725 void *handle, 726 uint64_t flags, 727 uint32_t secure_ffa_version) 728 { 729 unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE; 730 unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE; 731 uint32_t version_in = (secure_origin) ? secure_ffa_version : nonsecure_ffa_version; 732 uint32_t version_out = (!secure_origin) ? secure_ffa_version : nonsecure_ffa_version; 733 void *ctx_out; 734 735 #if SPMD_SPM_AT_SEL2 736 if ((secure_state_out == SECURE) && (is_sve_hint_set(flags) == true)) { 737 /* 738 * Set the SVE hint bit in x0 and pass to the lower secure EL, 739 * if it was set by the caller. 740 */ 741 smc_fid |= (FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT); 742 } 743 #endif 744 745 /* Save incoming security state */ 746 #if SPMD_SPM_AT_SEL2 747 cm_el2_sysregs_context_save(secure_state_in); 748 #else 749 cm_el1_sysregs_context_save(secure_state_in); 750 #if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS 751 /* Forward the hint bit denoting the absence of SVE live state. */ 752 simd_ctx_save(secure_state_in, (!secure_origin && (is_sve_hint_set(flags) == true))); 753 #endif 754 #endif 755 756 /* Restore outgoing security state */ 757 #if SPMD_SPM_AT_SEL2 758 cm_el2_sysregs_context_restore(secure_state_out); 759 #else 760 cm_el1_sysregs_context_restore(secure_state_out); 761 #if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS 762 simd_ctx_restore(secure_state_out); 763 #endif 764 #endif 765 cm_set_next_eret_context(secure_state_out); 766 767 ctx_out = cm_get_context(secure_state_out); 768 if (smc_fid == FFA_NORMAL_WORLD_RESUME) { 769 SMC_RET0(ctx_out); 770 } 771 772 if ((GET_SMC_CC(smc_fid) == SMC_64) && (version_out >= MAKE_FFA_VERSION(U(1), U(2)))) { 773 if (version_in < MAKE_FFA_VERSION(U(1), U(2))) { 774 /* FFA version mismatch, with dest >= 1.2 - set outgoing x8-x17 to zero */ 775 SMC_RET18(ctx_out, smc_fid, x1, x2, x3, x4, 776 SMC_GET_GP(handle, CTX_GPREG_X5), 777 SMC_GET_GP(handle, CTX_GPREG_X6), 778 SMC_GET_GP(handle, CTX_GPREG_X7), 779 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); 780 } else { 781 /* Both FFA versions >= 1.2 - pass incoming x8-x17 to dest */ 782 SMC_RET18(ctx_out, smc_fid, x1, x2, x3, x4, 783 SMC_GET_GP(handle, CTX_GPREG_X5), 784 SMC_GET_GP(handle, CTX_GPREG_X6), 785 SMC_GET_GP(handle, CTX_GPREG_X7), 786 SMC_GET_GP(handle, CTX_GPREG_X8), 787 SMC_GET_GP(handle, CTX_GPREG_X9), 788 SMC_GET_GP(handle, CTX_GPREG_X10), 789 SMC_GET_GP(handle, CTX_GPREG_X11), 790 SMC_GET_GP(handle, CTX_GPREG_X12), 791 SMC_GET_GP(handle, CTX_GPREG_X13), 792 SMC_GET_GP(handle, CTX_GPREG_X14), 793 SMC_GET_GP(handle, CTX_GPREG_X15), 794 SMC_GET_GP(handle, CTX_GPREG_X16), 795 SMC_GET_GP(handle, CTX_GPREG_X17) 796 ); 797 } 798 } else { 799 /* 32 bit call or dest has FFA version < 1.2 or unknown */ 800 SMC_RET8(ctx_out, smc_fid, x1, x2, x3, x4, 801 SMC_GET_GP(handle, CTX_GPREG_X5), 802 SMC_GET_GP(handle, CTX_GPREG_X6), 803 SMC_GET_GP(handle, CTX_GPREG_X7)); 804 } 805 } 806 807 /******************************************************************************* 808 * Forward SMCs to the other security state. 809 ******************************************************************************/ 810 static uint64_t spmd_smc_forward(uint32_t smc_fid, 811 bool secure_origin, 812 uint64_t x1, 813 uint64_t x2, 814 uint64_t x3, 815 uint64_t x4, 816 void *cookie, 817 void *handle, 818 uint64_t flags, 819 uint32_t secure_ffa_version) 820 { 821 if (is_spmc_at_el3() && !secure_origin) { 822 return spmc_smc_handler(smc_fid, secure_origin, x1, x2, x3, x4, 823 cookie, handle, flags); 824 } 825 826 return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2, x3, x4, 827 handle, flags, secure_ffa_version); 828 829 } 830 831 /******************************************************************************* 832 * Return FFA_ERROR with specified error code 833 ******************************************************************************/ 834 uint64_t spmd_ffa_error_return(void *handle, int error_code) 835 { 836 SMC_RET8(handle, (uint32_t) FFA_ERROR, 837 FFA_TARGET_INFO_MBZ, (uint32_t)error_code, 838 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, 839 FFA_PARAM_MBZ, FFA_PARAM_MBZ); 840 } 841 842 /******************************************************************************* 843 * spmd_check_address_in_binary_image 844 ******************************************************************************/ 845 bool spmd_check_address_in_binary_image(uint64_t address) 846 { 847 assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size)); 848 849 return ((address >= spmc_attrs.load_address) && 850 (address < (spmc_attrs.load_address + spmc_attrs.binary_size))); 851 } 852 853 /****************************************************************************** 854 * spmd_is_spmc_message 855 *****************************************************************************/ 856 static bool spmd_is_spmc_message(unsigned int ep) 857 { 858 if (is_spmc_at_el3()) { 859 return false; 860 } 861 862 return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID) 863 && (ffa_endpoint_source(ep) == spmc_attrs.spmc_id)); 864 } 865 866 /******************************************************************************* 867 * This function forwards FF-A SMCs to either the main SPMD handler or the 868 * SPMC at EL3, depending on the origin security state, if enabled. 869 ******************************************************************************/ 870 uint64_t spmd_ffa_smc_handler(uint32_t smc_fid, 871 uint64_t x1, 872 uint64_t x2, 873 uint64_t x3, 874 uint64_t x4, 875 void *cookie, 876 void *handle, 877 uint64_t flags) 878 { 879 if (is_spmc_at_el3()) { 880 /* 881 * If we have an SPMC at EL3 allow handling of the SMC first. 882 * The SPMC will call back through to SPMD handler if required. 883 */ 884 if (is_caller_secure(flags)) { 885 return spmc_smc_handler(smc_fid, 886 is_caller_secure(flags), 887 x1, x2, x3, x4, cookie, 888 handle, flags); 889 } 890 } 891 return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie, 892 handle, flags, spmc_nwd_ffa_version); 893 } 894 895 static uint32_t get_common_ffa_version(uint32_t secure_ffa_version) 896 { 897 if (secure_ffa_version <= nonsecure_ffa_version) { 898 return secure_ffa_version; 899 } else { 900 return nonsecure_ffa_version; 901 } 902 } 903 904 /******************************************************************************* 905 * This function handles all SMCs in the range reserved for FFA. Each call is 906 * either forwarded to the other security state or handled by the SPM dispatcher 907 ******************************************************************************/ 908 uint64_t spmd_smc_handler(uint32_t smc_fid, 909 uint64_t x1, 910 uint64_t x2, 911 uint64_t x3, 912 uint64_t x4, 913 void *cookie, 914 void *handle, 915 uint64_t flags, 916 uint32_t secure_ffa_version) 917 { 918 spmd_spm_core_context_t *ctx = spmd_get_context(); 919 bool secure_origin; 920 int ret; 921 uint32_t input_version; 922 923 /* Determine which security state this SMC originated from */ 924 secure_origin = is_caller_secure(flags); 925 926 VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 927 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n", 928 plat_my_core_pos(), smc_fid, x1, x2, x3, x4, 929 SMC_GET_GP(handle, CTX_GPREG_X5), 930 SMC_GET_GP(handle, CTX_GPREG_X6), 931 SMC_GET_GP(handle, CTX_GPREG_X7)); 932 933 /* 934 * If there is an on-going info regs from EL3 SPMD LP, unconditionally 935 * return, we don't expect any other FF-A ABIs to be called between 936 * calls to FFA_PARTITION_INFO_GET_REGS. 937 */ 938 if (is_spmd_logical_sp_info_regs_req_in_progress(ctx)) { 939 assert(secure_origin); 940 spmd_spm_core_sync_exit(0ULL); 941 } 942 943 if ((!secure_origin) && (smc_fid != FFA_VERSION)) { 944 /* 945 * Once the caller invokes any FF-A ABI other than FFA_VERSION, 946 * the version negotiation phase is complete. 947 */ 948 nonsecure_version_negotiated = true; 949 } 950 951 switch (smc_fid) { 952 case FFA_ERROR: 953 /* 954 * Check if this is the first invocation of this interface on 955 * this CPU. If so, then indicate that the SPM Core initialised 956 * unsuccessfully. 957 */ 958 if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) { 959 spmd_spm_core_sync_exit(x2); 960 } 961 962 /* 963 * Perform a synchronous exit: 964 * 1. If there was an SPMD logical partition direct request on-going, 965 * return back to the SPMD logical partition so the error can be 966 * consumed. 967 * 2. SPMC sent FFA_ERROR in response to a power management 968 * operation sent through direct request. 969 */ 970 if (is_spmd_logical_sp_dir_req_in_progress(ctx) || 971 ctx->psci_operation_ongoing) { 972 assert(secure_origin); 973 spmd_spm_core_sync_exit(0ULL); 974 } 975 976 return spmd_smc_forward(smc_fid, secure_origin, 977 x1, x2, x3, x4, cookie, 978 handle, flags, secure_ffa_version); 979 break; /* not reached */ 980 981 case FFA_VERSION: 982 input_version = (uint32_t)(0xFFFFFFFF & x1); 983 /* 984 * If caller is secure and SPMC was initialized, 985 * return FFA_VERSION of SPMD. 986 * If caller is non secure and SPMC was initialized, 987 * forward to the EL3 SPMC if enabled, otherwise send a 988 * framework message to the SPMC at the lower EL to 989 * negotiate a version that is compatible between the 990 * normal world and the SPMC. 991 * Sanity check to "input_version". 992 * If the EL3 SPMC is enabled, ignore the SPMC state as 993 * this is not used. 994 */ 995 if ((input_version & FFA_VERSION_BIT31_MASK) || 996 (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) { 997 ret = FFA_ERROR_NOT_SUPPORTED; 998 } else if (!secure_origin) { 999 if (!nonsecure_version_negotiated) { 1000 /* 1001 * Once an FF-A version has been negotiated 1002 * between a caller and a callee, the version 1003 * may not be changed for the lifetime of 1004 * the calling component. 1005 */ 1006 nonsecure_ffa_version = input_version; 1007 } 1008 1009 if (is_spmc_at_el3()) { 1010 /* 1011 * Forward the call directly to the EL3 SPMC, if 1012 * enabled, as we don't need to wrap the call in 1013 * a direct request. 1014 */ 1015 spmc_nwd_ffa_version = 1016 MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR); 1017 return spmc_smc_handler(smc_fid, secure_origin, 1018 x1, x2, x3, x4, cookie, 1019 handle, flags); 1020 } 1021 1022 gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx); 1023 uint64_t rc; 1024 1025 if (spmc_attrs.major_version == 1 && 1026 spmc_attrs.minor_version == 0) { 1027 ret = MAKE_FFA_VERSION(spmc_attrs.major_version, 1028 spmc_attrs.minor_version); 1029 spmc_nwd_ffa_version = (uint32_t)ret; 1030 SMC_RET8(handle, (uint32_t)ret, 1031 FFA_TARGET_INFO_MBZ, 1032 FFA_TARGET_INFO_MBZ, 1033 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1034 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1035 FFA_PARAM_MBZ); 1036 break; 1037 } 1038 /* Save non-secure system registers context */ 1039 #if SPMD_SPM_AT_SEL2 1040 cm_el2_sysregs_context_save(NON_SECURE); 1041 #else 1042 cm_el1_sysregs_context_save(NON_SECURE); 1043 #endif 1044 1045 /* 1046 * The incoming request has FFA_VERSION as X0 smc_fid 1047 * and requested version in x1. Prepare a direct request 1048 * from SPMD to SPMC with FFA_VERSION framework function 1049 * identifier in X2 and requested version in X3. 1050 */ 1051 spmd_build_spmc_message(gpregs, 1052 SPMD_FWK_MSG_FFA_VERSION_REQ, 1053 input_version); 1054 1055 /* 1056 * Ensure x8-x17 NS GP register values are untouched when returning 1057 * from the SPMC. 1058 */ 1059 write_ctx_reg(gpregs, CTX_GPREG_X8, SMC_GET_GP(handle, CTX_GPREG_X8)); 1060 write_ctx_reg(gpregs, CTX_GPREG_X9, SMC_GET_GP(handle, CTX_GPREG_X9)); 1061 write_ctx_reg(gpregs, CTX_GPREG_X10, SMC_GET_GP(handle, CTX_GPREG_X10)); 1062 write_ctx_reg(gpregs, CTX_GPREG_X11, SMC_GET_GP(handle, CTX_GPREG_X11)); 1063 write_ctx_reg(gpregs, CTX_GPREG_X12, SMC_GET_GP(handle, CTX_GPREG_X12)); 1064 write_ctx_reg(gpregs, CTX_GPREG_X13, SMC_GET_GP(handle, CTX_GPREG_X13)); 1065 write_ctx_reg(gpregs, CTX_GPREG_X14, SMC_GET_GP(handle, CTX_GPREG_X14)); 1066 write_ctx_reg(gpregs, CTX_GPREG_X15, SMC_GET_GP(handle, CTX_GPREG_X15)); 1067 write_ctx_reg(gpregs, CTX_GPREG_X16, SMC_GET_GP(handle, CTX_GPREG_X16)); 1068 write_ctx_reg(gpregs, CTX_GPREG_X17, SMC_GET_GP(handle, CTX_GPREG_X17)); 1069 1070 rc = spmd_spm_core_sync_entry(ctx); 1071 1072 if ((rc != 0ULL) || 1073 (SMC_GET_GP(gpregs, CTX_GPREG_X0) != 1074 FFA_MSG_SEND_DIRECT_RESP_SMC32) || 1075 (SMC_GET_GP(gpregs, CTX_GPREG_X2) != 1076 (FFA_FWK_MSG_BIT | 1077 SPMD_FWK_MSG_FFA_VERSION_RESP))) { 1078 ERROR("Failed to forward FFA_VERSION\n"); 1079 ret = FFA_ERROR_NOT_SUPPORTED; 1080 } else { 1081 ret = SMC_GET_GP(gpregs, CTX_GPREG_X3); 1082 spmc_nwd_ffa_version = (uint32_t)ret; 1083 } 1084 1085 /* 1086 * x0-x4 are updated by spmd_smc_forward below. 1087 * Zero out x5-x7 in the FFA_VERSION response. 1088 */ 1089 write_ctx_reg(gpregs, CTX_GPREG_X5, 0); 1090 write_ctx_reg(gpregs, CTX_GPREG_X6, 0); 1091 write_ctx_reg(gpregs, CTX_GPREG_X7, 0); 1092 1093 /* 1094 * Return here after SPMC has handled FFA_VERSION. 1095 * The returned SPMC version is held in X3. 1096 * Forward this version in X0 to the non-secure caller. 1097 */ 1098 return spmd_smc_forward(ret, true, FFA_PARAM_MBZ, 1099 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1100 FFA_PARAM_MBZ, cookie, gpregs, 1101 flags, spmc_nwd_ffa_version); 1102 } else { 1103 ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, 1104 FFA_VERSION_MINOR); 1105 } 1106 1107 SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ, 1108 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1109 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ); 1110 break; /* not reached */ 1111 1112 case FFA_FEATURES: 1113 /* 1114 * This is an optional interface. Do the minimal checks and 1115 * forward to SPM Core which will handle it if implemented. 1116 */ 1117 1118 /* Forward SMC from Normal world to the SPM Core */ 1119 if (!secure_origin) { 1120 return spmd_smc_forward(smc_fid, secure_origin, 1121 x1, x2, x3, x4, cookie, 1122 handle, flags, secure_ffa_version); 1123 } 1124 1125 /* 1126 * Return success if call was from secure world i.e. all 1127 * FFA functions are supported. This is essentially a 1128 * nop. 1129 */ 1130 SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4, 1131 SMC_GET_GP(handle, CTX_GPREG_X5), 1132 SMC_GET_GP(handle, CTX_GPREG_X6), 1133 SMC_GET_GP(handle, CTX_GPREG_X7)); 1134 1135 break; /* not reached */ 1136 1137 case FFA_ID_GET: 1138 /* 1139 * Returns the ID of the calling FFA component. 1140 */ 1141 if (!secure_origin) { 1142 SMC_RET8(handle, FFA_SUCCESS_SMC32, 1143 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID, 1144 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1145 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1146 FFA_PARAM_MBZ); 1147 } 1148 1149 SMC_RET8(handle, FFA_SUCCESS_SMC32, 1150 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id, 1151 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1152 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1153 FFA_PARAM_MBZ); 1154 1155 break; /* not reached */ 1156 1157 case FFA_SECONDARY_EP_REGISTER_SMC64: 1158 if (secure_origin) { 1159 ret = spmd_pm_secondary_ep_register(x1); 1160 1161 if (ret < 0) { 1162 SMC_RET8(handle, FFA_ERROR_SMC64, 1163 FFA_TARGET_INFO_MBZ, ret, 1164 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1165 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1166 FFA_PARAM_MBZ); 1167 } else { 1168 SMC_RET8(handle, FFA_SUCCESS_SMC64, 1169 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, 1170 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1171 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1172 FFA_PARAM_MBZ); 1173 } 1174 } 1175 1176 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1177 break; /* Not reached */ 1178 1179 case FFA_SPM_ID_GET: 1180 if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) { 1181 return spmd_ffa_error_return(handle, 1182 FFA_ERROR_NOT_SUPPORTED); 1183 } 1184 /* 1185 * Returns the ID of the SPMC or SPMD depending on the FF-A 1186 * instance where this function is invoked 1187 */ 1188 if (!secure_origin) { 1189 SMC_RET8(handle, FFA_SUCCESS_SMC32, 1190 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id, 1191 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1192 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1193 FFA_PARAM_MBZ); 1194 } 1195 SMC_RET8(handle, FFA_SUCCESS_SMC32, 1196 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID, 1197 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1198 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1199 FFA_PARAM_MBZ); 1200 1201 break; /* not reached */ 1202 1203 case FFA_MSG_SEND_DIRECT_REQ2_SMC64: 1204 if (get_common_ffa_version(secure_ffa_version) < MAKE_FFA_VERSION(U(1), U(2))) { 1205 /* Call not supported at this version */ 1206 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1207 } 1208 /* fallthrough */ 1209 case FFA_MSG_SEND_DIRECT_REQ_SMC32: 1210 case FFA_MSG_SEND_DIRECT_REQ_SMC64: 1211 /* 1212 * Regardless of secure_origin, SPMD logical partitions cannot 1213 * handle direct messages. They can only initiate direct 1214 * messages and consume direct responses or errors. 1215 */ 1216 if (is_spmd_lp_id(ffa_endpoint_source(x1)) || 1217 is_spmd_lp_id(ffa_endpoint_destination(x1))) { 1218 return spmd_ffa_error_return(handle, 1219 FFA_ERROR_INVALID_PARAMETER 1220 ); 1221 } 1222 1223 /* 1224 * When there is an ongoing SPMD logical partition direct 1225 * request, there cannot be another direct request. Return 1226 * error in this case. Panic'ing is an option but that does 1227 * not provide the opportunity for caller to abort based on 1228 * error codes. 1229 */ 1230 if (is_spmd_logical_sp_dir_req_in_progress(ctx)) { 1231 assert(secure_origin); 1232 return spmd_ffa_error_return(handle, 1233 FFA_ERROR_DENIED); 1234 } 1235 1236 if (!secure_origin) { 1237 /* Validate source endpoint is non-secure for non-secure caller. */ 1238 if (ffa_is_secure_world_id(ffa_endpoint_source(x1))) { 1239 return spmd_ffa_error_return(handle, 1240 FFA_ERROR_INVALID_PARAMETER); 1241 } 1242 } 1243 if (secure_origin && spmd_is_spmc_message(x1)) { 1244 return spmd_ffa_error_return(handle, 1245 FFA_ERROR_DENIED); 1246 } else { 1247 /* Forward direct message to the other world */ 1248 return spmd_smc_forward(smc_fid, secure_origin, 1249 x1, x2, x3, x4, cookie, 1250 handle, flags, secure_ffa_version); 1251 } 1252 break; /* Not reached */ 1253 1254 case FFA_MSG_SEND_DIRECT_RESP2_SMC64: 1255 if (get_common_ffa_version(secure_ffa_version) < MAKE_FFA_VERSION(U(1), U(2))) { 1256 /* Call not supported at this version */ 1257 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1258 } 1259 /* fallthrough */ 1260 case FFA_MSG_SEND_DIRECT_RESP_SMC32: 1261 case FFA_MSG_SEND_DIRECT_RESP_SMC64: 1262 if (secure_origin && (spmd_is_spmc_message(x1) || 1263 is_spmd_logical_sp_dir_req_in_progress(ctx))) { 1264 spmd_spm_core_sync_exit(0ULL); 1265 } else { 1266 /* Forward direct message to the other world */ 1267 return spmd_smc_forward(smc_fid, secure_origin, 1268 x1, x2, x3, x4, cookie, 1269 handle, flags, secure_ffa_version); 1270 } 1271 break; /* Not reached */ 1272 case FFA_RX_RELEASE: 1273 case FFA_RXTX_MAP_SMC32: 1274 case FFA_RXTX_MAP_SMC64: 1275 case FFA_RXTX_UNMAP: 1276 case FFA_PARTITION_INFO_GET: 1277 #if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED 1278 case FFA_NOTIFICATION_BITMAP_CREATE: 1279 case FFA_NOTIFICATION_BITMAP_DESTROY: 1280 case FFA_NOTIFICATION_BIND: 1281 case FFA_NOTIFICATION_UNBIND: 1282 case FFA_NOTIFICATION_SET: 1283 case FFA_NOTIFICATION_GET: 1284 case FFA_NOTIFICATION_INFO_GET: 1285 case FFA_NOTIFICATION_INFO_GET_SMC64: 1286 case FFA_MSG_SEND2: 1287 case FFA_RX_ACQUIRE: 1288 case FFA_NS_RES_INFO_GET_SMC64: 1289 #endif 1290 case FFA_MSG_RUN: 1291 /* 1292 * Above calls should be invoked only by the Normal world and 1293 * must not be forwarded from Secure world to Normal world. 1294 */ 1295 if (secure_origin) { 1296 return spmd_ffa_error_return(handle, 1297 FFA_ERROR_NOT_SUPPORTED); 1298 } 1299 1300 /* Forward the call to the other world */ 1301 /* fallthrough */ 1302 case FFA_MSG_SEND: 1303 case FFA_MEM_DONATE_SMC32: 1304 case FFA_MEM_DONATE_SMC64: 1305 case FFA_MEM_LEND_SMC32: 1306 case FFA_MEM_LEND_SMC64: 1307 case FFA_MEM_SHARE_SMC32: 1308 case FFA_MEM_SHARE_SMC64: 1309 case FFA_MEM_RETRIEVE_REQ_SMC32: 1310 case FFA_MEM_RETRIEVE_REQ_SMC64: 1311 case FFA_MEM_RETRIEVE_RESP: 1312 case FFA_MEM_RELINQUISH: 1313 case FFA_MEM_RECLAIM: 1314 case FFA_MEM_FRAG_TX: 1315 case FFA_MEM_FRAG_RX: 1316 case FFA_SUCCESS_SMC32: 1317 case FFA_SUCCESS_SMC64: 1318 /* 1319 * If there is an ongoing direct request from an SPMD logical 1320 * partition, return an error. 1321 */ 1322 if (is_spmd_logical_sp_dir_req_in_progress(ctx)) { 1323 assert(secure_origin); 1324 return spmd_ffa_error_return(handle, 1325 FFA_ERROR_DENIED); 1326 } 1327 1328 return spmd_smc_forward(smc_fid, secure_origin, 1329 x1, x2, x3, x4, cookie, 1330 handle, flags, secure_ffa_version); 1331 break; /* not reached */ 1332 1333 case FFA_MSG_WAIT: 1334 /* 1335 * Check if this is the first invocation of this interface on 1336 * this CPU from the Secure world. If so, then indicate that the 1337 * SPM Core initialised successfully. 1338 */ 1339 if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) { 1340 spmd_spm_core_sync_exit(0ULL); 1341 } 1342 1343 /* Forward the call to the other world */ 1344 /* fallthrough */ 1345 case FFA_INTERRUPT: 1346 case FFA_MSG_YIELD: 1347 /* This interface must be invoked only by the Secure world */ 1348 if (!secure_origin) { 1349 return spmd_ffa_error_return(handle, 1350 FFA_ERROR_NOT_SUPPORTED); 1351 } 1352 1353 if (is_spmd_logical_sp_dir_req_in_progress(ctx)) { 1354 assert(secure_origin); 1355 return spmd_ffa_error_return(handle, 1356 FFA_ERROR_DENIED); 1357 } 1358 1359 return spmd_smc_forward(smc_fid, secure_origin, 1360 x1, x2, x3, x4, cookie, 1361 handle, flags, secure_ffa_version); 1362 break; /* not reached */ 1363 1364 case FFA_NORMAL_WORLD_RESUME: 1365 if (secure_origin && ctx->secure_interrupt_ongoing) { 1366 spmd_spm_core_sync_exit(0ULL); 1367 } else { 1368 return spmd_ffa_error_return(handle, FFA_ERROR_DENIED); 1369 } 1370 break; /* Not reached */ 1371 #if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED 1372 case FFA_PARTITION_INFO_GET_REGS_SMC64: 1373 if (secure_origin) { 1374 return spmd_el3_populate_logical_partition_info(handle, x1, 1375 x2, x3); 1376 } 1377 1378 /* Call only supported with SMCCC 1.2+ */ 1379 if (MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION) < 0x10002) { 1380 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1381 } 1382 1383 return spmd_smc_forward(smc_fid, secure_origin, 1384 x1, x2, x3, x4, cookie, 1385 handle, flags, secure_ffa_version); 1386 break; /* Not reached */ 1387 #endif 1388 case FFA_CONSOLE_LOG_SMC32: 1389 case FFA_CONSOLE_LOG_SMC64: 1390 /* This interface must not be forwarded to other worlds. */ 1391 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1392 break; /* not reached */ 1393 1394 case FFA_EL3_INTR_HANDLE: 1395 if (secure_origin) { 1396 return spmd_handle_group0_intr_swd(handle); 1397 } else { 1398 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1399 } 1400 case FFA_ABORT_SMC32: 1401 case FFA_ABORT_SMC64: 1402 /* This interface must be invoked only by the Secure world */ 1403 if (!secure_origin) { 1404 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1405 } 1406 1407 ERROR("SPMC encountered a fatal error. Aborting now\n"); 1408 panic(); 1409 1410 /* Not reached. */ 1411 SMC_RET0(handle); 1412 default: 1413 WARN("SPM: Unsupported call 0x%08x\n", smc_fid); 1414 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1415 } 1416 } 1417