/*
 * Copyright (c) 2020-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch/aarch64/arch_features.h>
#include <bl31/bl31.h>
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <common/tbbr/tbbr_img_def.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/fconf/fconf.h>
#include <lib/fconf/fconf_dyn_cfg_getter.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/el3_spmd_logical_sp.h>
#include <services/ffa_svc.h>
#include <services/spmc_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"
#if TRANSFER_LIST
#include <transfer_list.h>
#endif

/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information is read from its manifest if the SPMC is not
 * at EL3. Otherwise, it is populated from the SPMC directly.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * FFA version used by the non-secure endpoint.
 ******************************************************************************/
static uint32_t nonsecure_ffa_version;

/*******************************************************************************
 * Whether the normal world has finished negotiating its FFA version.
 ******************************************************************************/
static bool nonsecure_version_negotiated;

/*******************************************************************************
 * FFA version used by the SPMC, as seen by the normal world.
 ******************************************************************************/
static uint32_t spmc_nwd_ffa_version;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;

/*******************************************************************************
 * Helper returning the SPM Core context of the current CPU.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
	return &spm_core_context[plat_my_core_pos()];
}

/*******************************************************************************
 * SPM Core ID getter.
 ******************************************************************************/
uint16_t spmd_spmc_id_get(void)
{
	return spmc_attrs.spmc_id;
}

/*******************************************************************************
 * Static function declarations.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);

static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags,
				 uint32_t secure_ffa_version);
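
/*
 * For reference, the register layout programmed by spmd_build_spmc_message()
 * below is (SPMC ID 0x8000 used as an illustrative value only):
 *
 *   x0 = FFA_MSG_SEND_DIRECT_REQ_SMC32
 *   x1 = (SPMD_DIRECT_MSG_ENDPOINT_ID << 16) | 0x8000   source | destination
 *   x2 = BIT(31) | target_func                          framework message
 *   x3 = message                                        payload
 *   x4-x7 = 0
 */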

/******************************************************************************
 * Builds an SPMD to SPMC direct message request.
 *****************************************************************************/
void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target_func,
			     unsigned long long message)
{
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
	write_ctx_reg(gpregs, CTX_GPREG_X1,
		      (SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
		      spmd_spmc_id_get());
	write_ctx_reg(gpregs, CTX_GPREG_X2, BIT(31) | target_func);
	write_ctx_reg(gpregs, CTX_GPREG_X3, message);

	/* Zero out x4-x7 for the direct request emitted towards the SPMC. */
	write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X7, 0);
}

/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#else
	cm_el1_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/* Enter SPMC */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#else
	cm_el1_sysregs_context_save(SECURE);
#endif

	return rc;
}

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * originally called.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into the SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	panic();
}
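
/*
 * Illustrative pairing of the two helpers above: the SPMD enters the SPMC
 * with spmd_spm_core_sync_entry() and blocks until the SPMC issues an SMC
 * (e.g. FFA_MSG_WAIT at boot) that the SPMD handler converts into
 * spmd_spm_core_sync_exit(rc), resuming the original caller:
 *
 *   rc = spmd_spm_core_sync_entry(ctx);  <-- returns once the SPMC yields
 *                                            via spmd_spm_core_sync_exit(rc)
 */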

void spmd_setup_context(unsigned int core_id)
{
	cpu_context_t *cpu_ctx;

	spm_core_context[core_id].state = SPMC_STATE_OFF;

	/* Set up an initial CPU context for the SPMC. */
	cpu_ctx = &spm_core_context[core_id].cpu_ctx;
	cm_setup_context(cpu_ctx, spmc_ep_info);

	/*
	 * Pass the core linear ID to the SPMC through x4.
	 * (TF-A implementation defined behavior helping
	 * a legacy TOS migration to adopt FF-A).
	 */
	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
}

/*******************************************************************************
 * Jump to the SPM Core for the first time.
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;

	VERBOSE("SPM Core init start.\n");

	/* Primary boot core enters the SPMC for initialization. */
	ctx->state = SPMC_STATE_ON_PENDING;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc);
		return 0;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("SPM Core init end.\n");

	spmd_logical_sp_set_spmc_initialized();
	rc = spmd_logical_sp_init();
	if (rc != 0) {
		WARN("SPMD logical partitions failed to initialise.\n");
	}

	return 1;
}
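
/*
 * Note on the per-CPU SPMC state machine driven above: a core's context
 * moves OFF -> ON_PENDING (first synchronous entry initiated) -> ON (SPMC
 * signalled successful init through FFA_MSG_WAIT). An FFA_ERROR received
 * while ON_PENDING aborts the boot flow instead (see the FFA_ERROR case in
 * spmd_smc_handler()).
 */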

/*******************************************************************************
 * spmd_secure_interrupt_handler
 * Enter the SPMC for further handling of the secure interrupt by the SPMC
 * itself or a Secure Partition.
 ******************************************************************************/
static uint64_t spmd_secure_interrupt_handler(uint32_t id,
					      uint32_t flags,
					      void *handle,
					      void *cookie)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
	int64_t rc;

	/* Sanity check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering the SPMC */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(NON_SECURE);
#else
	cm_el1_sysregs_context_save(NON_SECURE);

#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	/*
	 * The hint bit denoting absence of SVE live state is effectively false
	 * in this scenario where execution was trapped to EL3 due to FIQ.
	 */
	simd_ctx_save(NON_SECURE, false);
	simd_ctx_restore(SECURE);
#endif
#endif

	/* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT);
	write_ctx_reg(gpregs, CTX_GPREG_X1, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X2, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X3, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X7, 0);

	/* Mark the current core as handling a secure interrupt. */
	ctx->secure_interrupt_ongoing = true;

	rc = spmd_spm_core_sync_entry(ctx);

	if (rc != 0ULL) {
		ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc,
		      plat_my_core_pos());
	}

	ctx->secure_interrupt_ongoing = false;

#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(NON_SECURE);
#else
	cm_el1_sysregs_context_restore(NON_SECURE);

#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	simd_ctx_save(SECURE, false);
	simd_ctx_restore(NON_SECURE);
#endif
#endif
	cm_set_next_eret_context(NON_SECURE);

	SMC_RET0(&ctx->cpu_ctx);
}

#if (EL3_EXCEPTION_HANDLING == 0)
/*******************************************************************************
 * spmd_group0_interrupt_handler_nwd
 * Group0 secure interrupts that fire while the normal world runs are trapped
 * to EL3. Delegate the handling of the interrupt to the platform handler, and
 * return only upon successfully handling the Group0 interrupt.
 ******************************************************************************/
static uint64_t spmd_group0_interrupt_handler_nwd(uint32_t id,
						  uint32_t flags,
						  void *handle,
						  void *cookie)
{
	uint32_t intid, intr_raw;

	/* Sanity check the security state when the exception was generated. */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context. */
	assert(handle == cm_get_context(NON_SECURE));

	assert(id == INTR_ID_UNAVAILABLE);

	assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3);

	intr_raw = plat_ic_acknowledge_interrupt();
	intid = plat_ic_get_interrupt_id(intr_raw);

	if (intid == INTR_ID_UNAVAILABLE) {
		return 0U;
	}

	if (plat_spmd_handle_group0_interrupt(intid) < 0) {
		ERROR("Group0 interrupt %u not handled\n", intid);
		panic();
	}

	/* Deactivate the corresponding Group0 interrupt. */
	plat_ic_end_of_interrupt(intid);

	return 0U;
}
#endif
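
/*
 * Illustrative flow for the handler below, derived from the SMC cases in
 * spmd_smc_handler(): while the secure world runs, a Group0 interrupt traps
 * to the SPMC, which delegates it to EL3 via the FFA_EL3_INTR_HANDLE SMC;
 * spmd_smc_handler() then routes the call here to invoke the platform
 * handler and finally returns FFA_SUCCESS to the SPMC.
 */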

/*******************************************************************************
 * spmd_handle_group0_intr_swd
 * The SPMC delegates handling of a Group0 secure interrupt to EL3 firmware
 * using the FFA_EL3_INTR_HANDLE SMC call. The SPMD in turn delegates the
 * handling of the interrupt to the platform handler, and returns only upon
 * successfully handling the Group0 interrupt.
 ******************************************************************************/
static uint64_t spmd_handle_group0_intr_swd(void *handle)
{
	uint32_t intid, intr_raw;

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(SECURE));

	assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3);

	intr_raw = plat_ic_acknowledge_interrupt();
	intid = plat_ic_get_interrupt_id(intr_raw);

	if (intid == INTR_ID_UNAVAILABLE) {
		return 0U;
	}

	/*
	 * TODO: Currently, due to a limitation in the SPMD implementation, the
	 * platform handler is expected to not delegate handling to the NWd
	 * while processing a Group0 secure interrupt.
	 */
	if (plat_spmd_handle_group0_interrupt(intid) < 0) {
		/* Group0 interrupt was not handled by the platform. */
		ERROR("Group0 interrupt %u not handled\n", intid);
		panic();
	}

	/* Deactivate the corresponding Group0 interrupt. */
	plat_ic_end_of_interrupt(intid);

	/* Return success. */
	SMC_RET8(handle, FFA_SUCCESS_SMC32, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ);
}

#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
static int spmd_dynamic_map_mem(uintptr_t base_addr, size_t size,
				unsigned int attr, uintptr_t *align_addr,
				size_t *align_size)
{
	uintptr_t base_addr_align;
	size_t mapped_size_align;
	int rc;

	/*
	 * Page-align the base address (down) and the size (up). Aligning the
	 * end of the region rather than the size alone ensures the mapping
	 * still covers the whole region when the unaligned base offset plus
	 * the requested size crosses an extra page boundary.
	 */
	base_addr_align = page_align(base_addr, DOWN);
	mapped_size_align = page_align(base_addr + size, UP) - base_addr_align;

	/*
	 * Map the given region dynamically with its aligned base address and
	 * size.
	 */
	rc = mmap_add_dynamic_region((unsigned long long)base_addr_align,
				     base_addr_align,
				     mapped_size_align,
				     attr);
	if (rc == 0) {
		*align_addr = base_addr_align;
		*align_size = mapped_size_align;
	}

	return rc;
}

static void spmd_do_sec_cpy(uintptr_t root_base_addr, uintptr_t sec_base_addr,
			    size_t size)
{
	uintptr_t root_base_addr_align, sec_base_addr_align;
	size_t root_mapped_size_align, sec_mapped_size_align;
	int rc;

	assert(root_base_addr != 0UL);
	assert(sec_base_addr != 0UL);
	assert(size != 0UL);

	/* Map the memory with the required attributes */
	rc = spmd_dynamic_map_mem(root_base_addr, size, MT_RO_DATA | MT_ROOT,
				  &root_base_addr_align,
				  &root_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while mapping", "root region",
		      root_base_addr, rc);
		panic();
	}

	rc = spmd_dynamic_map_mem(sec_base_addr, size, MT_RW_DATA | MT_SECURE,
				  &sec_base_addr_align,
				  &sec_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while mapping",
		      "secure region", sec_base_addr, rc);
		panic();
	}

	/* Do the copy operation */
	(void)memcpy((void *)sec_base_addr, (void *)root_base_addr, size);

	/* Unmap the root memory region */
	rc = mmap_remove_dynamic_region(root_base_addr_align,
					root_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while unmapping",
		      "root region", root_base_addr_align, rc);
		panic();
	}

	/* Unmap the secure memory region */
	rc = mmap_remove_dynamic_region(sec_base_addr_align,
					sec_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while unmapping",
		      "secure region", sec_base_addr_align, rc);
		panic();
	}
}
#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */
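
/*
 * Worked example of the alignment above, assuming a 4KB PAGE_SIZE and a
 * hypothetical region at 0x80001080 of size 0x1000: the base rounds down to
 * 0x80001000 and the region end (0x80002080) rounds up to 0x80003000, so two
 * pages (0x2000) are mapped even though the requested size was one page.
 */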

/*******************************************************************************
 * Loads the SPMC manifest and initialises the SPMC.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	uint32_t ep_attr, flags;
	int rc;
	const struct dyn_cfg_dtb_info_t *image_info __unused;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
		spmc_attrs.minor_version);
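
	/*
	 * Compatibility rule illustrated: an FF-A version is packed as
	 * (major << 16) | minor, so e.g. MAKE_FFA_VERSION(1, 2) == 0x10002.
	 * The check above accepts an SPMC whose major version matches the
	 * SPMD's and whose minor version does not exceed it, since minor
	 * revisions are backwards compatible within a major version.
	 */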

	VERBOSE("SPM Core run time EL%x.\n",
		SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID; ensure the high bit is set */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
	    SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}

	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("%s%x.\n", "SPM Core execution state 0x",
		spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure the manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * Check that S-EL2 is supported on this system, since S-EL2
	 * is required for SPM.
	 */
	if (!is_feat_sel2_supported()) {
		WARN("SPM Core run time S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);

	/*
	 * Populate the SPSR for the SPM Core based upon validated parameters
	 * from the manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {
#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
	image_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TOS_FW_CONFIG_ID);
	assert(image_info != NULL);

	if ((image_info->config_addr == 0UL) ||
	    (image_info->secondary_config_addr == 0UL) ||
	    (image_info->config_max_size == 0UL)) {
		return -EINVAL;
	}

	/* Copy the manifest from the root to the secure region */
	spmd_do_sec_cpy(image_info->config_addr,
			image_info->secondary_config_addr,
			image_info->config_max_size);

	/* Update the ep info of BL32 */
	assert(spmc_ep_info != NULL);
	spmc_ep_info->args.arg0 = image_info->secondary_config_addr;
#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */

	spmd_setup_context(plat_my_core_pos());

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmd_pm);

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spmd_init);

	INFO("SPM Core setup done.\n");

	/*
	 * Register an interrupt handler routing secure interrupts to the SPMD
	 * while the NWd is running.
	 */
	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					     spmd_secure_interrupt_handler,
					     flags);
	if (rc != 0) {
		panic();
	}

	/*
	 * Permit configurations where the SPM resides at S-EL1/2 and, upon a
	 * Group0 interrupt triggering while the normal world runs, the
	 * interrupt is routed either through the EHF or directly to the SPMD:
	 *
	 * EL3_EXCEPTION_HANDLING=0: the Group0 interrupt is routed to the SPMD
	 * for handling by spmd_group0_interrupt_handler_nwd.
	 *
	 * EL3_EXCEPTION_HANDLING=1: the Group0 interrupt is routed to the EHF.
	 */
#if (EL3_EXCEPTION_HANDLING == 0)
	/*
	 * If EL3 interrupts are supported by the platform, register an
	 * interrupt handler routing Group0 interrupts to the SPMD while the
	 * NWd is running.
	 */
	if (plat_ic_has_interrupt_type(INTR_TYPE_EL3)) {
		rc = register_interrupt_type_handler(INTR_TYPE_EL3,
						     spmd_group0_interrupt_handler_nwd,
						     flags);
		if (rc != 0) {
			panic();
		}
	}
#endif

	return 0;
}

/*******************************************************************************
 * Initialize the context of the SPM Core.
 ******************************************************************************/
int spmd_setup(void)
{
	int rc;
	void *spmc_manifest;
	struct transfer_list_header *tl __maybe_unused;
	struct transfer_list_entry *te __maybe_unused;

	/*
	 * If the SPMC is at EL3, then just initialise it directly. The
	 * shenanigans of when it is at a lower EL are not needed.
	 */
	if (is_spmc_at_el3()) {
		/* Allow the SPMC to populate its attributes directly. */
		spmc_populate_attrs(&spmc_attrs);

		rc = spmc_setup();
		if (rc != 0) {
			WARN("SPMC initialisation failed 0x%x.\n", rc);
		}
		return 0;
	}

	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (spmc_ep_info == NULL) {
		WARN("No SPM Core image provided by BL2 boot loader.\n");
		return 0;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(spmc_ep_info->pc != 0ULL);

#if TRANSFER_LIST && !RESET_TO_BL31
	tl = (struct transfer_list_header *)spmc_ep_info->args.arg3;
	te = transfer_list_find(tl, TL_TAG_DT_SPMC_MANIFEST);
	if (te == NULL) {
		WARN("SPM Core manifest absent in TRANSFER_LIST.\n");
		return -ENOENT;
	}

	spmc_manifest = (void *)transfer_list_entry_data(te);

	/* Change the DT in the handoff */
	if (sizeof(spmc_ep_info->args.arg0) == sizeof(uint64_t)) {
		spmc_ep_info->args.arg0 = (uintptr_t)spmc_manifest;
	} else {
		spmc_ep_info->args.arg3 = (uintptr_t)spmc_manifest;
	}
#else
	/*
	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
	 * be used as a manifest for the SPM Core at the next lower EL/mode.
	 */
	spmc_manifest = (void *)spmc_ep_info->args.arg0;
#endif

	if (spmc_manifest == NULL) {
		WARN("Invalid or absent SPM Core manifest.\n");
		return 0;
	}

	/* Load the manifest and initialise the SPMC */
	rc = spmd_spmc_init(spmc_manifest);
	if (rc != 0) {
		WARN("Booting device without SPM initialization.\n");
	}

	return 0;
}
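
/*
 * The world switch below follows the usual EL3 pattern: save the incoming
 * world's EL1/EL2 system register (and, when configured, SIMD) context, then
 * restore the outgoing world's context and select it for the next ERET. The
 * outgoing arguments are replayed from the incoming GP register context.
 */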
720 ******************************************************************************/ 721 uint64_t spmd_smc_switch_state(uint32_t smc_fid, 722 bool secure_origin, 723 uint64_t x1, 724 uint64_t x2, 725 uint64_t x3, 726 uint64_t x4, 727 void *handle, 728 uint64_t flags, 729 uint32_t secure_ffa_version) 730 { 731 unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE; 732 unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE; 733 uint32_t version_in = (secure_origin) ? secure_ffa_version : nonsecure_ffa_version; 734 uint32_t version_out = (!secure_origin) ? secure_ffa_version : nonsecure_ffa_version; 735 void *ctx_out; 736 737 #if SPMD_SPM_AT_SEL2 738 if ((secure_state_out == SECURE) && (is_sve_hint_set(flags) == true)) { 739 /* 740 * Set the SVE hint bit in x0 and pass to the lower secure EL, 741 * if it was set by the caller. 742 */ 743 smc_fid |= (FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT); 744 } 745 #endif 746 747 /* Save incoming security state */ 748 #if SPMD_SPM_AT_SEL2 749 cm_el2_sysregs_context_save(secure_state_in); 750 #else 751 cm_el1_sysregs_context_save(secure_state_in); 752 #if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS 753 /* Forward the hint bit denoting the absence of SVE live state. */ 754 simd_ctx_save(secure_state_in, (!secure_origin && (is_sve_hint_set(flags) == true))); 755 #endif 756 #endif 757 758 /* Restore outgoing security state */ 759 #if SPMD_SPM_AT_SEL2 760 cm_el2_sysregs_context_restore(secure_state_out); 761 #else 762 cm_el1_sysregs_context_restore(secure_state_out); 763 #if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS 764 simd_ctx_restore(secure_state_out); 765 #endif 766 #endif 767 cm_set_next_eret_context(secure_state_out); 768 769 ctx_out = cm_get_context(secure_state_out); 770 if (smc_fid == FFA_NORMAL_WORLD_RESUME) { 771 SMC_RET0(ctx_out); 772 } 773 774 if ((GET_SMC_CC(smc_fid) == SMC_64) && (version_out >= MAKE_FFA_VERSION(U(1), U(2)))) { 775 if (version_in < MAKE_FFA_VERSION(U(1), U(2))) { 776 /* FFA version mismatch, with dest >= 1.2 - set outgoing x8-x17 to zero */ 777 SMC_RET18(ctx_out, smc_fid, x1, x2, x3, x4, 778 SMC_GET_GP(handle, CTX_GPREG_X5), 779 SMC_GET_GP(handle, CTX_GPREG_X6), 780 SMC_GET_GP(handle, CTX_GPREG_X7), 781 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); 782 } else { 783 /* Both FFA versions >= 1.2 - pass incoming x8-x17 to dest */ 784 SMC_RET18(ctx_out, smc_fid, x1, x2, x3, x4, 785 SMC_GET_GP(handle, CTX_GPREG_X5), 786 SMC_GET_GP(handle, CTX_GPREG_X6), 787 SMC_GET_GP(handle, CTX_GPREG_X7), 788 SMC_GET_GP(handle, CTX_GPREG_X8), 789 SMC_GET_GP(handle, CTX_GPREG_X9), 790 SMC_GET_GP(handle, CTX_GPREG_X10), 791 SMC_GET_GP(handle, CTX_GPREG_X11), 792 SMC_GET_GP(handle, CTX_GPREG_X12), 793 SMC_GET_GP(handle, CTX_GPREG_X13), 794 SMC_GET_GP(handle, CTX_GPREG_X14), 795 SMC_GET_GP(handle, CTX_GPREG_X15), 796 SMC_GET_GP(handle, CTX_GPREG_X16), 797 SMC_GET_GP(handle, CTX_GPREG_X17) 798 ); 799 } 800 } else { 801 /* 32 bit call or dest has FFA version < 1.2 or unknown */ 802 SMC_RET8(ctx_out, smc_fid, x1, x2, x3, x4, 803 SMC_GET_GP(handle, CTX_GPREG_X5), 804 SMC_GET_GP(handle, CTX_GPREG_X6), 805 SMC_GET_GP(handle, CTX_GPREG_X7)); 806 } 807 } 808 809 /******************************************************************************* 810 * Forward SMCs to the other security state. 
811 ******************************************************************************/ 812 static uint64_t spmd_smc_forward(uint32_t smc_fid, 813 bool secure_origin, 814 uint64_t x1, 815 uint64_t x2, 816 uint64_t x3, 817 uint64_t x4, 818 void *cookie, 819 void *handle, 820 uint64_t flags, 821 uint32_t secure_ffa_version) 822 { 823 if (is_spmc_at_el3() && !secure_origin) { 824 return spmc_smc_handler(smc_fid, secure_origin, x1, x2, x3, x4, 825 cookie, handle, flags); 826 } 827 828 return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2, x3, x4, 829 handle, flags, secure_ffa_version); 830 831 } 832 833 /******************************************************************************* 834 * Return FFA_ERROR with specified error code 835 ******************************************************************************/ 836 uint64_t spmd_ffa_error_return(void *handle, int error_code) 837 { 838 SMC_RET8(handle, (uint32_t) FFA_ERROR, 839 FFA_TARGET_INFO_MBZ, (uint32_t)error_code, 840 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, 841 FFA_PARAM_MBZ, FFA_PARAM_MBZ); 842 } 843 844 /******************************************************************************* 845 * spmd_check_address_in_binary_image 846 ******************************************************************************/ 847 bool spmd_check_address_in_binary_image(uint64_t address) 848 { 849 assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size)); 850 851 return ((address >= spmc_attrs.load_address) && 852 (address < (spmc_attrs.load_address + spmc_attrs.binary_size))); 853 } 854 855 /****************************************************************************** 856 * spmd_is_spmc_message 857 *****************************************************************************/ 858 static bool spmd_is_spmc_message(unsigned int ep) 859 { 860 if (is_spmc_at_el3()) { 861 return false; 862 } 863 864 return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID) 865 && (ffa_endpoint_source(ep) == spmc_attrs.spmc_id)); 866 } 867 868 /******************************************************************************* 869 * This function forwards FF-A SMCs to either the main SPMD handler or the 870 * SPMC at EL3, depending on the origin security state, if enabled. 871 ******************************************************************************/ 872 uint64_t spmd_ffa_smc_handler(uint32_t smc_fid, 873 uint64_t x1, 874 uint64_t x2, 875 uint64_t x3, 876 uint64_t x4, 877 void *cookie, 878 void *handle, 879 uint64_t flags) 880 { 881 if (is_spmc_at_el3()) { 882 /* 883 * If we have an SPMC at EL3 allow handling of the SMC first. 884 * The SPMC will call back through to SPMD handler if required. 885 */ 886 if (is_caller_secure(flags)) { 887 return spmc_smc_handler(smc_fid, 888 is_caller_secure(flags), 889 x1, x2, x3, x4, cookie, 890 handle, flags); 891 } 892 } 893 return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie, 894 handle, flags, spmc_nwd_ffa_version); 895 } 896 897 static uint32_t get_common_ffa_version(uint32_t secure_ffa_version) 898 { 899 if (secure_ffa_version <= nonsecure_ffa_version) { 900 return secure_ffa_version; 901 } else { 902 return nonsecure_ffa_version; 903 } 904 } 905 906 /******************************************************************************* 907 * This function handles all SMCs in the range reserved for FFA. 

/*******************************************************************************
 * This function handles all SMCs in the range reserved for FFA. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags,
			  uint32_t secure_ffa_version)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64
		" 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
		plat_my_core_pos(), smc_fid, x1, x2, x3, x4,
		SMC_GET_GP(handle, CTX_GPREG_X5),
		SMC_GET_GP(handle, CTX_GPREG_X6),
		SMC_GET_GP(handle, CTX_GPREG_X7));

	/*
	 * If an FFA_PARTITION_INFO_GET_REGS request from an EL3 SPMD logical
	 * partition is in progress, unconditionally return to it: no other
	 * FF-A ABI is expected to be invoked between successive
	 * FFA_PARTITION_INFO_GET_REGS calls.
	 */
	if (is_spmd_logical_sp_info_regs_req_in_progress(ctx)) {
		assert(secure_origin);
		spmd_spm_core_sync_exit(0ULL);
	}

	if ((!secure_origin) && (smc_fid != FFA_VERSION)) {
		/*
		 * Once the caller invokes any FF-A ABI other than FFA_VERSION,
		 * the version negotiation phase is complete.
		 */
		nonsecure_version_negotiated = true;
	}

	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(x2);
		}

		/*
		 * Perform a synchronous exit if either:
		 * 1. an SPMD logical partition direct request is on-going, so
		 *    the error can be consumed by the logical partition, or
		 * 2. the SPMC sent FFA_ERROR in response to a power management
		 *    operation sent through a direct request.
		 */
		if (is_spmd_logical_sp_dir_req_in_progress(ctx) ||
		    ctx->psci_operation_ongoing) {
			assert(secure_origin);
			spmd_spm_core_sync_exit(0ULL);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags, secure_ffa_version);
		break; /* not reached */
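
	/*
	 * Version negotiation flow sketched from the handling below: the NWd
	 * caller issues FFA_VERSION(requested) exactly once before any other
	 * FF-A ABI; the SPMD latches the requested version, wraps it in an
	 * SPMD_FWK_MSG_FFA_VERSION_REQ framework direct request to the SPMC
	 * (unless the SPMC is at EL3 or is a v1.0 SPMC), and returns the
	 * SPMC's answer to the caller in x0.
	 */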

	case FFA_VERSION:
		input_version = (uint32_t)(0xFFFFFFFF & x1);
		/*
		 * If the caller is secure and the SPMC was initialized,
		 * return the FFA_VERSION of the SPMD.
		 * If the caller is non-secure and the SPMC was initialized,
		 * forward to the EL3 SPMC if enabled; otherwise send a
		 * framework message to the SPMC at the lower EL to negotiate
		 * a version that is compatible between the normal world and
		 * the SPMC.
		 * Sanity check "input_version".
		 * If the EL3 SPMC is enabled, ignore the SPMC state as it is
		 * not used.
		 */
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
		    (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			if (!nonsecure_version_negotiated) {
				/*
				 * Once an FF-A version has been negotiated
				 * between a caller and a callee, the version
				 * may not be changed for the lifetime of
				 * the calling component.
				 */
				nonsecure_ffa_version = input_version;
			}

			if (is_spmc_at_el3()) {
				/*
				 * Forward the call directly to the EL3 SPMC,
				 * if enabled, as we don't need to wrap the
				 * call in a direct request.
				 */
				spmc_nwd_ffa_version =
					MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
							 FFA_VERSION_MINOR);
				return spmc_smc_handler(smc_fid, secure_origin,
							x1, x2, x3, x4, cookie,
							handle, flags);
			}

			gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
			uint64_t rc;

			if (spmc_attrs.major_version == 1 &&
			    spmc_attrs.minor_version == 0) {
				ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
						       spmc_attrs.minor_version);
				spmc_nwd_ffa_version = (uint32_t)ret;
				SMC_RET8(handle, (uint32_t)ret,
					 FFA_TARGET_INFO_MBZ,
					 FFA_TARGET_INFO_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
				break;
			}

			/* Save the non-secure system registers context */
#if SPMD_SPM_AT_SEL2
			cm_el2_sysregs_context_save(NON_SECURE);
#else
			cm_el1_sysregs_context_save(NON_SECURE);
#endif

			/*
			 * The incoming request has FFA_VERSION as the X0
			 * smc_fid and the requested version in x1. Prepare a
			 * direct request from SPMD to SPMC with the
			 * FFA_VERSION framework function identifier in X2 and
			 * the requested version in X3.
			 */
			spmd_build_spmc_message(gpregs,
						SPMD_FWK_MSG_FFA_VERSION_REQ,
						input_version);

			/*
			 * Ensure the x8-x17 NS GP register values are
			 * untouched when returning from the SPMC.
			 */
			write_ctx_reg(gpregs, CTX_GPREG_X8, SMC_GET_GP(handle, CTX_GPREG_X8));
			write_ctx_reg(gpregs, CTX_GPREG_X9, SMC_GET_GP(handle, CTX_GPREG_X9));
			write_ctx_reg(gpregs, CTX_GPREG_X10, SMC_GET_GP(handle, CTX_GPREG_X10));
			write_ctx_reg(gpregs, CTX_GPREG_X11, SMC_GET_GP(handle, CTX_GPREG_X11));
			write_ctx_reg(gpregs, CTX_GPREG_X12, SMC_GET_GP(handle, CTX_GPREG_X12));
			write_ctx_reg(gpregs, CTX_GPREG_X13, SMC_GET_GP(handle, CTX_GPREG_X13));
			write_ctx_reg(gpregs, CTX_GPREG_X14, SMC_GET_GP(handle, CTX_GPREG_X14));
			write_ctx_reg(gpregs, CTX_GPREG_X15, SMC_GET_GP(handle, CTX_GPREG_X15));
			write_ctx_reg(gpregs, CTX_GPREG_X16, SMC_GET_GP(handle, CTX_GPREG_X16));
			write_ctx_reg(gpregs, CTX_GPREG_X17, SMC_GET_GP(handle, CTX_GPREG_X17));

			rc = spmd_spm_core_sync_entry(ctx);

			if ((rc != 0ULL) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X0) !=
				FFA_MSG_SEND_DIRECT_RESP_SMC32) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X2) !=
				(FFA_FWK_MSG_BIT |
				 SPMD_FWK_MSG_FFA_VERSION_RESP))) {
				ERROR("Failed to forward FFA_VERSION\n");
				ret = FFA_ERROR_NOT_SUPPORTED;
			} else {
				ret = SMC_GET_GP(gpregs, CTX_GPREG_X3);
				spmc_nwd_ffa_version = (uint32_t)ret;
			}

			/*
			 * x0-x4 are updated by spmd_smc_forward below.
			 * Zero out x5-x7 in the FFA_VERSION response.
			 */
			write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
			write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
			write_ctx_reg(gpregs, CTX_GPREG_X7, 0);

			/*
			 * Return here after the SPMC has handled FFA_VERSION.
			 * The returned SPMC version is held in X3.
			 * Forward this version in X0 to the non-secure caller.
			 */
			return spmd_smc_forward(ret, true, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, cookie, gpregs,
						flags, spmc_nwd_ffa_version);
		} else {
			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					       FFA_VERSION_MINOR);
		}

		SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
			 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break; /* not reached */
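
	/*
	 * Illustrative NWd call sequence for the case above (FFA_VERSION is
	 * SMC function ID 0x84000063 in the FF-A specification): the caller
	 * executes SMC(0x84000063, 0x10002) to request v1.2 and receives in
	 * w0 either the negotiated version or FFA_ERROR_NOT_SUPPORTED.
	 */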

	case FFA_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to the SPM Core, which will handle it if
		 * implemented.
		 */

		/* Forward SMC from the Normal world to the SPM Core */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags,
						secure_ffa_version);
		}

		/*
		 * Return success if the call was from the secure world, i.e.
		 * all FFA functions are supported. This is essentially a nop.
		 */
		SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));

		break; /* not reached */

	case FFA_ID_GET:
		/*
		 * Returns the ID of the calling FFA component.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}

		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_SECONDARY_EP_REGISTER_SMC64:
		if (secure_origin) {
			ret = spmd_pm_secondary_ep_register(x1);

			if (ret < 0) {
				SMC_RET8(handle, FFA_ERROR_SMC64,
					 FFA_TARGET_INFO_MBZ, ret,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			} else {
				SMC_RET8(handle, FFA_SUCCESS_SMC64,
					 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			}
		}

		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* not reached */

	case FFA_SPM_ID_GET:
		if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		/*
		 * Returns the ID of the SPMC or SPMD depending on the FF-A
		 * instance where this function is invoked.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}
		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */
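
	/*
	 * For the direct messaging cases below, x1 packs the endpoint IDs as
	 * (source << 16) | destination. For example, a hypothetical NWd
	 * endpoint 0x0001 targeting an SP 0x8001 would pass x1 = 0x00018001;
	 * ffa_endpoint_source(x1) yields 0x0001 and
	 * ffa_endpoint_destination(x1) yields 0x8001.
	 */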
	case FFA_MSG_SEND_DIRECT_REQ2_SMC64:
		if (get_common_ffa_version(secure_ffa_version) <
		    MAKE_FFA_VERSION(U(1), U(2))) {
			/* Call not supported at this version. */
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		/* fallthrough */
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
		/*
		 * Regardless of secure_origin, SPMD logical partitions cannot
		 * handle direct messages. They can only initiate direct
		 * messages and consume direct responses or errors.
		 */
		if (is_spmd_lp_id(ffa_endpoint_source(x1)) ||
		    is_spmd_lp_id(ffa_endpoint_destination(x1))) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_INVALID_PARAMETER);
		}

		/*
		 * When there is an ongoing SPMD logical partition direct
		 * request, there cannot be another direct request. Return an
		 * error in this case. Panicking is an option, but it would
		 * deny the caller the opportunity to abort based on the
		 * error code.
		 */
		if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
			assert(secure_origin);
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_DENIED);
		}

		if (!secure_origin) {
			/*
			 * Validate that the source endpoint is non-secure for
			 * a non-secure caller.
			 */
			if (ffa_is_secure_world_id(ffa_endpoint_source(x1))) {
				return spmd_ffa_error_return(handle,
							     FFA_ERROR_INVALID_PARAMETER);
			}
		}

		if (secure_origin && spmd_is_spmc_message(x1)) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_DENIED);
		} else {
			/* Forward the direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags,
						secure_ffa_version);
		}
		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_RESP2_SMC64:
		if (get_common_ffa_version(secure_ffa_version) <
		    MAKE_FFA_VERSION(U(1), U(2))) {
			/* Call not supported at this version. */
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		/* fallthrough */
	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
		if (secure_origin && (spmd_is_spmc_message(x1) ||
		    is_spmd_logical_sp_dir_req_in_progress(ctx))) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			/* Forward the direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags,
						secure_ffa_version);
		}
		break; /* not reached */
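
	/*
	 * Note: a direct response arriving from the SPMC while a framework
	 * direct request (power management, version negotiation, logical
	 * partition message) is outstanding completes that request via
	 * spmd_spm_core_sync_exit() above, rather than being forwarded to
	 * the normal world.
	 */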

	case FFA_RX_RELEASE:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_PARTITION_INFO_GET:
#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
	case FFA_NOTIFICATION_BITMAP_CREATE:
	case FFA_NOTIFICATION_BITMAP_DESTROY:
	case FFA_NOTIFICATION_BIND:
	case FFA_NOTIFICATION_UNBIND:
	case FFA_NOTIFICATION_SET:
	case FFA_NOTIFICATION_GET:
	case FFA_NOTIFICATION_INFO_GET:
	case FFA_NOTIFICATION_INFO_GET_SMC64:
	case FFA_MSG_SEND2:
	case FFA_RX_ACQUIRE:
	case FFA_NS_RES_INFO_GET_SMC64:
#endif
	case FFA_MSG_RUN:
		/*
		 * The above calls should be invoked only by the Normal world
		 * and must not be forwarded from the Secure world to the
		 * Normal world.
		 */
		if (secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Forward the call to the other world */
		/* fallthrough */
	case FFA_MSG_SEND:
	case FFA_MEM_DONATE_SMC32:
	case FFA_MEM_DONATE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_RECLAIM:
	case FFA_MEM_FRAG_TX:
	case FFA_MEM_FRAG_RX:
	case FFA_SUCCESS_SMC32:
	case FFA_SUCCESS_SMC64:
		/*
		 * If there is an ongoing direct request from an SPMD logical
		 * partition, return an error.
		 */
		if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
			assert(secure_origin);
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_DENIED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags, secure_ffa_version);
		break; /* not reached */

	case FFA_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that
		 * the SPM Core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(0ULL);
		}

		/* Forward the call to the other world */
		/* fallthrough */
	case FFA_INTERRUPT:
	case FFA_MSG_YIELD:
		/* These interfaces must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
			assert(secure_origin);
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_DENIED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags, secure_ffa_version);
		break; /* not reached */

	case FFA_NORMAL_WORLD_RESUME:
		if (secure_origin && ctx->secure_interrupt_ongoing) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			return spmd_ffa_error_return(handle, FFA_ERROR_DENIED);
		}
		break; /* not reached */

#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
	case FFA_PARTITION_INFO_GET_REGS_SMC64:
		if (secure_origin) {
			return spmd_el3_populate_logical_partition_info(handle, x1,
									x2, x3);
		}

		/* Call only supported with SMCCC 1.2+ */
		if (MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION) < 0x10002) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags, secure_ffa_version);
		break; /* not reached */
#endif
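
	/*
	 * FFA_PARTITION_INFO_GET_REGS above returns its payload in registers
	 * up to x17, which requires the extended result registers introduced
	 * in SMCCC v1.2; hence the MAKE_SMCCC_VERSION(...) >= 0x10002 gate
	 * (SMCCC versions use the same (major << 16) | minor packing as FF-A
	 * versions).
	 */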

	case FFA_CONSOLE_LOG_SMC32:
	case FFA_CONSOLE_LOG_SMC64:
		/* This interface must not be forwarded to the other world. */
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* not reached */

	case FFA_EL3_INTR_HANDLE:
		if (secure_origin) {
			return spmd_handle_group0_intr_swd(handle);
		} else {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

	case FFA_ABORT_SMC32:
	case FFA_ABORT_SMC64:
		/* This interface must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		ERROR("SPMC encountered a fatal error. Aborting now\n");
		panic();

		/* Not reached. */
		SMC_RET0(handle);

	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}