1 /* 2 * Copyright (c) 2020-2025, Arm Limited and Contributors. All rights reserved. 3 * 4 * SPDX-License-Identifier: BSD-3-Clause 5 */ 6 7 #include <assert.h> 8 #include <errno.h> 9 #include <inttypes.h> 10 #include <stdint.h> 11 #include <string.h> 12 13 #include <arch_helpers.h> 14 #include <arch/aarch64/arch_features.h> 15 #include <bl31/bl31.h> 16 #include <bl31/interrupt_mgmt.h> 17 #include <common/debug.h> 18 #include <common/runtime_svc.h> 19 #include <common/tbbr/tbbr_img_def.h> 20 #include <lib/el3_runtime/context_mgmt.h> 21 #include <lib/fconf/fconf.h> 22 #include <lib/fconf/fconf_dyn_cfg_getter.h> 23 #include <lib/smccc.h> 24 #include <lib/spinlock.h> 25 #include <lib/utils.h> 26 #include <lib/xlat_tables/xlat_tables_v2.h> 27 #include <plat/common/common_def.h> 28 #include <plat/common/platform.h> 29 #include <platform_def.h> 30 #include <services/el3_spmd_logical_sp.h> 31 #include <services/ffa_svc.h> 32 #include <services/spmc_svc.h> 33 #include <services/spmd_svc.h> 34 #include <smccc_helpers.h> 35 #include "spmd_private.h" 36 37 /******************************************************************************* 38 * SPM Core context information. 39 ******************************************************************************/ 40 static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT]; 41 42 /******************************************************************************* 43 * SPM Core attribute information is read from its manifest if the SPMC is not 44 * at EL3. Else, it is populated from the SPMC directly. 45 ******************************************************************************/ 46 static spmc_manifest_attribute_t spmc_attrs; 47 48 /******************************************************************************* 49 * FFA version used by nonsecure endpoint. 50 ******************************************************************************/ 51 static uint32_t nonsecure_ffa_version; 52 53 /******************************************************************************* 54 * Whether the normal world finished negotiating its version. 55 ******************************************************************************/ 56 static bool nonsecure_version_negotiated; 57 58 /******************************************************************************* 59 * FFA version used by SPMC, as seen by the normal world. 60 ******************************************************************************/ 61 static uint32_t spmc_nwd_ffa_version; 62 63 /******************************************************************************* 64 * SPM Core entry point information. Discovered on the primary core and reused 65 * on secondary cores. 66 ******************************************************************************/ 67 static entry_point_info_t *spmc_ep_info; 68 69 /******************************************************************************* 70 * SPM Core context on current CPU get helper. 71 ******************************************************************************/ 72 spmd_spm_core_context_t *spmd_get_context(void) 73 { 74 return &spm_core_context[plat_my_core_pos()]; 75 } 76 77 /******************************************************************************* 78 * SPM Core ID getter. 79 ******************************************************************************/ 80 uint16_t spmd_spmc_id_get(void) 81 { 82 return spmc_attrs.spmc_id; 83 } 84 85 /******************************************************************************* 86 * Static function declaration. 87 ******************************************************************************/ 88 static int32_t spmd_init(void); 89 static int spmd_spmc_init(void *pm_addr); 90 91 static uint64_t spmd_smc_forward(uint32_t smc_fid, 92 bool secure_origin, 93 uint64_t x1, 94 uint64_t x2, 95 uint64_t x3, 96 uint64_t x4, 97 void *cookie, 98 void *handle, 99 uint64_t flags, 100 uint32_t secure_ffa_version); 101 102 /****************************************************************************** 103 * Builds an SPMD to SPMC direct message request. 104 *****************************************************************************/ 105 void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target_func, 106 unsigned long long message) 107 { 108 write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32); 109 write_ctx_reg(gpregs, CTX_GPREG_X1, 110 (SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) | 111 spmd_spmc_id_get()); 112 write_ctx_reg(gpregs, CTX_GPREG_X2, BIT(31) | target_func); 113 write_ctx_reg(gpregs, CTX_GPREG_X3, message); 114 115 /* Zero out x4-x7 for the direct request emitted towards the SPMC. */ 116 write_ctx_reg(gpregs, CTX_GPREG_X4, 0); 117 write_ctx_reg(gpregs, CTX_GPREG_X5, 0); 118 write_ctx_reg(gpregs, CTX_GPREG_X6, 0); 119 write_ctx_reg(gpregs, CTX_GPREG_X7, 0); 120 } 121 122 123 /******************************************************************************* 124 * This function takes an SPMC context pointer and performs a synchronous 125 * SPMC entry. 126 ******************************************************************************/ 127 uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx) 128 { 129 uint64_t rc; 130 131 assert(spmc_ctx != NULL); 132 133 cm_set_context(&(spmc_ctx->cpu_ctx), SECURE); 134 135 /* Restore the context assigned above */ 136 #if SPMD_SPM_AT_SEL2 137 cm_el2_sysregs_context_restore(SECURE); 138 #else 139 cm_el1_sysregs_context_restore(SECURE); 140 #endif 141 cm_set_next_eret_context(SECURE); 142 143 /* Enter SPMC */ 144 rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx); 145 146 /* Save secure state */ 147 #if SPMD_SPM_AT_SEL2 148 cm_el2_sysregs_context_save(SECURE); 149 #else 150 cm_el1_sysregs_context_save(SECURE); 151 #endif 152 153 return rc; 154 } 155 156 /******************************************************************************* 157 * This function returns to the place where spmd_spm_core_sync_entry() was 158 * called originally. 159 ******************************************************************************/ 160 __dead2 void spmd_spm_core_sync_exit(uint64_t rc) 161 { 162 spmd_spm_core_context_t *ctx = spmd_get_context(); 163 164 /* Get current CPU context from SPMC context */ 165 assert(cm_get_context(SECURE) == &(ctx->cpu_ctx)); 166 167 /* 168 * The SPMD must have initiated the original request through a 169 * synchronous entry into SPMC. Jump back to the original C runtime 170 * context with the value of rc in x0; 171 */ 172 spmd_spm_core_exit(ctx->c_rt_ctx, rc); 173 174 panic(); 175 } 176 177 /******************************************************************************* 178 * Jump to the SPM Core for the first time. 179 ******************************************************************************/ 180 static int32_t spmd_init(void) 181 { 182 spmd_spm_core_context_t *ctx = spmd_get_context(); 183 uint64_t rc; 184 185 VERBOSE("SPM Core init start.\n"); 186 187 /* Primary boot core enters the SPMC for initialization. */ 188 ctx->state = SPMC_STATE_ON_PENDING; 189 190 rc = spmd_spm_core_sync_entry(ctx); 191 if (rc != 0ULL) { 192 ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc); 193 return 0; 194 } 195 196 ctx->state = SPMC_STATE_ON; 197 198 VERBOSE("SPM Core init end.\n"); 199 200 spmd_logical_sp_set_spmc_initialized(); 201 rc = spmd_logical_sp_init(); 202 if (rc != 0) { 203 WARN("SPMD Logical partitions failed init.\n"); 204 } 205 206 return 1; 207 } 208 209 /******************************************************************************* 210 * spmd_secure_interrupt_handler 211 * Enter the SPMC for further handling of the secure interrupt by the SPMC 212 * itself or a Secure Partition. 213 ******************************************************************************/ 214 static uint64_t spmd_secure_interrupt_handler(uint32_t id, 215 uint32_t flags, 216 void *handle, 217 void *cookie) 218 { 219 spmd_spm_core_context_t *ctx = spmd_get_context(); 220 gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx); 221 int64_t rc; 222 223 /* Sanity check the security state when the exception was generated */ 224 assert(get_interrupt_src_ss(flags) == NON_SECURE); 225 226 /* Sanity check the pointer to this cpu's context */ 227 assert(handle == cm_get_context(NON_SECURE)); 228 229 /* Save the non-secure context before entering SPMC */ 230 #if SPMD_SPM_AT_SEL2 231 cm_el2_sysregs_context_save(NON_SECURE); 232 #else 233 cm_el1_sysregs_context_save(NON_SECURE); 234 235 #if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS 236 /* 237 * The hint bit denoting absence of SVE live state is effectively false 238 * in this scenario where execution was trapped to EL3 due to FIQ. 239 */ 240 simd_ctx_save(NON_SECURE, false); 241 simd_ctx_restore(SECURE); 242 #endif 243 #endif 244 245 /* Convey the event to the SPMC through the FFA_INTERRUPT interface. */ 246 write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT); 247 write_ctx_reg(gpregs, CTX_GPREG_X1, 0); 248 write_ctx_reg(gpregs, CTX_GPREG_X2, 0); 249 write_ctx_reg(gpregs, CTX_GPREG_X3, 0); 250 write_ctx_reg(gpregs, CTX_GPREG_X4, 0); 251 write_ctx_reg(gpregs, CTX_GPREG_X5, 0); 252 write_ctx_reg(gpregs, CTX_GPREG_X6, 0); 253 write_ctx_reg(gpregs, CTX_GPREG_X7, 0); 254 255 /* Mark current core as handling a secure interrupt. */ 256 ctx->secure_interrupt_ongoing = true; 257 258 rc = spmd_spm_core_sync_entry(ctx); 259 260 if (rc != 0ULL) { 261 ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc, plat_my_core_pos()); 262 } 263 264 ctx->secure_interrupt_ongoing = false; 265 266 #if SPMD_SPM_AT_SEL2 267 cm_el2_sysregs_context_restore(NON_SECURE); 268 #else 269 cm_el1_sysregs_context_restore(NON_SECURE); 270 271 #if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS 272 simd_ctx_save(SECURE, false); 273 simd_ctx_restore(NON_SECURE); 274 #endif 275 #endif 276 cm_set_next_eret_context(NON_SECURE); 277 278 SMC_RET0(&ctx->cpu_ctx); 279 } 280 281 #if (EL3_EXCEPTION_HANDLING == 0) 282 /******************************************************************************* 283 * spmd_group0_interrupt_handler_nwd 284 * Group0 secure interrupt in the normal world are trapped to EL3. Delegate the 285 * handling of the interrupt to the platform handler, and return only upon 286 * successfully handling the Group0 interrupt. 287 ******************************************************************************/ 288 static uint64_t spmd_group0_interrupt_handler_nwd(uint32_t id, 289 uint32_t flags, 290 void *handle, 291 void *cookie) 292 { 293 uint32_t intid, intr_raw; 294 295 /* Sanity check the security state when the exception was generated. */ 296 assert(get_interrupt_src_ss(flags) == NON_SECURE); 297 298 /* Sanity check the pointer to this cpu's context. */ 299 assert(handle == cm_get_context(NON_SECURE)); 300 301 assert(id == INTR_ID_UNAVAILABLE); 302 303 assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3); 304 305 intr_raw = plat_ic_acknowledge_interrupt(); 306 intid = plat_ic_get_interrupt_id(intr_raw); 307 308 if (intid == INTR_ID_UNAVAILABLE) { 309 return 0U; 310 } 311 312 if (plat_spmd_handle_group0_interrupt(intid) < 0) { 313 ERROR("Group0 interrupt %u not handled\n", intid); 314 panic(); 315 } 316 317 /* Deactivate the corresponding Group0 interrupt. */ 318 plat_ic_end_of_interrupt(intid); 319 320 return 0U; 321 } 322 #endif 323 324 /******************************************************************************* 325 * spmd_handle_group0_intr_swd 326 * SPMC delegates handling of Group0 secure interrupt to EL3 firmware using 327 * FFA_EL3_INTR_HANDLE SMC call. Further, SPMD delegates the handling of the 328 * interrupt to the platform handler, and returns only upon successfully 329 * handling the Group0 interrupt. 330 ******************************************************************************/ 331 static uint64_t spmd_handle_group0_intr_swd(void *handle) 332 { 333 uint32_t intid, intr_raw; 334 335 /* Sanity check the pointer to this cpu's context */ 336 assert(handle == cm_get_context(SECURE)); 337 338 assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3); 339 340 intr_raw = plat_ic_acknowledge_interrupt(); 341 intid = plat_ic_get_interrupt_id(intr_raw); 342 343 if (intid == INTR_ID_UNAVAILABLE) { 344 return 0U; 345 } 346 347 /* 348 * TODO: Currently due to a limitation in SPMD implementation, the 349 * platform handler is expected to not delegate handling to NWd while 350 * processing Group0 secure interrupt. 351 */ 352 if (plat_spmd_handle_group0_interrupt(intid) < 0) { 353 /* Group0 interrupt was not handled by the platform. */ 354 ERROR("Group0 interrupt %u not handled\n", intid); 355 panic(); 356 } 357 358 /* Deactivate the corresponding Group0 interrupt. */ 359 plat_ic_end_of_interrupt(intid); 360 361 /* Return success. */ 362 SMC_RET8(handle, FFA_SUCCESS_SMC32, FFA_PARAM_MBZ, FFA_PARAM_MBZ, 363 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, 364 FFA_PARAM_MBZ); 365 } 366 367 #if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 368 static int spmd_dynamic_map_mem(uintptr_t base_addr, size_t size, 369 unsigned int attr, uintptr_t *align_addr, 370 size_t *align_size) 371 { 372 uintptr_t base_addr_align; 373 size_t mapped_size_align; 374 int rc; 375 376 /* Page aligned address and size if necessary */ 377 base_addr_align = page_align(base_addr, DOWN); 378 mapped_size_align = page_align(size, UP); 379 380 if ((base_addr != base_addr_align) && 381 (size == mapped_size_align)) { 382 mapped_size_align += PAGE_SIZE; 383 } 384 385 /* 386 * Map dynamically given region with its aligned base address and 387 * size 388 */ 389 rc = mmap_add_dynamic_region((unsigned long long)base_addr_align, 390 base_addr_align, 391 mapped_size_align, 392 attr); 393 if (rc == 0) { 394 *align_addr = base_addr_align; 395 *align_size = mapped_size_align; 396 } 397 398 return rc; 399 } 400 401 static void spmd_do_sec_cpy(uintptr_t root_base_addr, uintptr_t sec_base_addr, 402 size_t size) 403 { 404 uintptr_t root_base_addr_align, sec_base_addr_align; 405 size_t root_mapped_size_align, sec_mapped_size_align; 406 int rc; 407 408 assert(root_base_addr != 0UL); 409 assert(sec_base_addr != 0UL); 410 assert(size != 0UL); 411 412 /* Map the memory with required attributes */ 413 rc = spmd_dynamic_map_mem(root_base_addr, size, MT_RO_DATA | MT_ROOT, 414 &root_base_addr_align, 415 &root_mapped_size_align); 416 if (rc != 0) { 417 ERROR("%s %s %lu (%d)\n", "Error while mapping", "root region", 418 root_base_addr, rc); 419 panic(); 420 } 421 422 rc = spmd_dynamic_map_mem(sec_base_addr, size, MT_RW_DATA | MT_SECURE, 423 &sec_base_addr_align, &sec_mapped_size_align); 424 if (rc != 0) { 425 ERROR("%s %s %lu (%d)\n", "Error while mapping", 426 "secure region", sec_base_addr, rc); 427 panic(); 428 } 429 430 /* Do copy operation */ 431 (void)memcpy((void *)sec_base_addr, (void *)root_base_addr, size); 432 433 /* Unmap root memory region */ 434 rc = mmap_remove_dynamic_region(root_base_addr_align, 435 root_mapped_size_align); 436 if (rc != 0) { 437 ERROR("%s %s %lu (%d)\n", "Error while unmapping", 438 "root region", root_base_addr_align, rc); 439 panic(); 440 } 441 442 /* Unmap secure memory region */ 443 rc = mmap_remove_dynamic_region(sec_base_addr_align, 444 sec_mapped_size_align); 445 if (rc != 0) { 446 ERROR("%s %s %lu (%d)\n", "Error while unmapping", 447 "secure region", sec_base_addr_align, rc); 448 panic(); 449 } 450 } 451 #endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */ 452 453 /******************************************************************************* 454 * Loads SPMC manifest and inits SPMC. 455 ******************************************************************************/ 456 static int spmd_spmc_init(void *pm_addr) 457 { 458 cpu_context_t *cpu_ctx; 459 unsigned int core_id; 460 uint32_t ep_attr, flags; 461 int rc; 462 const struct dyn_cfg_dtb_info_t *image_info __unused; 463 464 /* Load the SPM Core manifest */ 465 rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr); 466 if (rc != 0) { 467 WARN("No or invalid SPM Core manifest image provided by BL2\n"); 468 return rc; 469 } 470 471 /* 472 * Ensure that the SPM Core version is compatible with the SPM 473 * Dispatcher version. 474 */ 475 if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) || 476 (spmc_attrs.minor_version > FFA_VERSION_MINOR)) { 477 WARN("Unsupported FFA version (%u.%u)\n", 478 spmc_attrs.major_version, spmc_attrs.minor_version); 479 return -EINVAL; 480 } 481 482 VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version, 483 spmc_attrs.minor_version); 484 485 VERBOSE("SPM Core run time EL%x.\n", 486 SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1); 487 488 /* Validate the SPMC ID, Ensure high bit is set */ 489 if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) & 490 SPMC_SECURE_ID_MASK) == 0U) { 491 WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id); 492 return -EINVAL; 493 } 494 495 /* Validate the SPM Core execution state */ 496 if ((spmc_attrs.exec_state != MODE_RW_64) && 497 (spmc_attrs.exec_state != MODE_RW_32)) { 498 WARN("Unsupported %s%x.\n", "SPM Core execution state 0x", 499 spmc_attrs.exec_state); 500 return -EINVAL; 501 } 502 503 VERBOSE("%s%x.\n", "SPM Core execution state 0x", 504 spmc_attrs.exec_state); 505 506 #if SPMD_SPM_AT_SEL2 507 /* Ensure manifest has not requested AArch32 state in S-EL2 */ 508 if (spmc_attrs.exec_state == MODE_RW_32) { 509 WARN("AArch32 state at S-EL2 is not supported.\n"); 510 return -EINVAL; 511 } 512 513 /* 514 * Check if S-EL2 is supported on this system if S-EL2 515 * is required for SPM 516 */ 517 if (!is_feat_sel2_supported()) { 518 WARN("SPM Core run time S-EL2 is not supported.\n"); 519 return -EINVAL; 520 } 521 #endif /* SPMD_SPM_AT_SEL2 */ 522 523 /* Initialise an entrypoint to set up the CPU context */ 524 ep_attr = SECURE | EP_ST_ENABLE; 525 if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) { 526 ep_attr |= EP_EE_BIG; 527 } 528 529 SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr); 530 531 /* 532 * Populate SPSR for SPM Core based upon validated parameters from the 533 * manifest. 534 */ 535 if (spmc_attrs.exec_state == MODE_RW_32) { 536 spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM, 537 SPSR_E_LITTLE, 538 DAIF_FIQ_BIT | 539 DAIF_IRQ_BIT | 540 DAIF_ABT_BIT); 541 } else { 542 543 #if SPMD_SPM_AT_SEL2 544 static const uint32_t runtime_el = MODE_EL2; 545 #else 546 static const uint32_t runtime_el = MODE_EL1; 547 #endif 548 spmc_ep_info->spsr = SPSR_64(runtime_el, 549 MODE_SP_ELX, 550 DISABLE_ALL_EXCEPTIONS); 551 } 552 553 #if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 554 image_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TOS_FW_CONFIG_ID); 555 assert(image_info != NULL); 556 557 if ((image_info->config_addr == 0UL) || 558 (image_info->secondary_config_addr == 0UL) || 559 (image_info->config_max_size == 0UL)) { 560 return -EINVAL; 561 } 562 563 /* Copy manifest from root->secure region */ 564 spmd_do_sec_cpy(image_info->config_addr, 565 image_info->secondary_config_addr, 566 image_info->config_max_size); 567 568 /* Update ep info of BL32 */ 569 assert(spmc_ep_info != NULL); 570 spmc_ep_info->args.arg0 = image_info->secondary_config_addr; 571 #endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */ 572 573 /* Set an initial SPMC context state for all cores. */ 574 for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) { 575 spm_core_context[core_id].state = SPMC_STATE_OFF; 576 577 /* Setup an initial cpu context for the SPMC. */ 578 cpu_ctx = &spm_core_context[core_id].cpu_ctx; 579 cm_setup_context(cpu_ctx, spmc_ep_info); 580 581 /* 582 * Pass the core linear ID to the SPMC through x4. 583 * (TF-A implementation defined behavior helping 584 * a legacy TOS migration to adopt FF-A). 585 */ 586 write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id); 587 } 588 589 /* Register power management hooks with PSCI */ 590 psci_register_spd_pm_hook(&spmd_pm); 591 592 /* Register init function for deferred init. */ 593 bl31_register_bl32_init(&spmd_init); 594 595 INFO("SPM Core setup done.\n"); 596 597 /* 598 * Register an interrupt handler routing secure interrupts to SPMD 599 * while the NWd is running. 600 */ 601 flags = 0; 602 set_interrupt_rm_flag(flags, NON_SECURE); 603 rc = register_interrupt_type_handler(INTR_TYPE_S_EL1, 604 spmd_secure_interrupt_handler, 605 flags); 606 if (rc != 0) { 607 panic(); 608 } 609 610 /* 611 * Permit configurations where the SPM resides at S-EL1/2 and upon a 612 * Group0 interrupt triggering while the normal world runs, the 613 * interrupt is routed either through the EHF or directly to the SPMD: 614 * 615 * EL3_EXCEPTION_HANDLING=0: the Group0 interrupt is routed to the SPMD 616 * for handling by spmd_group0_interrupt_handler_nwd. 617 * 618 * EL3_EXCEPTION_HANDLING=1: the Group0 interrupt is routed to the EHF. 619 * 620 */ 621 #if (EL3_EXCEPTION_HANDLING == 0) 622 /* 623 * If EL3 interrupts are supported by the platform, register an 624 * interrupt handler routing Group0 interrupts to SPMD while the NWd is 625 * running. 626 */ 627 if (plat_ic_has_interrupt_type(INTR_TYPE_EL3)) { 628 rc = register_interrupt_type_handler(INTR_TYPE_EL3, 629 spmd_group0_interrupt_handler_nwd, 630 flags); 631 if (rc != 0) { 632 panic(); 633 } 634 } 635 #endif 636 637 return 0; 638 } 639 640 /******************************************************************************* 641 * Initialize context of SPM Core. 642 ******************************************************************************/ 643 int spmd_setup(void) 644 { 645 int rc; 646 void *spmc_manifest; 647 648 /* 649 * If the SPMC is at EL3, then just initialise it directly. The 650 * shenanigans of when it is at a lower EL are not needed. 651 */ 652 if (is_spmc_at_el3()) { 653 /* Allow the SPMC to populate its attributes directly. */ 654 spmc_populate_attrs(&spmc_attrs); 655 656 rc = spmc_setup(); 657 if (rc != 0) { 658 WARN("SPMC initialisation failed 0x%x.\n", rc); 659 } 660 return 0; 661 } 662 663 spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE); 664 if (spmc_ep_info == NULL) { 665 WARN("No SPM Core image provided by BL2 boot loader.\n"); 666 return 0; 667 } 668 669 /* Under no circumstances will this parameter be 0 */ 670 assert(spmc_ep_info->pc != 0ULL); 671 672 /* 673 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will 674 * be used as a manifest for the SPM Core at the next lower EL/mode. 675 */ 676 spmc_manifest = (void *)spmc_ep_info->args.arg0; 677 if (spmc_manifest == NULL) { 678 WARN("Invalid or absent SPM Core manifest.\n"); 679 return 0; 680 } 681 682 /* Load manifest, init SPMC */ 683 rc = spmd_spmc_init(spmc_manifest); 684 if (rc != 0) { 685 WARN("Booting device without SPM initialization.\n"); 686 } 687 688 return 0; 689 } 690 691 /******************************************************************************* 692 * Forward FF-A SMCs to the other security state. 693 ******************************************************************************/ 694 uint64_t spmd_smc_switch_state(uint32_t smc_fid, 695 bool secure_origin, 696 uint64_t x1, 697 uint64_t x2, 698 uint64_t x3, 699 uint64_t x4, 700 void *handle, 701 uint64_t flags, 702 uint32_t secure_ffa_version) 703 { 704 unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE; 705 unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE; 706 uint32_t version_in = (secure_origin) ? secure_ffa_version : nonsecure_ffa_version; 707 uint32_t version_out = (!secure_origin) ? secure_ffa_version : nonsecure_ffa_version; 708 void *ctx_out; 709 710 #if SPMD_SPM_AT_SEL2 711 if ((secure_state_out == SECURE) && (is_sve_hint_set(flags) == true)) { 712 /* 713 * Set the SVE hint bit in x0 and pass to the lower secure EL, 714 * if it was set by the caller. 715 */ 716 smc_fid |= (FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT); 717 } 718 #endif 719 720 /* Save incoming security state */ 721 #if SPMD_SPM_AT_SEL2 722 cm_el2_sysregs_context_save(secure_state_in); 723 #else 724 cm_el1_sysregs_context_save(secure_state_in); 725 #if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS 726 /* Forward the hint bit denoting the absence of SVE live state. */ 727 simd_ctx_save(secure_state_in, (!secure_origin && (is_sve_hint_set(flags) == true))); 728 #endif 729 #endif 730 731 /* Restore outgoing security state */ 732 #if SPMD_SPM_AT_SEL2 733 cm_el2_sysregs_context_restore(secure_state_out); 734 #else 735 cm_el1_sysregs_context_restore(secure_state_out); 736 #if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS 737 simd_ctx_restore(secure_state_out); 738 #endif 739 #endif 740 cm_set_next_eret_context(secure_state_out); 741 742 ctx_out = cm_get_context(secure_state_out); 743 if (smc_fid == FFA_NORMAL_WORLD_RESUME) { 744 SMC_RET0(ctx_out); 745 } 746 747 if ((GET_SMC_CC(smc_fid) == SMC_64) && (version_out >= MAKE_FFA_VERSION(U(1), U(2)))) { 748 if (version_in < MAKE_FFA_VERSION(U(1), U(2))) { 749 /* FFA version mismatch, with dest >= 1.2 - set outgoing x8-x17 to zero */ 750 SMC_RET18(ctx_out, smc_fid, x1, x2, x3, x4, 751 SMC_GET_GP(handle, CTX_GPREG_X5), 752 SMC_GET_GP(handle, CTX_GPREG_X6), 753 SMC_GET_GP(handle, CTX_GPREG_X7), 754 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); 755 } else { 756 /* Both FFA versions >= 1.2 - pass incoming x8-x17 to dest */ 757 SMC_RET18(ctx_out, smc_fid, x1, x2, x3, x4, 758 SMC_GET_GP(handle, CTX_GPREG_X5), 759 SMC_GET_GP(handle, CTX_GPREG_X6), 760 SMC_GET_GP(handle, CTX_GPREG_X7), 761 SMC_GET_GP(handle, CTX_GPREG_X8), 762 SMC_GET_GP(handle, CTX_GPREG_X9), 763 SMC_GET_GP(handle, CTX_GPREG_X10), 764 SMC_GET_GP(handle, CTX_GPREG_X11), 765 SMC_GET_GP(handle, CTX_GPREG_X12), 766 SMC_GET_GP(handle, CTX_GPREG_X13), 767 SMC_GET_GP(handle, CTX_GPREG_X14), 768 SMC_GET_GP(handle, CTX_GPREG_X15), 769 SMC_GET_GP(handle, CTX_GPREG_X16), 770 SMC_GET_GP(handle, CTX_GPREG_X17) 771 ); 772 } 773 } else { 774 /* 32 bit call or dest has FFA version < 1.2 or unknown */ 775 SMC_RET8(ctx_out, smc_fid, x1, x2, x3, x4, 776 SMC_GET_GP(handle, CTX_GPREG_X5), 777 SMC_GET_GP(handle, CTX_GPREG_X6), 778 SMC_GET_GP(handle, CTX_GPREG_X7)); 779 } 780 } 781 782 /******************************************************************************* 783 * Forward SMCs to the other security state. 784 ******************************************************************************/ 785 static uint64_t spmd_smc_forward(uint32_t smc_fid, 786 bool secure_origin, 787 uint64_t x1, 788 uint64_t x2, 789 uint64_t x3, 790 uint64_t x4, 791 void *cookie, 792 void *handle, 793 uint64_t flags, 794 uint32_t secure_ffa_version) 795 { 796 if (is_spmc_at_el3() && !secure_origin) { 797 return spmc_smc_handler(smc_fid, secure_origin, x1, x2, x3, x4, 798 cookie, handle, flags); 799 } 800 801 return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2, x3, x4, 802 handle, flags, secure_ffa_version); 803 804 } 805 806 /******************************************************************************* 807 * Return FFA_ERROR with specified error code 808 ******************************************************************************/ 809 uint64_t spmd_ffa_error_return(void *handle, int error_code) 810 { 811 SMC_RET8(handle, (uint32_t) FFA_ERROR, 812 FFA_TARGET_INFO_MBZ, (uint32_t)error_code, 813 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, 814 FFA_PARAM_MBZ, FFA_PARAM_MBZ); 815 } 816 817 /******************************************************************************* 818 * spmd_check_address_in_binary_image 819 ******************************************************************************/ 820 bool spmd_check_address_in_binary_image(uint64_t address) 821 { 822 assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size)); 823 824 return ((address >= spmc_attrs.load_address) && 825 (address < (spmc_attrs.load_address + spmc_attrs.binary_size))); 826 } 827 828 /****************************************************************************** 829 * spmd_is_spmc_message 830 *****************************************************************************/ 831 static bool spmd_is_spmc_message(unsigned int ep) 832 { 833 if (is_spmc_at_el3()) { 834 return false; 835 } 836 837 return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID) 838 && (ffa_endpoint_source(ep) == spmc_attrs.spmc_id)); 839 } 840 841 /******************************************************************************* 842 * This function forwards FF-A SMCs to either the main SPMD handler or the 843 * SPMC at EL3, depending on the origin security state, if enabled. 844 ******************************************************************************/ 845 uint64_t spmd_ffa_smc_handler(uint32_t smc_fid, 846 uint64_t x1, 847 uint64_t x2, 848 uint64_t x3, 849 uint64_t x4, 850 void *cookie, 851 void *handle, 852 uint64_t flags) 853 { 854 if (is_spmc_at_el3()) { 855 /* 856 * If we have an SPMC at EL3 allow handling of the SMC first. 857 * The SPMC will call back through to SPMD handler if required. 858 */ 859 if (is_caller_secure(flags)) { 860 return spmc_smc_handler(smc_fid, 861 is_caller_secure(flags), 862 x1, x2, x3, x4, cookie, 863 handle, flags); 864 } 865 } 866 return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie, 867 handle, flags, spmc_nwd_ffa_version); 868 } 869 870 static uint32_t get_common_ffa_version(uint32_t secure_ffa_version) 871 { 872 if (secure_ffa_version <= nonsecure_ffa_version) { 873 return secure_ffa_version; 874 } else { 875 return nonsecure_ffa_version; 876 } 877 } 878 879 /******************************************************************************* 880 * This function handles all SMCs in the range reserved for FFA. Each call is 881 * either forwarded to the other security state or handled by the SPM dispatcher 882 ******************************************************************************/ 883 uint64_t spmd_smc_handler(uint32_t smc_fid, 884 uint64_t x1, 885 uint64_t x2, 886 uint64_t x3, 887 uint64_t x4, 888 void *cookie, 889 void *handle, 890 uint64_t flags, 891 uint32_t secure_ffa_version) 892 { 893 spmd_spm_core_context_t *ctx = spmd_get_context(); 894 bool secure_origin; 895 int ret; 896 uint32_t input_version; 897 898 /* Determine which security state this SMC originated from */ 899 secure_origin = is_caller_secure(flags); 900 901 VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 902 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n", 903 plat_my_core_pos(), smc_fid, x1, x2, x3, x4, 904 SMC_GET_GP(handle, CTX_GPREG_X5), 905 SMC_GET_GP(handle, CTX_GPREG_X6), 906 SMC_GET_GP(handle, CTX_GPREG_X7)); 907 908 /* 909 * If there is an on-going info regs from EL3 SPMD LP, unconditionally 910 * return, we don't expect any other FF-A ABIs to be called between 911 * calls to FFA_PARTITION_INFO_GET_REGS. 912 */ 913 if (is_spmd_logical_sp_info_regs_req_in_progress(ctx)) { 914 assert(secure_origin); 915 spmd_spm_core_sync_exit(0ULL); 916 } 917 918 if ((!secure_origin) && (smc_fid != FFA_VERSION)) { 919 /* 920 * Once the caller invokes any FF-A ABI other than FFA_VERSION, 921 * the version negotiation phase is complete. 922 */ 923 nonsecure_version_negotiated = true; 924 } 925 926 switch (smc_fid) { 927 case FFA_ERROR: 928 /* 929 * Check if this is the first invocation of this interface on 930 * this CPU. If so, then indicate that the SPM Core initialised 931 * unsuccessfully. 932 */ 933 if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) { 934 spmd_spm_core_sync_exit(x2); 935 } 936 937 /* 938 * Perform a synchronous exit: 939 * 1. If there was an SPMD logical partition direct request on-going, 940 * return back to the SPMD logical partition so the error can be 941 * consumed. 942 * 2. SPMC sent FFA_ERROR in response to a power management 943 * operation sent through direct request. 944 */ 945 if (is_spmd_logical_sp_dir_req_in_progress(ctx) || 946 ctx->psci_operation_ongoing) { 947 assert(secure_origin); 948 spmd_spm_core_sync_exit(0ULL); 949 } 950 951 return spmd_smc_forward(smc_fid, secure_origin, 952 x1, x2, x3, x4, cookie, 953 handle, flags, secure_ffa_version); 954 break; /* not reached */ 955 956 case FFA_VERSION: 957 input_version = (uint32_t)(0xFFFFFFFF & x1); 958 /* 959 * If caller is secure and SPMC was initialized, 960 * return FFA_VERSION of SPMD. 961 * If caller is non secure and SPMC was initialized, 962 * forward to the EL3 SPMC if enabled, otherwise send a 963 * framework message to the SPMC at the lower EL to 964 * negotiate a version that is compatible between the 965 * normal world and the SPMC. 966 * Sanity check to "input_version". 967 * If the EL3 SPMC is enabled, ignore the SPMC state as 968 * this is not used. 969 */ 970 if ((input_version & FFA_VERSION_BIT31_MASK) || 971 (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) { 972 ret = FFA_ERROR_NOT_SUPPORTED; 973 } else if (!secure_origin) { 974 if (!nonsecure_version_negotiated) { 975 /* 976 * Once an FF-A version has been negotiated 977 * between a caller and a callee, the version 978 * may not be changed for the lifetime of 979 * the calling component. 980 */ 981 nonsecure_ffa_version = input_version; 982 } 983 984 if (is_spmc_at_el3()) { 985 /* 986 * Forward the call directly to the EL3 SPMC, if 987 * enabled, as we don't need to wrap the call in 988 * a direct request. 989 */ 990 spmc_nwd_ffa_version = 991 MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR); 992 return spmc_smc_handler(smc_fid, secure_origin, 993 x1, x2, x3, x4, cookie, 994 handle, flags); 995 } 996 997 gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx); 998 uint64_t rc; 999 1000 if (spmc_attrs.major_version == 1 && 1001 spmc_attrs.minor_version == 0) { 1002 ret = MAKE_FFA_VERSION(spmc_attrs.major_version, 1003 spmc_attrs.minor_version); 1004 spmc_nwd_ffa_version = (uint32_t)ret; 1005 SMC_RET8(handle, (uint32_t)ret, 1006 FFA_TARGET_INFO_MBZ, 1007 FFA_TARGET_INFO_MBZ, 1008 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1009 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1010 FFA_PARAM_MBZ); 1011 break; 1012 } 1013 /* Save non-secure system registers context */ 1014 #if SPMD_SPM_AT_SEL2 1015 cm_el2_sysregs_context_save(NON_SECURE); 1016 #else 1017 cm_el1_sysregs_context_save(NON_SECURE); 1018 #endif 1019 1020 /* 1021 * The incoming request has FFA_VERSION as X0 smc_fid 1022 * and requested version in x1. Prepare a direct request 1023 * from SPMD to SPMC with FFA_VERSION framework function 1024 * identifier in X2 and requested version in X3. 1025 */ 1026 spmd_build_spmc_message(gpregs, 1027 SPMD_FWK_MSG_FFA_VERSION_REQ, 1028 input_version); 1029 1030 /* 1031 * Ensure x8-x17 NS GP register values are untouched when returning 1032 * from the SPMC. 1033 */ 1034 write_ctx_reg(gpregs, CTX_GPREG_X8, SMC_GET_GP(handle, CTX_GPREG_X8)); 1035 write_ctx_reg(gpregs, CTX_GPREG_X9, SMC_GET_GP(handle, CTX_GPREG_X9)); 1036 write_ctx_reg(gpregs, CTX_GPREG_X10, SMC_GET_GP(handle, CTX_GPREG_X10)); 1037 write_ctx_reg(gpregs, CTX_GPREG_X11, SMC_GET_GP(handle, CTX_GPREG_X11)); 1038 write_ctx_reg(gpregs, CTX_GPREG_X12, SMC_GET_GP(handle, CTX_GPREG_X12)); 1039 write_ctx_reg(gpregs, CTX_GPREG_X13, SMC_GET_GP(handle, CTX_GPREG_X13)); 1040 write_ctx_reg(gpregs, CTX_GPREG_X14, SMC_GET_GP(handle, CTX_GPREG_X14)); 1041 write_ctx_reg(gpregs, CTX_GPREG_X15, SMC_GET_GP(handle, CTX_GPREG_X15)); 1042 write_ctx_reg(gpregs, CTX_GPREG_X16, SMC_GET_GP(handle, CTX_GPREG_X16)); 1043 write_ctx_reg(gpregs, CTX_GPREG_X17, SMC_GET_GP(handle, CTX_GPREG_X17)); 1044 1045 rc = spmd_spm_core_sync_entry(ctx); 1046 1047 if ((rc != 0ULL) || 1048 (SMC_GET_GP(gpregs, CTX_GPREG_X0) != 1049 FFA_MSG_SEND_DIRECT_RESP_SMC32) || 1050 (SMC_GET_GP(gpregs, CTX_GPREG_X2) != 1051 (FFA_FWK_MSG_BIT | 1052 SPMD_FWK_MSG_FFA_VERSION_RESP))) { 1053 ERROR("Failed to forward FFA_VERSION\n"); 1054 ret = FFA_ERROR_NOT_SUPPORTED; 1055 } else { 1056 ret = SMC_GET_GP(gpregs, CTX_GPREG_X3); 1057 spmc_nwd_ffa_version = (uint32_t)ret; 1058 } 1059 1060 /* 1061 * x0-x4 are updated by spmd_smc_forward below. 1062 * Zero out x5-x7 in the FFA_VERSION response. 1063 */ 1064 write_ctx_reg(gpregs, CTX_GPREG_X5, 0); 1065 write_ctx_reg(gpregs, CTX_GPREG_X6, 0); 1066 write_ctx_reg(gpregs, CTX_GPREG_X7, 0); 1067 1068 /* 1069 * Return here after SPMC has handled FFA_VERSION. 1070 * The returned SPMC version is held in X3. 1071 * Forward this version in X0 to the non-secure caller. 1072 */ 1073 return spmd_smc_forward(ret, true, FFA_PARAM_MBZ, 1074 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1075 FFA_PARAM_MBZ, cookie, gpregs, 1076 flags, spmc_nwd_ffa_version); 1077 } else { 1078 ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, 1079 FFA_VERSION_MINOR); 1080 } 1081 1082 SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ, 1083 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1084 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ); 1085 break; /* not reached */ 1086 1087 case FFA_FEATURES: 1088 /* 1089 * This is an optional interface. Do the minimal checks and 1090 * forward to SPM Core which will handle it if implemented. 1091 */ 1092 1093 /* Forward SMC from Normal world to the SPM Core */ 1094 if (!secure_origin) { 1095 return spmd_smc_forward(smc_fid, secure_origin, 1096 x1, x2, x3, x4, cookie, 1097 handle, flags, secure_ffa_version); 1098 } 1099 1100 /* 1101 * Return success if call was from secure world i.e. all 1102 * FFA functions are supported. This is essentially a 1103 * nop. 1104 */ 1105 SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4, 1106 SMC_GET_GP(handle, CTX_GPREG_X5), 1107 SMC_GET_GP(handle, CTX_GPREG_X6), 1108 SMC_GET_GP(handle, CTX_GPREG_X7)); 1109 1110 break; /* not reached */ 1111 1112 case FFA_ID_GET: 1113 /* 1114 * Returns the ID of the calling FFA component. 1115 */ 1116 if (!secure_origin) { 1117 SMC_RET8(handle, FFA_SUCCESS_SMC32, 1118 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID, 1119 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1120 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1121 FFA_PARAM_MBZ); 1122 } 1123 1124 SMC_RET8(handle, FFA_SUCCESS_SMC32, 1125 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id, 1126 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1127 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1128 FFA_PARAM_MBZ); 1129 1130 break; /* not reached */ 1131 1132 case FFA_SECONDARY_EP_REGISTER_SMC64: 1133 if (secure_origin) { 1134 ret = spmd_pm_secondary_ep_register(x1); 1135 1136 if (ret < 0) { 1137 SMC_RET8(handle, FFA_ERROR_SMC64, 1138 FFA_TARGET_INFO_MBZ, ret, 1139 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1140 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1141 FFA_PARAM_MBZ); 1142 } else { 1143 SMC_RET8(handle, FFA_SUCCESS_SMC64, 1144 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, 1145 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1146 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1147 FFA_PARAM_MBZ); 1148 } 1149 } 1150 1151 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1152 break; /* Not reached */ 1153 1154 case FFA_SPM_ID_GET: 1155 if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) { 1156 return spmd_ffa_error_return(handle, 1157 FFA_ERROR_NOT_SUPPORTED); 1158 } 1159 /* 1160 * Returns the ID of the SPMC or SPMD depending on the FF-A 1161 * instance where this function is invoked 1162 */ 1163 if (!secure_origin) { 1164 SMC_RET8(handle, FFA_SUCCESS_SMC32, 1165 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id, 1166 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1167 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1168 FFA_PARAM_MBZ); 1169 } 1170 SMC_RET8(handle, FFA_SUCCESS_SMC32, 1171 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID, 1172 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1173 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1174 FFA_PARAM_MBZ); 1175 1176 break; /* not reached */ 1177 1178 case FFA_MSG_SEND_DIRECT_REQ2_SMC64: 1179 if (get_common_ffa_version(secure_ffa_version) < MAKE_FFA_VERSION(U(1), U(2))) { 1180 /* Call not supported at this version */ 1181 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1182 } 1183 /* fallthrough */ 1184 case FFA_MSG_SEND_DIRECT_REQ_SMC32: 1185 case FFA_MSG_SEND_DIRECT_REQ_SMC64: 1186 /* 1187 * Regardless of secure_origin, SPMD logical partitions cannot 1188 * handle direct messages. They can only initiate direct 1189 * messages and consume direct responses or errors. 1190 */ 1191 if (is_spmd_lp_id(ffa_endpoint_source(x1)) || 1192 is_spmd_lp_id(ffa_endpoint_destination(x1))) { 1193 return spmd_ffa_error_return(handle, 1194 FFA_ERROR_INVALID_PARAMETER 1195 ); 1196 } 1197 1198 /* 1199 * When there is an ongoing SPMD logical partition direct 1200 * request, there cannot be another direct request. Return 1201 * error in this case. Panic'ing is an option but that does 1202 * not provide the opportunity for caller to abort based on 1203 * error codes. 1204 */ 1205 if (is_spmd_logical_sp_dir_req_in_progress(ctx)) { 1206 assert(secure_origin); 1207 return spmd_ffa_error_return(handle, 1208 FFA_ERROR_DENIED); 1209 } 1210 1211 if (!secure_origin) { 1212 /* Validate source endpoint is non-secure for non-secure caller. */ 1213 if (ffa_is_secure_world_id(ffa_endpoint_source(x1))) { 1214 return spmd_ffa_error_return(handle, 1215 FFA_ERROR_INVALID_PARAMETER); 1216 } 1217 } 1218 if (secure_origin && spmd_is_spmc_message(x1)) { 1219 return spmd_ffa_error_return(handle, 1220 FFA_ERROR_DENIED); 1221 } else { 1222 /* Forward direct message to the other world */ 1223 return spmd_smc_forward(smc_fid, secure_origin, 1224 x1, x2, x3, x4, cookie, 1225 handle, flags, secure_ffa_version); 1226 } 1227 break; /* Not reached */ 1228 1229 case FFA_MSG_SEND_DIRECT_RESP2_SMC64: 1230 if (get_common_ffa_version(secure_ffa_version) < MAKE_FFA_VERSION(U(1), U(2))) { 1231 /* Call not supported at this version */ 1232 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1233 } 1234 /* fallthrough */ 1235 case FFA_MSG_SEND_DIRECT_RESP_SMC32: 1236 case FFA_MSG_SEND_DIRECT_RESP_SMC64: 1237 if (secure_origin && (spmd_is_spmc_message(x1) || 1238 is_spmd_logical_sp_dir_req_in_progress(ctx))) { 1239 spmd_spm_core_sync_exit(0ULL); 1240 } else { 1241 /* Forward direct message to the other world */ 1242 return spmd_smc_forward(smc_fid, secure_origin, 1243 x1, x2, x3, x4, cookie, 1244 handle, flags, secure_ffa_version); 1245 } 1246 break; /* Not reached */ 1247 case FFA_RX_RELEASE: 1248 case FFA_RXTX_MAP_SMC32: 1249 case FFA_RXTX_MAP_SMC64: 1250 case FFA_RXTX_UNMAP: 1251 case FFA_PARTITION_INFO_GET: 1252 #if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED 1253 case FFA_NOTIFICATION_BITMAP_CREATE: 1254 case FFA_NOTIFICATION_BITMAP_DESTROY: 1255 case FFA_NOTIFICATION_BIND: 1256 case FFA_NOTIFICATION_UNBIND: 1257 case FFA_NOTIFICATION_SET: 1258 case FFA_NOTIFICATION_GET: 1259 case FFA_NOTIFICATION_INFO_GET: 1260 case FFA_NOTIFICATION_INFO_GET_SMC64: 1261 case FFA_MSG_SEND2: 1262 case FFA_RX_ACQUIRE: 1263 case FFA_NS_RES_INFO_GET_SMC64: 1264 #endif 1265 case FFA_MSG_RUN: 1266 /* 1267 * Above calls should be invoked only by the Normal world and 1268 * must not be forwarded from Secure world to Normal world. 1269 */ 1270 if (secure_origin) { 1271 return spmd_ffa_error_return(handle, 1272 FFA_ERROR_NOT_SUPPORTED); 1273 } 1274 1275 /* Forward the call to the other world */ 1276 /* fallthrough */ 1277 case FFA_MSG_SEND: 1278 case FFA_MEM_DONATE_SMC32: 1279 case FFA_MEM_DONATE_SMC64: 1280 case FFA_MEM_LEND_SMC32: 1281 case FFA_MEM_LEND_SMC64: 1282 case FFA_MEM_SHARE_SMC32: 1283 case FFA_MEM_SHARE_SMC64: 1284 case FFA_MEM_RETRIEVE_REQ_SMC32: 1285 case FFA_MEM_RETRIEVE_REQ_SMC64: 1286 case FFA_MEM_RETRIEVE_RESP: 1287 case FFA_MEM_RELINQUISH: 1288 case FFA_MEM_RECLAIM: 1289 case FFA_MEM_FRAG_TX: 1290 case FFA_MEM_FRAG_RX: 1291 case FFA_SUCCESS_SMC32: 1292 case FFA_SUCCESS_SMC64: 1293 /* 1294 * If there is an ongoing direct request from an SPMD logical 1295 * partition, return an error. 1296 */ 1297 if (is_spmd_logical_sp_dir_req_in_progress(ctx)) { 1298 assert(secure_origin); 1299 return spmd_ffa_error_return(handle, 1300 FFA_ERROR_DENIED); 1301 } 1302 1303 return spmd_smc_forward(smc_fid, secure_origin, 1304 x1, x2, x3, x4, cookie, 1305 handle, flags, secure_ffa_version); 1306 break; /* not reached */ 1307 1308 case FFA_MSG_WAIT: 1309 /* 1310 * Check if this is the first invocation of this interface on 1311 * this CPU from the Secure world. If so, then indicate that the 1312 * SPM Core initialised successfully. 1313 */ 1314 if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) { 1315 spmd_spm_core_sync_exit(0ULL); 1316 } 1317 1318 /* Forward the call to the other world */ 1319 /* fallthrough */ 1320 case FFA_INTERRUPT: 1321 case FFA_MSG_YIELD: 1322 /* This interface must be invoked only by the Secure world */ 1323 if (!secure_origin) { 1324 return spmd_ffa_error_return(handle, 1325 FFA_ERROR_NOT_SUPPORTED); 1326 } 1327 1328 if (is_spmd_logical_sp_dir_req_in_progress(ctx)) { 1329 assert(secure_origin); 1330 return spmd_ffa_error_return(handle, 1331 FFA_ERROR_DENIED); 1332 } 1333 1334 return spmd_smc_forward(smc_fid, secure_origin, 1335 x1, x2, x3, x4, cookie, 1336 handle, flags, secure_ffa_version); 1337 break; /* not reached */ 1338 1339 case FFA_NORMAL_WORLD_RESUME: 1340 if (secure_origin && ctx->secure_interrupt_ongoing) { 1341 spmd_spm_core_sync_exit(0ULL); 1342 } else { 1343 return spmd_ffa_error_return(handle, FFA_ERROR_DENIED); 1344 } 1345 break; /* Not reached */ 1346 #if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED 1347 case FFA_PARTITION_INFO_GET_REGS_SMC64: 1348 if (secure_origin) { 1349 return spmd_el3_populate_logical_partition_info(handle, x1, 1350 x2, x3); 1351 } 1352 1353 /* Call only supported with SMCCC 1.2+ */ 1354 if (MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION) < 0x10002) { 1355 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1356 } 1357 1358 return spmd_smc_forward(smc_fid, secure_origin, 1359 x1, x2, x3, x4, cookie, 1360 handle, flags, secure_ffa_version); 1361 break; /* Not reached */ 1362 #endif 1363 case FFA_CONSOLE_LOG_SMC32: 1364 case FFA_CONSOLE_LOG_SMC64: 1365 /* This interface must not be forwarded to other worlds. */ 1366 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1367 break; /* not reached */ 1368 1369 case FFA_EL3_INTR_HANDLE: 1370 if (secure_origin) { 1371 return spmd_handle_group0_intr_swd(handle); 1372 } else { 1373 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1374 } 1375 case FFA_ABORT_SMC32: 1376 case FFA_ABORT_SMC64: 1377 /* This interface must be invoked only by the Secure world */ 1378 if (!secure_origin) { 1379 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1380 } 1381 1382 ERROR("SPMC encountered a fatal error. Aborting now\n"); 1383 panic(); 1384 1385 /* Not reached. */ 1386 SMC_RET0(handle); 1387 default: 1388 WARN("SPM: Unsupported call 0x%08x\n", smc_fid); 1389 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1390 } 1391 } 1392