/*
 * Copyright (c) 2020-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch/aarch64/arch_features.h>
#include <bl31/bl31.h>
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <common/tbbr/tbbr_img_def.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/fconf/fconf.h>
#include <lib/fconf/fconf_dyn_cfg_getter.h>
#include <lib/per_cpu/per_cpu.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/el3_spmd_logical_sp.h>
#include <services/ffa_svc.h>
#include <services/spmc_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"
#if TRANSFER_LIST
#include <transfer_list.h>
#endif

/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
static PER_CPU_DEFINE(spmd_spm_core_context_t, spm_core_context);

/*******************************************************************************
 * SPM Core attribute information is read from its manifest if the SPMC is not
 * at EL3. Otherwise, it is populated from the SPMC directly.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * FF-A version used by the non-secure endpoint.
 ******************************************************************************/
static uint32_t nonsecure_ffa_version;

/*******************************************************************************
 * Whether the normal world has finished negotiating its version.
 ******************************************************************************/
static bool nonsecure_version_negotiated;

/*******************************************************************************
 * FF-A version used by the SPMC, as seen by the normal world.
 ******************************************************************************/
static uint32_t spmc_nwd_ffa_version;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;

/*******************************************************************************
 * Helper to get the SPM Core context on the current CPU.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
	return PER_CPU_CUR(spm_core_context);
}
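
/*
 * Illustrative note (not part of the upstream flow): the SPMC context is
 * per-CPU data only ever accessed from its owning core, so a handler can
 * typically read and update it without locking, e.g.:
 *
 *	spmd_spm_core_context_t *ctx = spmd_get_context();
 *
 *	if (ctx->state == SPMC_STATE_ON) {
 *		... the SPMC has finished booting on this core ...
 *	}
 */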

/*******************************************************************************
 * SPM Core ID getter.
 ******************************************************************************/
uint16_t spmd_spmc_id_get(void)
{
	return spmc_attrs.spmc_id;
}

/*******************************************************************************
 * Static function declarations.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);

static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags,
				 uint32_t secure_ffa_version);

/******************************************************************************
 * Builds an SPMD to SPMC direct message request.
 *****************************************************************************/
void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target_func,
			     unsigned long long message)
{
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
	write_ctx_reg(gpregs, CTX_GPREG_X1,
		      (SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
		      spmd_spmc_id_get());
	write_ctx_reg(gpregs, CTX_GPREG_X2, BIT(31) | target_func);
	write_ctx_reg(gpregs, CTX_GPREG_X3, message);

	/* Zero out x4-x7 for the direct request emitted towards the SPMC. */
	write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X7, 0);
}

/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#else
	cm_el1_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/* Enter SPMC */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#else
	cm_el1_sysregs_context_save(SECURE);
#endif

	return rc;
}

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * originally called.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into the SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	panic();
}
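
/*
 * Note on pairing (illustrative): spmd_spm_core_sync_entry() and
 * spmd_spm_core_sync_exit() behave like a coroutine pair. sync_entry()
 * ERETs into the SPMC and only returns once an SPMC-originated SMC handler
 * (for instance FFA_MSG_WAIT at boot, or a direct response to a framework
 * message) invokes sync_exit(rc); the rc passed there becomes the return
 * value of the original sync_entry() call. The underlying
 * spmd_spm_core_enter()/spmd_spm_core_exit() helpers save and restore the
 * EL3 C runtime (callee-saved) registers in c_rt_ctx to make this work.
 */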

void spmd_setup_context(unsigned int core_id)
{
	cpu_context_t *cpu_ctx;

	PER_CPU_CUR(spm_core_context)->state = SPMC_STATE_OFF;

	/* Set up an initial cpu context for the SPMC. */
	cpu_ctx = &(PER_CPU_CUR(spm_core_context)->cpu_ctx);
	cm_setup_context(cpu_ctx, spmc_ep_info);

	/*
	 * Pass the core linear ID to the SPMC through x4.
	 * (TF-A implementation-defined behaviour helping
	 * a legacy TOS migration to adopt FF-A.)
	 */
	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
}

/*******************************************************************************
 * Jump to the SPM Core for the first time.
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;

	VERBOSE("SPM Core init start.\n");

	/* Primary boot core enters the SPMC for initialization. */
	ctx->state = SPMC_STATE_ON_PENDING;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc);
		return 0;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("SPM Core init end.\n");

	spmd_logical_sp_set_spmc_initialized();
	rc = spmd_logical_sp_init();
	if (rc != 0) {
		WARN("SPMD logical partitions failed to initialise.\n");
	}

	return 1;
}
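
/*
 * Note (descriptive, based on the BL31 init convention): spmd_init() is
 * registered through bl31_register_bl32_init() below, whose callback is
 * expected to return a non-zero value on success. That is why spmd_init()
 * returns 1 on success and 0 on failure, unlike the error-code-returning
 * functions elsewhere in this file.
 */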

/*******************************************************************************
 * spmd_secure_interrupt_handler
 * Enter the SPMC for further handling of the secure interrupt by the SPMC
 * itself or a Secure Partition.
 ******************************************************************************/
static uint64_t spmd_secure_interrupt_handler(uint32_t id,
					      uint32_t flags,
					      void *handle,
					      void *cookie)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
	int64_t rc;

	/* Sanity check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering the SPMC */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(NON_SECURE);
#else
	cm_el1_sysregs_context_save(NON_SECURE);

#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	/*
	 * The hint bit denoting absence of SVE live state is effectively false
	 * in this scenario where execution was trapped to EL3 due to FIQ.
	 */
	simd_ctx_save(NON_SECURE, false);
	simd_ctx_restore(SECURE);
#endif
#endif

	/* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT);
	write_ctx_reg(gpregs, CTX_GPREG_X1, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X2, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X3, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X7, 0);

	/* Mark the current core as handling a secure interrupt. */
	ctx->secure_interrupt_ongoing = true;

	rc = spmd_spm_core_sync_entry(ctx);

	if (rc != 0ULL) {
		ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc,
		      plat_my_core_pos());
	}

	ctx->secure_interrupt_ongoing = false;

#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(NON_SECURE);
#else
	cm_el1_sysregs_context_restore(NON_SECURE);

#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	simd_ctx_save(SECURE, false);
	simd_ctx_restore(NON_SECURE);
#endif
#endif
	cm_set_next_eret_context(NON_SECURE);

	SMC_RET0(&ctx->cpu_ctx);
}

#if (EL3_EXCEPTION_HANDLING == 0)
/*******************************************************************************
 * spmd_group0_interrupt_handler_nwd
 * Group0 secure interrupts in the normal world are trapped to EL3. Delegate
 * the handling of the interrupt to the platform handler, and return only upon
 * successfully handling the Group0 interrupt.
 ******************************************************************************/
static uint64_t spmd_group0_interrupt_handler_nwd(uint32_t id,
						  uint32_t flags,
						  void *handle,
						  void *cookie)
{
	uint32_t intid, intr_raw;

	/* Sanity check the security state when the exception was generated. */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context. */
	assert(handle == cm_get_context(NON_SECURE));

	assert(id == INTR_ID_UNAVAILABLE);

	assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3);

	intr_raw = plat_ic_acknowledge_interrupt();
	intid = plat_ic_get_interrupt_id(intr_raw);

	if (intid == INTR_ID_UNAVAILABLE) {
		return 0U;
	}

	if (plat_spmd_handle_group0_interrupt(intid) < 0) {
		ERROR("Group0 interrupt %u not handled\n", intid);
		panic();
	}

	/* Deactivate the corresponding Group0 interrupt. */
	plat_ic_end_of_interrupt(intid);

	return 0U;
}
#endif
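
/*
 * Both Group0 paths (the normal world one above and the secure world one
 * below) funnel into the platform hook plat_spmd_handle_group0_interrupt().
 * A minimal sketch of a platform implementation is shown here for
 * illustration only; MY_PLAT_G0_INTID is a hypothetical platform-defined
 * interrupt ID, not something defined by this file:
 *
 *	int plat_spmd_handle_group0_interrupt(uint32_t intid)
 *	{
 *		if (intid == MY_PLAT_G0_INTID) {
 *			... service the interrupt here ...
 *			return 0;	(handled)
 *		}
 *
 *		return -1;	(unhandled: the SPMD panics)
 *	}
 */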

/*******************************************************************************
 * spmd_handle_group0_intr_swd
 * The SPMC delegates handling of a Group0 secure interrupt to EL3 firmware
 * using the FFA_EL3_INTR_HANDLE SMC call. The SPMD in turn delegates the
 * handling of the interrupt to the platform handler, and returns only upon
 * successfully handling the Group0 interrupt.
 ******************************************************************************/
static uint64_t spmd_handle_group0_intr_swd(void *handle)
{
	uint32_t intid, intr_raw;

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(SECURE));

	assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3);

	intr_raw = plat_ic_acknowledge_interrupt();
	intid = plat_ic_get_interrupt_id(intr_raw);

	if (intid == INTR_ID_UNAVAILABLE) {
		return 0U;
	}

	/*
	 * TODO: Currently, due to a limitation in the SPMD implementation, the
	 * platform handler is expected to not delegate handling to the NWd
	 * while processing a Group0 secure interrupt.
	 */
	if (plat_spmd_handle_group0_interrupt(intid) < 0) {
		/* Group0 interrupt was not handled by the platform. */
		ERROR("Group0 interrupt %u not handled\n", intid);
		panic();
	}

	/* Deactivate the corresponding Group0 interrupt. */
	plat_ic_end_of_interrupt(intid);

	/* Return success. */
	SMC_RET8(handle, FFA_SUCCESS_SMC32, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ);
}

#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
static int spmd_dynamic_map_mem(uintptr_t base_addr, size_t size,
				unsigned int attr, uintptr_t *align_addr,
				size_t *align_size)
{
	uintptr_t base_addr_align;
	size_t mapped_size_align;
	int rc;

	/* Page align the address and size if necessary */
	base_addr_align = page_align(base_addr, DOWN);
	mapped_size_align = page_align(size, UP);

	if ((base_addr != base_addr_align) &&
	    (size == mapped_size_align)) {
		mapped_size_align += PAGE_SIZE;
	}

	/*
	 * Dynamically map the given region with its aligned base address and
	 * size.
	 */
	rc = mmap_add_dynamic_region((unsigned long long)base_addr_align,
				     base_addr_align,
				     mapped_size_align,
				     attr);
	if (rc == 0) {
		*align_addr = base_addr_align;
		*align_size = mapped_size_align;
	}

	return rc;
}

static void spmd_do_sec_cpy(uintptr_t root_base_addr, uintptr_t sec_base_addr,
			    size_t size)
{
	uintptr_t root_base_addr_align, sec_base_addr_align;
	size_t root_mapped_size_align, sec_mapped_size_align;
	int rc;

	assert(root_base_addr != 0UL);
	assert(sec_base_addr != 0UL);
	assert(size != 0UL);

	/* Map the memory with the required attributes */
	rc = spmd_dynamic_map_mem(root_base_addr, size, MT_RO_DATA | MT_ROOT,
				  &root_base_addr_align,
				  &root_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while mapping", "root region",
		      root_base_addr, rc);
		panic();
	}

	rc = spmd_dynamic_map_mem(sec_base_addr, size, MT_RW_DATA | MT_SECURE,
				  &sec_base_addr_align, &sec_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while mapping",
		      "secure region", sec_base_addr, rc);
		panic();
	}

	/* Do the copy operation */
	(void)memcpy((void *)sec_base_addr, (void *)root_base_addr, size);

	/* Unmap the root memory region */
	rc = mmap_remove_dynamic_region(root_base_addr_align,
					root_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while unmapping",
		      "root region", root_base_addr_align, rc);
		panic();
	}

	/* Unmap the secure memory region */
	rc = mmap_remove_dynamic_region(sec_base_addr_align,
					sec_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while unmapping",
		      "secure region", sec_base_addr_align, rc);
		panic();
	}
}
#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */

/*******************************************************************************
 * Loads the SPMC manifest and initialises the SPMC.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	uint32_t ep_attr, flags;
	int rc;
	const struct dyn_cfg_dtb_info_t *image_info __unused;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
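	/*
	 * For example (illustrative): with the SPMD built at FF-A v1.2, an
	 * SPMC manifest declaring 1.0, 1.1 or 1.2 is accepted, while 1.3 or
	 * 2.0 is rejected; the major versions must match exactly and the SPMC
	 * minor version must not exceed the SPMD's.
	 */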
494 */ 495 if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) || 496 (spmc_attrs.minor_version > FFA_VERSION_MINOR)) { 497 WARN("Unsupported FFA version (%u.%u)\n", 498 spmc_attrs.major_version, spmc_attrs.minor_version); 499 return -EINVAL; 500 } 501 502 VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version, 503 spmc_attrs.minor_version); 504 505 VERBOSE("SPM Core run time EL%x.\n", 506 SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1); 507 508 /* Validate the SPMC ID, Ensure high bit is set */ 509 if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) & 510 SPMC_SECURE_ID_MASK) == 0U) { 511 WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id); 512 return -EINVAL; 513 } 514 515 /* Validate the SPM Core execution state */ 516 if ((spmc_attrs.exec_state != MODE_RW_64) && 517 (spmc_attrs.exec_state != MODE_RW_32)) { 518 WARN("Unsupported %s%x.\n", "SPM Core execution state 0x", 519 spmc_attrs.exec_state); 520 return -EINVAL; 521 } 522 523 VERBOSE("%s%x.\n", "SPM Core execution state 0x", 524 spmc_attrs.exec_state); 525 526 #if SPMD_SPM_AT_SEL2 527 /* Ensure manifest has not requested AArch32 state in S-EL2 */ 528 if (spmc_attrs.exec_state == MODE_RW_32) { 529 WARN("AArch32 state at S-EL2 is not supported.\n"); 530 return -EINVAL; 531 } 532 533 /* 534 * Check if S-EL2 is supported on this system if S-EL2 535 * is required for SPM 536 */ 537 if (!is_feat_sel2_supported()) { 538 WARN("SPM Core run time S-EL2 is not supported.\n"); 539 return -EINVAL; 540 } 541 #endif /* SPMD_SPM_AT_SEL2 */ 542 543 /* Initialise an entrypoint to set up the CPU context */ 544 ep_attr = SECURE | EP_ST_ENABLE; 545 if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) { 546 ep_attr |= EP_EE_BIG; 547 } 548 549 SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr); 550 551 /* 552 * Populate SPSR for SPM Core based upon validated parameters from the 553 * manifest. 554 */ 555 if (spmc_attrs.exec_state == MODE_RW_32) { 556 spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM, 557 SPSR_E_LITTLE, 558 DAIF_FIQ_BIT | 559 DAIF_IRQ_BIT | 560 DAIF_ABT_BIT); 561 } else { 562 563 #if SPMD_SPM_AT_SEL2 564 static const uint32_t runtime_el = MODE_EL2; 565 #else 566 static const uint32_t runtime_el = MODE_EL1; 567 #endif 568 spmc_ep_info->spsr = SPSR_64(runtime_el, 569 MODE_SP_ELX, 570 DISABLE_ALL_EXCEPTIONS); 571 } 572 573 #if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 574 image_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TOS_FW_CONFIG_ID); 575 assert(image_info != NULL); 576 577 if ((image_info->config_addr == 0UL) || 578 (image_info->secondary_config_addr == 0UL) || 579 (image_info->config_max_size == 0UL)) { 580 return -EINVAL; 581 } 582 583 /* Copy manifest from root->secure region */ 584 spmd_do_sec_cpy(image_info->config_addr, 585 image_info->secondary_config_addr, 586 image_info->config_max_size); 587 588 /* Update ep info of BL32 */ 589 assert(spmc_ep_info != NULL); 590 spmc_ep_info->args.arg0 = image_info->secondary_config_addr; 591 #endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */ 592 593 spmd_setup_context(plat_my_core_pos()); 594 595 /* Register power management hooks with PSCI */ 596 psci_register_spd_pm_hook(&spmd_pm); 597 598 /* Register init function for deferred init. */ 599 bl31_register_bl32_init(&spmd_init); 600 601 INFO("SPM Core setup done.\n"); 602 603 /* 604 * Register an interrupt handler routing secure interrupts to SPMD 605 * while the NWd is running. 
606 */ 607 flags = 0; 608 set_interrupt_rm_flag(flags, NON_SECURE); 609 rc = register_interrupt_type_handler(INTR_TYPE_S_EL1, 610 spmd_secure_interrupt_handler, 611 flags); 612 if (rc != 0) { 613 panic(); 614 } 615 616 /* 617 * Permit configurations where the SPM resides at S-EL1/2 and upon a 618 * Group0 interrupt triggering while the normal world runs, the 619 * interrupt is routed either through the EHF or directly to the SPMD: 620 * 621 * EL3_EXCEPTION_HANDLING=0: the Group0 interrupt is routed to the SPMD 622 * for handling by spmd_group0_interrupt_handler_nwd. 623 * 624 * EL3_EXCEPTION_HANDLING=1: the Group0 interrupt is routed to the EHF. 625 * 626 */ 627 #if (EL3_EXCEPTION_HANDLING == 0) 628 /* 629 * If EL3 interrupts are supported by the platform, register an 630 * interrupt handler routing Group0 interrupts to SPMD while the NWd is 631 * running. 632 */ 633 if (plat_ic_has_interrupt_type(INTR_TYPE_EL3)) { 634 rc = register_interrupt_type_handler(INTR_TYPE_EL3, 635 spmd_group0_interrupt_handler_nwd, 636 flags); 637 if (rc != 0) { 638 panic(); 639 } 640 } 641 #endif 642 643 return 0; 644 } 645 646 /******************************************************************************* 647 * Initialize context of SPM Core. 648 ******************************************************************************/ 649 int spmd_setup(void) 650 { 651 int rc; 652 void *spmc_manifest; 653 struct transfer_list_header *tl __maybe_unused; 654 struct transfer_list_entry *te __maybe_unused; 655 656 /* 657 * If the SPMC is at EL3, then just initialise it directly. The 658 * shenanigans of when it is at a lower EL are not needed. 659 */ 660 if (is_spmc_at_el3()) { 661 /* Allow the SPMC to populate its attributes directly. */ 662 spmc_populate_attrs(&spmc_attrs); 663 664 rc = spmc_setup(); 665 if (rc != 0) { 666 WARN("SPMC initialisation failed 0x%x.\n", rc); 667 } 668 return 0; 669 } 670 671 spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE); 672 if (spmc_ep_info == NULL) { 673 WARN("No SPM Core image provided by BL2 boot loader.\n"); 674 return 0; 675 } 676 677 /* Under no circumstances will this parameter be 0 */ 678 assert(spmc_ep_info->pc != 0ULL); 679 680 681 #if TRANSFER_LIST && !RESET_TO_BL31 682 tl = (struct transfer_list_header *)spmc_ep_info->args.arg3; 683 te = transfer_list_find(tl, TL_TAG_DT_SPMC_MANIFEST); 684 if (te == NULL) { 685 WARN("SPM Core manifest absent in TRANSFER_LIST.\n"); 686 return -ENOENT; 687 } 688 689 spmc_manifest = (void *)transfer_list_entry_data(te); 690 691 /* Change the DT in the handoff */ 692 if (sizeof(spmc_ep_info->args.arg0) == sizeof(uint64_t)) { 693 spmc_ep_info->args.arg0 = (uintptr_t)spmc_manifest; 694 } else { 695 spmc_ep_info->args.arg3 = (uintptr_t)spmc_manifest; 696 } 697 #else 698 /* 699 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will 700 * be used as a manifest for the SPM Core at the next lower EL/mode. 701 */ 702 spmc_manifest = (void *)spmc_ep_info->args.arg0; 703 #endif 704 705 if (spmc_manifest == NULL) { 706 WARN("Invalid or absent SPM Core manifest.\n"); 707 return 0; 708 } 709 710 /* Load manifest, init SPMC */ 711 rc = spmd_spmc_init(spmc_manifest); 712 if (rc != 0) { 713 WARN("Booting device without SPM initialization.\n"); 714 } 715 716 return 0; 717 } 718 719 /******************************************************************************* 720 * Forward FF-A SMCs to the other security state. 
721 ******************************************************************************/ 722 uint64_t spmd_smc_switch_state(uint32_t smc_fid, 723 bool secure_origin, 724 uint64_t x1, 725 uint64_t x2, 726 uint64_t x3, 727 uint64_t x4, 728 void *handle, 729 uint64_t flags, 730 uint32_t secure_ffa_version) 731 { 732 unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE; 733 unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE; 734 uint32_t version_in = (secure_origin) ? secure_ffa_version : nonsecure_ffa_version; 735 uint32_t version_out = (!secure_origin) ? secure_ffa_version : nonsecure_ffa_version; 736 void *ctx_out; 737 738 #if SPMD_SPM_AT_SEL2 739 if ((secure_state_out == SECURE) && (is_sve_hint_set(flags) == true)) { 740 /* 741 * Set the SVE hint bit in x0 and pass to the lower secure EL, 742 * if it was set by the caller. 743 */ 744 smc_fid |= (FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT); 745 } 746 #endif 747 748 /* Save incoming security state */ 749 #if SPMD_SPM_AT_SEL2 750 cm_el2_sysregs_context_save(secure_state_in); 751 #else 752 cm_el1_sysregs_context_save(secure_state_in); 753 #if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS 754 /* Forward the hint bit denoting the absence of SVE live state. */ 755 simd_ctx_save(secure_state_in, (!secure_origin && (is_sve_hint_set(flags) == true))); 756 #endif 757 #endif 758 759 /* Restore outgoing security state */ 760 #if SPMD_SPM_AT_SEL2 761 cm_el2_sysregs_context_restore(secure_state_out); 762 #else 763 cm_el1_sysregs_context_restore(secure_state_out); 764 #if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS 765 simd_ctx_restore(secure_state_out); 766 #endif 767 #endif 768 cm_set_next_eret_context(secure_state_out); 769 770 ctx_out = cm_get_context(secure_state_out); 771 if (smc_fid == FFA_NORMAL_WORLD_RESUME) { 772 SMC_RET0(ctx_out); 773 } 774 775 if ((GET_SMC_CC(smc_fid) == SMC_64) && (version_out >= MAKE_FFA_VERSION(U(1), U(2)))) { 776 if (version_in < MAKE_FFA_VERSION(U(1), U(2))) { 777 /* FFA version mismatch, with dest >= 1.2 - set outgoing x8-x17 to zero */ 778 SMC_RET18(ctx_out, smc_fid, x1, x2, x3, x4, 779 SMC_GET_GP(handle, CTX_GPREG_X5), 780 SMC_GET_GP(handle, CTX_GPREG_X6), 781 SMC_GET_GP(handle, CTX_GPREG_X7), 782 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); 783 } else { 784 /* Both FFA versions >= 1.2 - pass incoming x8-x17 to dest */ 785 SMC_RET18(ctx_out, smc_fid, x1, x2, x3, x4, 786 SMC_GET_GP(handle, CTX_GPREG_X5), 787 SMC_GET_GP(handle, CTX_GPREG_X6), 788 SMC_GET_GP(handle, CTX_GPREG_X7), 789 SMC_GET_GP(handle, CTX_GPREG_X8), 790 SMC_GET_GP(handle, CTX_GPREG_X9), 791 SMC_GET_GP(handle, CTX_GPREG_X10), 792 SMC_GET_GP(handle, CTX_GPREG_X11), 793 SMC_GET_GP(handle, CTX_GPREG_X12), 794 SMC_GET_GP(handle, CTX_GPREG_X13), 795 SMC_GET_GP(handle, CTX_GPREG_X14), 796 SMC_GET_GP(handle, CTX_GPREG_X15), 797 SMC_GET_GP(handle, CTX_GPREG_X16), 798 SMC_GET_GP(handle, CTX_GPREG_X17) 799 ); 800 } 801 } else { 802 /* 32 bit call or dest has FFA version < 1.2 or unknown */ 803 SMC_RET8(ctx_out, smc_fid, x1, x2, x3, x4, 804 SMC_GET_GP(handle, CTX_GPREG_X5), 805 SMC_GET_GP(handle, CTX_GPREG_X6), 806 SMC_GET_GP(handle, CTX_GPREG_X7)); 807 } 808 } 809 810 /******************************************************************************* 811 * Forward SMCs to the other security state. 
812 ******************************************************************************/ 813 static uint64_t spmd_smc_forward(uint32_t smc_fid, 814 bool secure_origin, 815 uint64_t x1, 816 uint64_t x2, 817 uint64_t x3, 818 uint64_t x4, 819 void *cookie, 820 void *handle, 821 uint64_t flags, 822 uint32_t secure_ffa_version) 823 { 824 if (is_spmc_at_el3() && !secure_origin) { 825 return spmc_smc_handler(smc_fid, secure_origin, x1, x2, x3, x4, 826 cookie, handle, flags); 827 } 828 829 return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2, x3, x4, 830 handle, flags, secure_ffa_version); 831 832 } 833 834 /******************************************************************************* 835 * Return FFA_ERROR with specified error code 836 ******************************************************************************/ 837 uint64_t spmd_ffa_error_return(void *handle, int error_code) 838 { 839 SMC_RET8(handle, (uint32_t) FFA_ERROR, 840 FFA_TARGET_INFO_MBZ, (uint32_t)error_code, 841 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, 842 FFA_PARAM_MBZ, FFA_PARAM_MBZ); 843 } 844 845 /******************************************************************************* 846 * spmd_check_address_in_binary_image 847 ******************************************************************************/ 848 bool spmd_check_address_in_binary_image(uint64_t address) 849 { 850 assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size)); 851 852 return ((address >= spmc_attrs.load_address) && 853 (address < (spmc_attrs.load_address + spmc_attrs.binary_size))); 854 } 855 856 /****************************************************************************** 857 * spmd_is_spmc_message 858 *****************************************************************************/ 859 static bool spmd_is_spmc_message(unsigned int ep) 860 { 861 if (is_spmc_at_el3()) { 862 return false; 863 } 864 865 return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID) 866 && (ffa_endpoint_source(ep) == spmc_attrs.spmc_id)); 867 } 868 869 /******************************************************************************* 870 * This function forwards FF-A SMCs to either the main SPMD handler or the 871 * SPMC at EL3, depending on the origin security state, if enabled. 872 ******************************************************************************/ 873 uint64_t spmd_ffa_smc_handler(uint32_t smc_fid, 874 uint64_t x1, 875 uint64_t x2, 876 uint64_t x3, 877 uint64_t x4, 878 void *cookie, 879 void *handle, 880 uint64_t flags) 881 { 882 if (is_spmc_at_el3()) { 883 /* 884 * If we have an SPMC at EL3 allow handling of the SMC first. 885 * The SPMC will call back through to SPMD handler if required. 886 */ 887 if (is_caller_secure(flags)) { 888 return spmc_smc_handler(smc_fid, 889 is_caller_secure(flags), 890 x1, x2, x3, x4, cookie, 891 handle, flags); 892 } 893 } 894 return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie, 895 handle, flags, spmc_nwd_ffa_version); 896 } 897 898 static uint32_t get_common_ffa_version(uint32_t secure_ffa_version) 899 { 900 if (secure_ffa_version <= nonsecure_ffa_version) { 901 return secure_ffa_version; 902 } else { 903 return nonsecure_ffa_version; 904 } 905 } 906 907 /******************************************************************************* 908 * This function handles all SMCs in the range reserved for FFA. 

/*******************************************************************************
 * This function handles all SMCs in the range reserved for FF-A. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags,
			  uint32_t secure_ffa_version)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64
		" 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
		plat_my_core_pos(), smc_fid, x1, x2, x3, x4,
		SMC_GET_GP(handle, CTX_GPREG_X5),
		SMC_GET_GP(handle, CTX_GPREG_X6),
		SMC_GET_GP(handle, CTX_GPREG_X7));

	/*
	 * If there is an ongoing FFA_PARTITION_INFO_GET_REGS request from an
	 * EL3 SPMD logical partition, unconditionally return; no other FF-A
	 * ABIs are expected to be called between calls to
	 * FFA_PARTITION_INFO_GET_REGS.
	 */
	if (is_spmd_logical_sp_info_regs_req_in_progress(ctx)) {
		assert(secure_origin);
		spmd_spm_core_sync_exit(0ULL);
	}

	if ((!secure_origin) && (smc_fid != FFA_VERSION)) {
		/*
		 * Once the caller invokes any FF-A ABI other than FFA_VERSION,
		 * the version negotiation phase is complete.
		 */
		nonsecure_version_negotiated = true;
	}

	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(x2);
		}

		/*
		 * Perform a synchronous exit if:
		 * 1. There is an ongoing SPMD logical partition direct
		 *    request; return to the SPMD logical partition so the
		 *    error can be consumed there.
		 * 2. The SPMC sent FFA_ERROR in response to a power management
		 *    operation sent through a direct request.
		 */
		if (is_spmd_logical_sp_dir_req_in_progress(ctx) ||
		    ctx->psci_operation_ongoing) {
			assert(secure_origin);
			spmd_spm_core_sync_exit(0ULL);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags, secure_ffa_version);
		break; /* not reached */

	case FFA_VERSION:
		input_version = (uint32_t)(0xFFFFFFFF & x1);
		/*
		 * If the caller is secure and the SPMC is initialised,
		 * return the FF-A version of the SPMD.
		 * If the caller is non-secure and the SPMC is initialised,
		 * forward to the EL3 SPMC if enabled; otherwise send a
		 * framework message to the SPMC at the lower EL to
		 * negotiate a version that is compatible between the
		 * normal world and the SPMC.
		 * Sanity check "input_version".
		 * If the EL3 SPMC is enabled, ignore the SPMC state as
		 * it is not used.
		 */
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
		    (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			if (!nonsecure_version_negotiated) {
				/*
				 * Once an FF-A version has been negotiated
				 * between a caller and a callee, the version
				 * may not be changed for the lifetime of
				 * the calling component.
				 */
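				/*
				 * E.g. (illustrative): a normal world driver
				 * may call FFA_VERSION several times before
				 * invoking any other ABI and have the last
				 * requested version recorded; once any other
				 * FF-A ABI is seen from the normal world,
				 * nonsecure_version_negotiated is set and
				 * this assignment is skipped from then on.
				 */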
1008 */ 1009 nonsecure_ffa_version = input_version; 1010 } 1011 1012 if (is_spmc_at_el3()) { 1013 /* 1014 * Forward the call directly to the EL3 SPMC, if 1015 * enabled, as we don't need to wrap the call in 1016 * a direct request. 1017 */ 1018 spmc_nwd_ffa_version = 1019 MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR); 1020 return spmc_smc_handler(smc_fid, secure_origin, 1021 x1, x2, x3, x4, cookie, 1022 handle, flags); 1023 } 1024 1025 gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx); 1026 uint64_t rc; 1027 1028 if (spmc_attrs.major_version == 1 && 1029 spmc_attrs.minor_version == 0) { 1030 ret = MAKE_FFA_VERSION(spmc_attrs.major_version, 1031 spmc_attrs.minor_version); 1032 spmc_nwd_ffa_version = (uint32_t)ret; 1033 SMC_RET8(handle, (uint32_t)ret, 1034 FFA_TARGET_INFO_MBZ, 1035 FFA_TARGET_INFO_MBZ, 1036 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1037 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1038 FFA_PARAM_MBZ); 1039 break; 1040 } 1041 /* Save non-secure system registers context */ 1042 #if SPMD_SPM_AT_SEL2 1043 cm_el2_sysregs_context_save(NON_SECURE); 1044 #else 1045 cm_el1_sysregs_context_save(NON_SECURE); 1046 #endif 1047 1048 /* 1049 * The incoming request has FFA_VERSION as X0 smc_fid 1050 * and requested version in x1. Prepare a direct request 1051 * from SPMD to SPMC with FFA_VERSION framework function 1052 * identifier in X2 and requested version in X3. 1053 */ 1054 spmd_build_spmc_message(gpregs, 1055 SPMD_FWK_MSG_FFA_VERSION_REQ, 1056 input_version); 1057 1058 /* 1059 * Ensure x8-x17 NS GP register values are untouched when returning 1060 * from the SPMC. 1061 */ 1062 write_ctx_reg(gpregs, CTX_GPREG_X8, SMC_GET_GP(handle, CTX_GPREG_X8)); 1063 write_ctx_reg(gpregs, CTX_GPREG_X9, SMC_GET_GP(handle, CTX_GPREG_X9)); 1064 write_ctx_reg(gpregs, CTX_GPREG_X10, SMC_GET_GP(handle, CTX_GPREG_X10)); 1065 write_ctx_reg(gpregs, CTX_GPREG_X11, SMC_GET_GP(handle, CTX_GPREG_X11)); 1066 write_ctx_reg(gpregs, CTX_GPREG_X12, SMC_GET_GP(handle, CTX_GPREG_X12)); 1067 write_ctx_reg(gpregs, CTX_GPREG_X13, SMC_GET_GP(handle, CTX_GPREG_X13)); 1068 write_ctx_reg(gpregs, CTX_GPREG_X14, SMC_GET_GP(handle, CTX_GPREG_X14)); 1069 write_ctx_reg(gpregs, CTX_GPREG_X15, SMC_GET_GP(handle, CTX_GPREG_X15)); 1070 write_ctx_reg(gpregs, CTX_GPREG_X16, SMC_GET_GP(handle, CTX_GPREG_X16)); 1071 write_ctx_reg(gpregs, CTX_GPREG_X17, SMC_GET_GP(handle, CTX_GPREG_X17)); 1072 1073 rc = spmd_spm_core_sync_entry(ctx); 1074 1075 if ((rc != 0ULL) || 1076 (SMC_GET_GP(gpregs, CTX_GPREG_X0) != 1077 FFA_MSG_SEND_DIRECT_RESP_SMC32) || 1078 (SMC_GET_GP(gpregs, CTX_GPREG_X2) != 1079 (FFA_FWK_MSG_BIT | 1080 SPMD_FWK_MSG_FFA_VERSION_RESP))) { 1081 ERROR("Failed to forward FFA_VERSION\n"); 1082 ret = FFA_ERROR_NOT_SUPPORTED; 1083 } else { 1084 ret = SMC_GET_GP(gpregs, CTX_GPREG_X3); 1085 spmc_nwd_ffa_version = (uint32_t)ret; 1086 } 1087 1088 /* 1089 * x0-x4 are updated by spmd_smc_forward below. 1090 * Zero out x5-x7 in the FFA_VERSION response. 1091 */ 1092 write_ctx_reg(gpregs, CTX_GPREG_X5, 0); 1093 write_ctx_reg(gpregs, CTX_GPREG_X6, 0); 1094 write_ctx_reg(gpregs, CTX_GPREG_X7, 0); 1095 1096 /* 1097 * Return here after SPMC has handled FFA_VERSION. 1098 * The returned SPMC version is held in X3. 1099 * Forward this version in X0 to the non-secure caller. 
1100 */ 1101 return spmd_smc_forward(ret, true, FFA_PARAM_MBZ, 1102 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1103 FFA_PARAM_MBZ, cookie, gpregs, 1104 flags, spmc_nwd_ffa_version); 1105 } else { 1106 ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, 1107 FFA_VERSION_MINOR); 1108 } 1109 1110 SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ, 1111 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1112 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ); 1113 break; /* not reached */ 1114 1115 case FFA_FEATURES: 1116 /* 1117 * This is an optional interface. Do the minimal checks and 1118 * forward to SPM Core which will handle it if implemented. 1119 */ 1120 1121 /* Forward SMC from Normal world to the SPM Core */ 1122 if (!secure_origin) { 1123 return spmd_smc_forward(smc_fid, secure_origin, 1124 x1, x2, x3, x4, cookie, 1125 handle, flags, secure_ffa_version); 1126 } 1127 1128 /* 1129 * Return success if call was from secure world i.e. all 1130 * FFA functions are supported. This is essentially a 1131 * nop. 1132 */ 1133 SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4, 1134 SMC_GET_GP(handle, CTX_GPREG_X5), 1135 SMC_GET_GP(handle, CTX_GPREG_X6), 1136 SMC_GET_GP(handle, CTX_GPREG_X7)); 1137 1138 break; /* not reached */ 1139 1140 case FFA_ID_GET: 1141 /* 1142 * Returns the ID of the calling FFA component. 1143 */ 1144 if (!secure_origin) { 1145 SMC_RET8(handle, FFA_SUCCESS_SMC32, 1146 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID, 1147 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1148 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1149 FFA_PARAM_MBZ); 1150 } 1151 1152 SMC_RET8(handle, FFA_SUCCESS_SMC32, 1153 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id, 1154 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1155 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1156 FFA_PARAM_MBZ); 1157 1158 break; /* not reached */ 1159 1160 case FFA_SECONDARY_EP_REGISTER_SMC64: 1161 if (secure_origin) { 1162 ret = spmd_pm_secondary_ep_register(x1); 1163 1164 if (ret < 0) { 1165 SMC_RET8(handle, FFA_ERROR_SMC64, 1166 FFA_TARGET_INFO_MBZ, ret, 1167 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1168 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1169 FFA_PARAM_MBZ); 1170 } else { 1171 SMC_RET8(handle, FFA_SUCCESS_SMC64, 1172 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, 1173 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1174 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1175 FFA_PARAM_MBZ); 1176 } 1177 } 1178 1179 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1180 break; /* Not reached */ 1181 1182 case FFA_SPM_ID_GET: 1183 if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) { 1184 return spmd_ffa_error_return(handle, 1185 FFA_ERROR_NOT_SUPPORTED); 1186 } 1187 /* 1188 * Returns the ID of the SPMC or SPMD depending on the FF-A 1189 * instance where this function is invoked 1190 */ 1191 if (!secure_origin) { 1192 SMC_RET8(handle, FFA_SUCCESS_SMC32, 1193 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id, 1194 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1195 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1196 FFA_PARAM_MBZ); 1197 } 1198 SMC_RET8(handle, FFA_SUCCESS_SMC32, 1199 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID, 1200 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1201 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 1202 FFA_PARAM_MBZ); 1203 1204 break; /* not reached */ 1205 1206 case FFA_MSG_SEND_DIRECT_REQ2_SMC64: 1207 if (get_common_ffa_version(secure_ffa_version) < MAKE_FFA_VERSION(U(1), U(2))) { 1208 /* Call not supported at this version */ 1209 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1210 } 1211 /* fallthrough */ 1212 case FFA_MSG_SEND_DIRECT_REQ_SMC32: 1213 case FFA_MSG_SEND_DIRECT_REQ_SMC64: 1214 /* 1215 * Regardless of secure_origin, SPMD logical partitions cannot 1216 * handle direct messages. 

	case FFA_MSG_SEND_DIRECT_REQ2_SMC64:
		if (get_common_ffa_version(secure_ffa_version) < MAKE_FFA_VERSION(U(1), U(2))) {
			/* Call not supported at this version */
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}
		/* fallthrough */
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
		/*
		 * Regardless of secure_origin, SPMD logical partitions cannot
		 * handle direct messages. They can only initiate direct
		 * messages and consume direct responses or errors.
		 */
		if (is_spmd_lp_id(ffa_endpoint_source(x1)) ||
		    is_spmd_lp_id(ffa_endpoint_destination(x1))) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_INVALID_PARAMETER);
		}

		/*
		 * When there is an ongoing SPMD logical partition direct
		 * request, there cannot be another direct request. Return an
		 * error in this case. Panicking is an option, but that would
		 * not give the caller the opportunity to abort based on the
		 * error code.
		 */
		if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
			assert(secure_origin);
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_DENIED);
		}

		if (!secure_origin) {
			/* Validate that the source endpoint is non-secure for a non-secure caller. */
			if (ffa_is_secure_world_id(ffa_endpoint_source(x1))) {
				return spmd_ffa_error_return(handle,
							     FFA_ERROR_INVALID_PARAMETER);
			}
		}

		if (secure_origin && spmd_is_spmc_message(x1)) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_DENIED);
		} else {
			/* Forward the direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags, secure_ffa_version);
		}
		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_RESP2_SMC64:
		if (get_common_ffa_version(secure_ffa_version) < MAKE_FFA_VERSION(U(1), U(2))) {
			/* Call not supported at this version */
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}
		/* fallthrough */
	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
		if (secure_origin && (spmd_is_spmc_message(x1) ||
		    is_spmd_logical_sp_dir_req_in_progress(ctx))) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			/* Forward the direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags, secure_ffa_version);
		}
		break; /* not reached */
1297 */ 1298 if (secure_origin) { 1299 return spmd_ffa_error_return(handle, 1300 FFA_ERROR_NOT_SUPPORTED); 1301 } 1302 1303 /* Forward the call to the other world */ 1304 /* fallthrough */ 1305 case FFA_MSG_SEND: 1306 case FFA_MEM_DONATE_SMC32: 1307 case FFA_MEM_DONATE_SMC64: 1308 case FFA_MEM_LEND_SMC32: 1309 case FFA_MEM_LEND_SMC64: 1310 case FFA_MEM_SHARE_SMC32: 1311 case FFA_MEM_SHARE_SMC64: 1312 case FFA_MEM_RETRIEVE_REQ_SMC32: 1313 case FFA_MEM_RETRIEVE_REQ_SMC64: 1314 case FFA_MEM_RETRIEVE_RESP: 1315 case FFA_MEM_RELINQUISH: 1316 case FFA_MEM_RECLAIM: 1317 case FFA_MEM_FRAG_TX: 1318 case FFA_MEM_FRAG_RX: 1319 case FFA_SUCCESS_SMC32: 1320 case FFA_SUCCESS_SMC64: 1321 /* 1322 * If there is an ongoing direct request from an SPMD logical 1323 * partition, return an error. 1324 */ 1325 if (is_spmd_logical_sp_dir_req_in_progress(ctx)) { 1326 assert(secure_origin); 1327 return spmd_ffa_error_return(handle, 1328 FFA_ERROR_DENIED); 1329 } 1330 1331 return spmd_smc_forward(smc_fid, secure_origin, 1332 x1, x2, x3, x4, cookie, 1333 handle, flags, secure_ffa_version); 1334 break; /* not reached */ 1335 1336 case FFA_MSG_WAIT: 1337 /* 1338 * Check if this is the first invocation of this interface on 1339 * this CPU from the Secure world. If so, then indicate that the 1340 * SPM Core initialised successfully. 1341 */ 1342 if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) { 1343 spmd_spm_core_sync_exit(0ULL); 1344 } 1345 1346 /* Forward the call to the other world */ 1347 /* fallthrough */ 1348 case FFA_INTERRUPT: 1349 case FFA_MSG_YIELD: 1350 /* This interface must be invoked only by the Secure world */ 1351 if (!secure_origin) { 1352 return spmd_ffa_error_return(handle, 1353 FFA_ERROR_NOT_SUPPORTED); 1354 } 1355 1356 if (is_spmd_logical_sp_dir_req_in_progress(ctx)) { 1357 assert(secure_origin); 1358 return spmd_ffa_error_return(handle, 1359 FFA_ERROR_DENIED); 1360 } 1361 1362 return spmd_smc_forward(smc_fid, secure_origin, 1363 x1, x2, x3, x4, cookie, 1364 handle, flags, secure_ffa_version); 1365 break; /* not reached */ 1366 1367 case FFA_NORMAL_WORLD_RESUME: 1368 if (secure_origin && ctx->secure_interrupt_ongoing) { 1369 spmd_spm_core_sync_exit(0ULL); 1370 } else { 1371 return spmd_ffa_error_return(handle, FFA_ERROR_DENIED); 1372 } 1373 break; /* Not reached */ 1374 #if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED 1375 case FFA_PARTITION_INFO_GET_REGS_SMC64: 1376 if (secure_origin) { 1377 return spmd_el3_populate_logical_partition_info(handle, x1, 1378 x2, x3); 1379 } 1380 1381 /* Call only supported with SMCCC 1.2+ */ 1382 if (MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION) < 0x10002) { 1383 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1384 } 1385 1386 return spmd_smc_forward(smc_fid, secure_origin, 1387 x1, x2, x3, x4, cookie, 1388 handle, flags, secure_ffa_version); 1389 break; /* Not reached */ 1390 #endif 1391 case FFA_CONSOLE_LOG_SMC32: 1392 case FFA_CONSOLE_LOG_SMC64: 1393 /* This interface must not be forwarded to other worlds. 

	case FFA_CONSOLE_LOG_SMC32:
	case FFA_CONSOLE_LOG_SMC64:
		/* This interface must not be forwarded to other worlds. */
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* not reached */

	case FFA_EL3_INTR_HANDLE:
		if (secure_origin) {
			return spmd_handle_group0_intr_swd(handle);
		} else {
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}

	case FFA_ABORT_SMC32:
	case FFA_ABORT_SMC64:
		/* This interface must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}

		ERROR("SPMC encountered a fatal error. Aborting now\n");
		panic();

		/* not reached */
		SMC_RET0(handle);

	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}