/*
 * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch/aarch64/arch_features.h>
#include <bl31/bl31.h>
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/ffa_svc.h>
#include <services/spmc_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"

/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information is read from its manifest if the SPMC is not
 * at EL3. Else, it is populated from the SPMC directly.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;

/*******************************************************************************
 * SPM Core context on CPU based on mpidr.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr)
{
        int core_idx = plat_core_pos_by_mpidr(mpidr);

        if (core_idx < 0) {
                ERROR("Invalid mpidr: %" PRIx64 ", returned ID: %d\n", mpidr, core_idx);
                panic();
        }

        return &spm_core_context[core_idx];
}

/*******************************************************************************
 * SPM Core context on current CPU get helper.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
        return spmd_get_context_by_mpidr(read_mpidr());
}

/*******************************************************************************
 * SPM Core ID getter.
 ******************************************************************************/
uint16_t spmd_spmc_id_get(void)
{
        return spmc_attrs.spmc_id;
}
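
/*
 * Illustrative note (an interpretation based on the FF-A ID space, not part
 * of the original sources): secure world endpoint IDs carry their top bit
 * set, which spmd_spmc_init() later enforces through SPMC_SECURE_ID_SHIFT
 * and SPMC_SECURE_ID_MASK. For example:
 *
 *      0x8000  typical SPMC ID, passes the check
 *      0x0001  normal world endpoint ID, rejected with -EINVAL
 */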

/*******************************************************************************
 * Static function declarations.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);
static uint64_t spmd_ffa_error_return(void *handle,
                                      int error_code);
static uint64_t spmd_smc_forward(uint32_t smc_fid,
                                 bool secure_origin,
                                 uint64_t x1,
                                 uint64_t x2,
                                 uint64_t x3,
                                 uint64_t x4,
                                 void *cookie,
                                 void *handle,
                                 uint64_t flags);

/******************************************************************************
 * Builds an SPMD to SPMC direct message request.
 *****************************************************************************/
void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target_func,
                             unsigned long long message)
{
        write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
        write_ctx_reg(gpregs, CTX_GPREG_X1,
                (SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
                 spmd_spmc_id_get());
        write_ctx_reg(gpregs, CTX_GPREG_X2, BIT(31) | target_func);
        write_ctx_reg(gpregs, CTX_GPREG_X3, message);
}

/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
        uint64_t rc;

        assert(spmc_ctx != NULL);

        cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

        /* Restore the context assigned above */
#if SPMD_SPM_AT_SEL2
        cm_el2_sysregs_context_restore(SECURE);
#else
        cm_el1_sysregs_context_restore(SECURE);
#endif
        cm_set_next_eret_context(SECURE);

        /* Enter SPMC */
        rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

        /* Save secure state */
#if SPMD_SPM_AT_SEL2
        cm_el2_sysregs_context_save(SECURE);
#else
        cm_el1_sysregs_context_save(SECURE);
#endif

        return rc;
}

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
        spmd_spm_core_context_t *ctx = spmd_get_context();

        /* Get current CPU context from SPMC context */
        assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

        /*
         * The SPMD must have initiated the original request through a
         * synchronous entry into SPMC. Jump back to the original C runtime
         * context with the value of rc in x0.
         */
        spmd_spm_core_exit(ctx->c_rt_ctx, rc);

        panic();
}
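
/*
 * Usage sketch (illustrative only): spmd_spm_core_sync_entry() and
 * spmd_spm_core_sync_exit() act as a coroutine pair. The entry path stashes
 * the caller's C runtime context in ctx->c_rt_ctx before entering the SPMC;
 * a later SMC trapped from the SPMC (e.g. FFA_MSG_SEND_DIRECT_RESP, or
 * FFA_MSG_WAIT during boot) unwinds back to it:
 *
 *      rc = spmd_spm_core_sync_entry(ctx);     // SPMC runs here ...
 *      ...                                     // ... until it traps back and
 *      spmd_spm_core_sync_exit(rc);            // resumes after the entry call
 */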

/*******************************************************************************
 * Jump to the SPM Core for the first time.
 ******************************************************************************/
static int32_t spmd_init(void)
{
        spmd_spm_core_context_t *ctx = spmd_get_context();
        uint64_t rc;

        VERBOSE("SPM Core init start.\n");

        /* Primary boot core enters the SPMC for initialization. */
        ctx->state = SPMC_STATE_ON_PENDING;

        rc = spmd_spm_core_sync_entry(ctx);
        if (rc != 0ULL) {
                ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc);
                return 0;
        }

        ctx->state = SPMC_STATE_ON;

        VERBOSE("SPM Core init end.\n");

        return 1;
}

/*******************************************************************************
 * spmd_secure_interrupt_handler
 * Enter the SPMC for further handling of the secure interrupt by the SPMC
 * itself or a Secure Partition.
 ******************************************************************************/
static uint64_t spmd_secure_interrupt_handler(uint32_t id,
                                              uint32_t flags,
                                              void *handle,
                                              void *cookie)
{
        spmd_spm_core_context_t *ctx = spmd_get_context();
        gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
        unsigned int linear_id = plat_my_core_pos();
        int64_t rc;

        /* Sanity check the security state when the exception was generated */
        assert(get_interrupt_src_ss(flags) == NON_SECURE);

        /* Sanity check the pointer to this cpu's context */
        assert(handle == cm_get_context(NON_SECURE));

        /* Save the non-secure context before entering SPMC */
        cm_el1_sysregs_context_save(NON_SECURE);
#if SPMD_SPM_AT_SEL2
        cm_el2_sysregs_context_save(NON_SECURE);
#endif

        /* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
        write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT);
        write_ctx_reg(gpregs, CTX_GPREG_X1, 0);
        write_ctx_reg(gpregs, CTX_GPREG_X2, 0);
        write_ctx_reg(gpregs, CTX_GPREG_X3, 0);
        write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
        write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
        write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
        write_ctx_reg(gpregs, CTX_GPREG_X7, 0);

        /* Mark current core as handling a secure interrupt. */
        ctx->secure_interrupt_ongoing = true;

        rc = spmd_spm_core_sync_entry(ctx);
        if (rc != 0ULL) {
                ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc, linear_id);
        }

        ctx->secure_interrupt_ongoing = false;

        cm_el1_sysregs_context_restore(NON_SECURE);
#if SPMD_SPM_AT_SEL2
        cm_el2_sysregs_context_restore(NON_SECURE);
#endif
        cm_set_next_eret_context(NON_SECURE);

        SMC_RET0(&ctx->cpu_ctx);
}
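
/*
 * Illustrative flow (a summary of the handler above; the step list is an
 * interpretation rather than part of the original sources):
 *   1. A secure interrupt fires while the normal world runs; BL31 routes it
 *      here via the INTR_TYPE_S_EL1 handler registered below.
 *   2. The non-secure context is saved and FFA_INTERRUPT is staged in x0.
 *   3. spmd_spm_core_sync_entry() enters the SPMC, which dispatches the
 *      interrupt to the relevant Secure Partition.
 *   4. The SPMC's FFA_NORMAL_WORLD_RESUME triggers the matching sync exit
 *      and execution resumes in the normal world via SMC_RET0.
 */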

/*******************************************************************************
 * Loads SPMC manifest and inits SPMC.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
        cpu_context_t *cpu_ctx;
        unsigned int core_id;
        uint32_t ep_attr, flags;
        int rc;

        /* Load the SPM Core manifest */
        rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
        if (rc != 0) {
                WARN("No or invalid SPM Core manifest image provided by BL2\n");
                return rc;
        }

        /*
         * Ensure that the SPM Core version is compatible with the SPM
         * Dispatcher version.
         */
        if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
            (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
                WARN("Unsupported FFA version (%u.%u)\n",
                     spmc_attrs.major_version, spmc_attrs.minor_version);
                return -EINVAL;
        }

        VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
                spmc_attrs.minor_version);

        VERBOSE("SPM Core run time EL%x.\n",
                SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

        /* Validate the SPMC ID. Ensure the high bit is set. */
        if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
             SPMC_SECURE_ID_MASK) == 0U) {
                WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
                return -EINVAL;
        }

        /* Validate the SPM Core execution state */
        if ((spmc_attrs.exec_state != MODE_RW_64) &&
            (spmc_attrs.exec_state != MODE_RW_32)) {
                WARN("Unsupported SPM Core execution state 0x%x.\n",
                     spmc_attrs.exec_state);
                return -EINVAL;
        }

        VERBOSE("SPM Core execution state 0x%x.\n", spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
        /* Ensure manifest has not requested AArch32 state in S-EL2 */
        if (spmc_attrs.exec_state == MODE_RW_32) {
                WARN("AArch32 state at S-EL2 is not supported.\n");
                return -EINVAL;
        }

        /*
         * Check if S-EL2 is supported on this system if S-EL2
         * is required for SPM.
         */
        if (!is_armv8_4_sel2_present()) {
                WARN("SPM Core run time S-EL2 is not supported.\n");
                return -EINVAL;
        }
#endif /* SPMD_SPM_AT_SEL2 */

        /* Initialise an entrypoint to set up the CPU context */
        ep_attr = SECURE | EP_ST_ENABLE;
        if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
                ep_attr |= EP_EE_BIG;
        }

        SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);

        /*
         * Populate SPSR for SPM Core based upon validated parameters from the
         * manifest.
         */
        if (spmc_attrs.exec_state == MODE_RW_32) {
                spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
                                                 SPSR_E_LITTLE,
                                                 DAIF_FIQ_BIT |
                                                 DAIF_IRQ_BIT |
                                                 DAIF_ABT_BIT);
        } else {
#if SPMD_SPM_AT_SEL2
                static const uint32_t runtime_el = MODE_EL2;
#else
                static const uint32_t runtime_el = MODE_EL1;
#endif
                spmc_ep_info->spsr = SPSR_64(runtime_el,
                                             MODE_SP_ELX,
                                             DISABLE_ALL_EXCEPTIONS);
        }

        /* Set an initial SPMC context state for all cores. */
        for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
                spm_core_context[core_id].state = SPMC_STATE_OFF;

                /* Setup an initial cpu context for the SPMC. */
                cpu_ctx = &spm_core_context[core_id].cpu_ctx;
                cm_setup_context(cpu_ctx, spmc_ep_info);

                /*
                 * Pass the core linear ID to the SPMC through x4.
                 * (TF-A implementation defined behavior helping
                 * a legacy TOS migration to adopt FF-A).
                 */
                write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
        }

        /* Register power management hooks with PSCI */
        psci_register_spd_pm_hook(&spmd_pm);

        /* Register init function for deferred init. */
        bl31_register_bl32_init(&spmd_init);

        INFO("SPM Core setup done.\n");

        /*
         * Register an interrupt handler routing secure interrupts to SPMD
         * while the NWd is running.
         */
        flags = 0;
        set_interrupt_rm_flag(flags, NON_SECURE);
        rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
                                             spmd_secure_interrupt_handler,
                                             flags);
        if (rc != 0) {
                panic();
        }

        return 0;
}
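
/*
 * Example manifest fragment (hypothetical values; the exact device tree
 * binding is owned by the platform's plat_spm_core_manifest_load()
 * implementation, and the property names here are assumptions mirroring the
 * spmc_manifest_attribute_t fields consumed above):
 *
 *      attribute {
 *              maj_ver = <0x1>;
 *              min_ver = <0x1>;
 *              spmc_id = <0x8000>;
 *              exec_state = <0x0>;             // MODE_RW_64
 *              load_address = <0x0 0x6000000>;
 *              binary_size = <0x80000>;
 *      };
 */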
398 */ 399 if (is_spmc_at_el3()) { 400 /* Allow the SPMC to populate its attributes directly. */ 401 spmc_populate_attrs(&spmc_attrs); 402 403 rc = spmc_setup(); 404 if (rc != 0) { 405 ERROR("SPMC initialisation failed 0x%x.\n", rc); 406 } 407 return rc; 408 } 409 410 spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE); 411 if (spmc_ep_info == NULL) { 412 WARN("No SPM Core image provided by BL2 boot loader.\n"); 413 return -EINVAL; 414 } 415 416 /* Under no circumstances will this parameter be 0 */ 417 assert(spmc_ep_info->pc != 0ULL); 418 419 /* 420 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will 421 * be used as a manifest for the SPM Core at the next lower EL/mode. 422 */ 423 spmc_manifest = (void *)spmc_ep_info->args.arg0; 424 if (spmc_manifest == NULL) { 425 ERROR("Invalid or absent SPM Core manifest.\n"); 426 return -EINVAL; 427 } 428 429 /* Load manifest, init SPMC */ 430 rc = spmd_spmc_init(spmc_manifest); 431 if (rc != 0) { 432 WARN("Booting device without SPM initialization.\n"); 433 } 434 435 return rc; 436 } 437 438 /******************************************************************************* 439 * Forward FF-A SMCs to the other security state. 440 ******************************************************************************/ 441 uint64_t spmd_smc_switch_state(uint32_t smc_fid, 442 bool secure_origin, 443 uint64_t x1, 444 uint64_t x2, 445 uint64_t x3, 446 uint64_t x4, 447 void *handle) 448 { 449 unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE; 450 unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE; 451 452 /* Save incoming security state */ 453 #if SPMD_SPM_AT_SEL2 454 if (secure_state_in == NON_SECURE) { 455 cm_el1_sysregs_context_save(secure_state_in); 456 } 457 cm_el2_sysregs_context_save(secure_state_in); 458 #else 459 cm_el1_sysregs_context_save(secure_state_in); 460 #endif 461 462 /* Restore outgoing security state */ 463 #if SPMD_SPM_AT_SEL2 464 if (secure_state_out == NON_SECURE) { 465 cm_el1_sysregs_context_restore(secure_state_out); 466 } 467 cm_el2_sysregs_context_restore(secure_state_out); 468 #else 469 cm_el1_sysregs_context_restore(secure_state_out); 470 #endif 471 cm_set_next_eret_context(secure_state_out); 472 473 SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4, 474 SMC_GET_GP(handle, CTX_GPREG_X5), 475 SMC_GET_GP(handle, CTX_GPREG_X6), 476 SMC_GET_GP(handle, CTX_GPREG_X7)); 477 } 478 479 /******************************************************************************* 480 * Forward SMCs to the other security state. 

/*******************************************************************************
 * Forward SMCs to the other security state.
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
                                 bool secure_origin,
                                 uint64_t x1,
                                 uint64_t x2,
                                 uint64_t x3,
                                 uint64_t x4,
                                 void *cookie,
                                 void *handle,
                                 uint64_t flags)
{
        if (is_spmc_at_el3() && !secure_origin) {
                return spmc_smc_handler(smc_fid, secure_origin, x1, x2, x3, x4,
                                        cookie, handle, flags);
        }

        return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2, x3, x4,
                                     handle);
}

/*******************************************************************************
 * Return FFA_ERROR with specified error code.
 ******************************************************************************/
static uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
        SMC_RET8(handle, (uint32_t) FFA_ERROR,
                 FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
                 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
                 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/*******************************************************************************
 * spmd_check_address_in_binary_image
 ******************************************************************************/
bool spmd_check_address_in_binary_image(uint64_t address)
{
        assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));

        return ((address >= spmc_attrs.load_address) &&
                (address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
}

/******************************************************************************
 * spmd_is_spmc_message
 *****************************************************************************/
static bool spmd_is_spmc_message(unsigned int ep)
{
        if (is_spmc_at_el3()) {
                return false;
        }

        return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
                && (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
}

/******************************************************************************
 * spmd_handle_spmc_message
 *****************************************************************************/
static int spmd_handle_spmc_message(unsigned long long msg,
                unsigned long long parm1, unsigned long long parm2,
                unsigned long long parm3, unsigned long long parm4)
{
        VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
                msg, parm1, parm2, parm3, parm4);

        return -EINVAL;
}

/*******************************************************************************
 * This function forwards FF-A SMCs to either the main SPMD handler or the
 * SPMC at EL3, depending on the origin security state, if enabled.
 ******************************************************************************/
uint64_t spmd_ffa_smc_handler(uint32_t smc_fid,
                              uint64_t x1,
                              uint64_t x2,
                              uint64_t x3,
                              uint64_t x4,
                              void *cookie,
                              void *handle,
                              uint64_t flags)
{
        if (is_spmc_at_el3()) {
                /*
                 * If we have an SPMC at EL3 allow handling of the SMC first.
                 * The SPMC will call back through to SPMD handler if required.
                 */
                if (is_caller_secure(flags)) {
                        return spmc_smc_handler(smc_fid,
                                                is_caller_secure(flags),
                                                x1, x2, x3, x4, cookie,
                                                handle, flags);
                }
        }
        return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
                                handle, flags);
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for FFA. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
                          uint64_t x1,
                          uint64_t x2,
                          uint64_t x3,
                          uint64_t x4,
                          void *cookie,
                          void *handle,
                          uint64_t flags)
{
        unsigned int linear_id = plat_my_core_pos();
        spmd_spm_core_context_t *ctx = spmd_get_context();
        bool secure_origin;
        int32_t ret;
        uint32_t input_version;

        /* Determine which security state this SMC originated from */
        secure_origin = is_caller_secure(flags);

        VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64
                " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
                linear_id, smc_fid, x1, x2, x3, x4,
                SMC_GET_GP(handle, CTX_GPREG_X5),
                SMC_GET_GP(handle, CTX_GPREG_X6),
                SMC_GET_GP(handle, CTX_GPREG_X7));

        switch (smc_fid) {
        case FFA_ERROR:
                /*
                 * Check if this is the first invocation of this interface on
                 * this CPU. If so, then indicate that the SPM Core initialised
                 * unsuccessfully.
                 */
                if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
                        spmd_spm_core_sync_exit(x2);
                }

                return spmd_smc_forward(smc_fid, secure_origin,
                                        x1, x2, x3, x4, cookie,
                                        handle, flags);
                break; /* not reached */

        case FFA_VERSION:
                input_version = (uint32_t)(0xFFFFFFFF & x1);
                /*
                 * If the caller is secure and the SPMC was initialized,
                 * return the SPMD's FFA_VERSION.
                 * If the caller is non-secure and the SPMC was initialized,
                 * forward to the EL3 SPMC if enabled, otherwise return
                 * the SPMC version if implemented at a lower EL.
                 * Sanity check the "input_version".
                 * If the EL3 SPMC is enabled, ignore the SPMC state as
                 * this is not used.
                 */
                if ((input_version & FFA_VERSION_BIT31_MASK) ||
                    (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) {
                        ret = FFA_ERROR_NOT_SUPPORTED;
                } else if (!secure_origin) {
                        if (is_spmc_at_el3()) {
                                /*
                                 * Forward the call directly to the EL3 SPMC,
                                 * if enabled, as we don't need to wrap the
                                 * call in a direct request.
                                 */
                                return spmd_smc_forward(smc_fid, secure_origin,
                                                        x1, x2, x3, x4, cookie,
                                                        handle, flags);
                        }

                        gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
                        uint64_t rc;

                        if (spmc_attrs.major_version == 1 &&
                            spmc_attrs.minor_version == 0) {
                                ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
                                                       spmc_attrs.minor_version);
                                SMC_RET8(handle, (uint32_t)ret,
                                         FFA_TARGET_INFO_MBZ,
                                         FFA_TARGET_INFO_MBZ,
                                         FFA_PARAM_MBZ, FFA_PARAM_MBZ,
                                         FFA_PARAM_MBZ, FFA_PARAM_MBZ,
                                         FFA_PARAM_MBZ);
                                break;
                        }
                        /* Save non-secure system registers context */
                        cm_el1_sysregs_context_save(NON_SECURE);
#if SPMD_SPM_AT_SEL2
                        cm_el2_sysregs_context_save(NON_SECURE);
#endif

                        /*
                         * The incoming request has FFA_VERSION as X0 smc_fid
                         * and requested version in x1. Prepare a direct request
                         * from SPMD to SPMC with FFA_VERSION framework function
                         * identifier in X2 and requested version in X3.
                         */
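                        /*
                         * Resulting register layout (illustrative, derived
                         * from spmd_build_spmc_message()):
                         *      x0 = FFA_MSG_SEND_DIRECT_REQ_SMC32
                         *      x1 = (SPMD_DIRECT_MSG_ENDPOINT_ID << 16) |
                         *           spmd_spmc_id_get()
                         *      x2 = FFA_FWK_MSG_BIT |
                         *           SPMD_FWK_MSG_FFA_VERSION_REQ
                         *      x3 = input_version
                         */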
676 */ 677 spmd_build_spmc_message(gpregs, 678 SPMD_FWK_MSG_FFA_VERSION_REQ, 679 input_version); 680 681 rc = spmd_spm_core_sync_entry(ctx); 682 683 if ((rc != 0ULL) || 684 (SMC_GET_GP(gpregs, CTX_GPREG_X0) != 685 FFA_MSG_SEND_DIRECT_RESP_SMC32) || 686 (SMC_GET_GP(gpregs, CTX_GPREG_X2) != 687 (FFA_FWK_MSG_BIT | 688 SPMD_FWK_MSG_FFA_VERSION_RESP))) { 689 ERROR("Failed to forward FFA_VERSION\n"); 690 ret = FFA_ERROR_NOT_SUPPORTED; 691 } else { 692 ret = SMC_GET_GP(gpregs, CTX_GPREG_X3); 693 } 694 695 /* 696 * Return here after SPMC has handled FFA_VERSION. 697 * The returned SPMC version is held in X3. 698 * Forward this version in X0 to the non-secure caller. 699 */ 700 return spmd_smc_forward(ret, true, FFA_PARAM_MBZ, 701 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 702 FFA_PARAM_MBZ, cookie, gpregs, 703 flags); 704 } else { 705 ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, 706 FFA_VERSION_MINOR); 707 } 708 709 SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ, 710 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, 711 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ); 712 break; /* not reached */ 713 714 case FFA_FEATURES: 715 /* 716 * This is an optional interface. Do the minimal checks and 717 * forward to SPM Core which will handle it if implemented. 718 */ 719 720 /* Forward SMC from Normal world to the SPM Core */ 721 if (!secure_origin) { 722 return spmd_smc_forward(smc_fid, secure_origin, 723 x1, x2, x3, x4, cookie, 724 handle, flags); 725 } 726 727 /* 728 * Return success if call was from secure world i.e. all 729 * FFA functions are supported. This is essentially a 730 * nop. 731 */ 732 SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4, 733 SMC_GET_GP(handle, CTX_GPREG_X5), 734 SMC_GET_GP(handle, CTX_GPREG_X6), 735 SMC_GET_GP(handle, CTX_GPREG_X7)); 736 737 break; /* not reached */ 738 739 case FFA_ID_GET: 740 /* 741 * Returns the ID of the calling FFA component. 
742 */ 743 if (!secure_origin) { 744 SMC_RET8(handle, FFA_SUCCESS_SMC32, 745 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID, 746 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 747 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 748 FFA_PARAM_MBZ); 749 } 750 751 SMC_RET8(handle, FFA_SUCCESS_SMC32, 752 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id, 753 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 754 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 755 FFA_PARAM_MBZ); 756 757 break; /* not reached */ 758 759 case FFA_SECONDARY_EP_REGISTER_SMC64: 760 if (secure_origin) { 761 ret = spmd_pm_secondary_ep_register(x1); 762 763 if (ret < 0) { 764 SMC_RET8(handle, FFA_ERROR_SMC64, 765 FFA_TARGET_INFO_MBZ, ret, 766 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 767 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 768 FFA_PARAM_MBZ); 769 } else { 770 SMC_RET8(handle, FFA_SUCCESS_SMC64, 771 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, 772 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 773 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 774 FFA_PARAM_MBZ); 775 } 776 } 777 778 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 779 break; /* Not reached */ 780 781 case FFA_SPM_ID_GET: 782 if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) { 783 return spmd_ffa_error_return(handle, 784 FFA_ERROR_NOT_SUPPORTED); 785 } 786 /* 787 * Returns the ID of the SPMC or SPMD depending on the FF-A 788 * instance where this function is invoked 789 */ 790 if (!secure_origin) { 791 SMC_RET8(handle, FFA_SUCCESS_SMC32, 792 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id, 793 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 794 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 795 FFA_PARAM_MBZ); 796 } 797 SMC_RET8(handle, FFA_SUCCESS_SMC32, 798 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID, 799 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 800 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 801 FFA_PARAM_MBZ); 802 803 break; /* not reached */ 804 805 case FFA_MSG_SEND_DIRECT_REQ_SMC32: 806 case FFA_MSG_SEND_DIRECT_REQ_SMC64: 807 if (!secure_origin) { 808 /* Validate source endpoint is non-secure for non-secure caller. 

        case FFA_MSG_SEND_DIRECT_REQ_SMC32:
        case FFA_MSG_SEND_DIRECT_REQ_SMC64:
                if (!secure_origin) {
                        /* Validate source endpoint is non-secure for non-secure caller. */
                        if (ffa_is_secure_world_id(ffa_endpoint_source(x1))) {
                                return spmd_ffa_error_return(handle,
                                                FFA_ERROR_INVALID_PARAMETER);
                        }
                }
                if (secure_origin && spmd_is_spmc_message(x1)) {
                        ret = spmd_handle_spmc_message(x3, x4,
                                SMC_GET_GP(handle, CTX_GPREG_X5),
                                SMC_GET_GP(handle, CTX_GPREG_X6),
                                SMC_GET_GP(handle, CTX_GPREG_X7));

                        SMC_RET8(handle, FFA_SUCCESS_SMC32,
                                 FFA_TARGET_INFO_MBZ, ret,
                                 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
                                 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
                                 FFA_PARAM_MBZ);
                } else {
                        /* Forward direct message to the other world */
                        return spmd_smc_forward(smc_fid, secure_origin,
                                                x1, x2, x3, x4, cookie,
                                                handle, flags);
                }
                break; /* not reached */

        case FFA_MSG_SEND_DIRECT_RESP_SMC32:
                if (secure_origin && spmd_is_spmc_message(x1)) {
                        spmd_spm_core_sync_exit(0ULL);
                } else {
                        /* Forward direct message to the other world */
                        return spmd_smc_forward(smc_fid, secure_origin,
                                                x1, x2, x3, x4, cookie,
                                                handle, flags);
                }
                break; /* not reached */

        case FFA_RX_RELEASE:
        case FFA_RXTX_MAP_SMC32:
        case FFA_RXTX_MAP_SMC64:
        case FFA_RXTX_UNMAP:
        case FFA_PARTITION_INFO_GET:
#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
        case FFA_NOTIFICATION_BITMAP_CREATE:
        case FFA_NOTIFICATION_BITMAP_DESTROY:
        case FFA_NOTIFICATION_BIND:
        case FFA_NOTIFICATION_UNBIND:
        case FFA_NOTIFICATION_SET:
        case FFA_NOTIFICATION_GET:
        case FFA_NOTIFICATION_INFO_GET:
        case FFA_NOTIFICATION_INFO_GET_SMC64:
        case FFA_MSG_SEND2:
        case FFA_RX_ACQUIRE:
#endif
        case FFA_MSG_RUN:
                /*
                 * The above calls should be invoked only by the Normal world
                 * and must not be forwarded from the Secure world to the
                 * Normal world.
                 */
                if (secure_origin) {
                        return spmd_ffa_error_return(handle,
                                                     FFA_ERROR_NOT_SUPPORTED);
                }

                /* Forward the call to the other world */
                /* fallthrough */
        case FFA_MSG_SEND:
        case FFA_MSG_SEND_DIRECT_RESP_SMC64:
        case FFA_MEM_DONATE_SMC32:
        case FFA_MEM_DONATE_SMC64:
        case FFA_MEM_LEND_SMC32:
        case FFA_MEM_LEND_SMC64:
        case FFA_MEM_SHARE_SMC32:
        case FFA_MEM_SHARE_SMC64:
        case FFA_MEM_RETRIEVE_REQ_SMC32:
        case FFA_MEM_RETRIEVE_REQ_SMC64:
        case FFA_MEM_RETRIEVE_RESP:
        case FFA_MEM_RELINQUISH:
        case FFA_MEM_RECLAIM:
        case FFA_MEM_FRAG_TX:
        case FFA_MEM_FRAG_RX:
        case FFA_SUCCESS_SMC32:
        case FFA_SUCCESS_SMC64:
                /*
                 * TODO: Assume that no requests originate from EL3 at the
                 * moment. This will change if an SP service is required in
                 * response to secure interrupts targeted to EL3. Until then
                 * simply forward the call to the Normal world.
                 */
                return spmd_smc_forward(smc_fid, secure_origin,
                                        x1, x2, x3, x4, cookie,
                                        handle, flags);
                break; /* not reached */
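
        /*
         * Note (an interpretation of the boot protocol implemented here):
         * FFA_MSG_WAIT below pairs with the FFA_ERROR case above. The SPMC
         * reports successful initialisation by invoking FFA_MSG_WAIT on its
         * primary execution context, and failure by invoking FFA_ERROR; both
         * unwind the synchronous entry performed from spmd_init().
         */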
907 */ 908 if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) { 909 spmd_spm_core_sync_exit(0ULL); 910 } 911 912 /* Forward the call to the other world */ 913 /* fallthrough */ 914 case FFA_INTERRUPT: 915 case FFA_MSG_YIELD: 916 /* This interface must be invoked only by the Secure world */ 917 if (!secure_origin) { 918 return spmd_ffa_error_return(handle, 919 FFA_ERROR_NOT_SUPPORTED); 920 } 921 922 return spmd_smc_forward(smc_fid, secure_origin, 923 x1, x2, x3, x4, cookie, 924 handle, flags); 925 break; /* not reached */ 926 927 case FFA_NORMAL_WORLD_RESUME: 928 if (secure_origin && ctx->secure_interrupt_ongoing) { 929 spmd_spm_core_sync_exit(0ULL); 930 } else { 931 return spmd_ffa_error_return(handle, FFA_ERROR_DENIED); 932 } 933 break; /* Not reached */ 934 935 default: 936 WARN("SPM: Unsupported call 0x%08x\n", smc_fid); 937 return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 938 } 939 } 940