/*
 * Copyright (c) 2020-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch/aarch64/arch_features.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/ffa_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"

/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information read from its manifest.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;

/*******************************************************************************
 * SPM Core context on CPU based on mpidr.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr)
{
	int core_idx = plat_core_pos_by_mpidr(mpidr);

	if (core_idx < 0) {
		ERROR("Invalid mpidr: %llx, returned ID: %d\n", mpidr, core_idx);
		panic();
	}

	return &spm_core_context[core_idx];
}

/*******************************************************************************
 * SPM Core context on current CPU get helper.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
	return spmd_get_context_by_mpidr(read_mpidr());
}

/*******************************************************************************
 * SPM Core ID getter.
 ******************************************************************************/
uint16_t spmd_spmc_id_get(void)
{
	return spmc_attrs.spmc_id;
}
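
/*
 * Note: spmd_get_context_by_mpidr() panics on an invalid MPIDR rather than
 * returning NULL, so callers of these getters may dereference the returned
 * pointer without a NULL check.
 */
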
/*******************************************************************************
 * Static function declaration.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);
static uint64_t spmd_ffa_error_return(void *handle,
				      int error_code);
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle);

/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#else
	cm_el1_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/* Enter SPMC */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#else
	cm_el1_sysregs_context_save(SECURE);
#endif

	return rc;
}

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	panic();
}
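
/*
 * Illustrative sketch (not compiled): the synchronous entry/exit pair
 * implements the SPMC boot-time handshake. The SPMD enters the SPMC with
 * spmd_spm_core_sync_entry(); the SPMC then reports the outcome of its
 * initialization through FFA_MSG_WAIT (success) or FFA_ERROR (failure),
 * which spmd_smc_handler() below turns into the matching
 * spmd_spm_core_sync_exit():
 *
 *	rc = spmd_spm_core_sync_entry(ctx);	// returns once sync_exit() runs
 *	if (rc != 0ULL) {
 *		// SPMC signalled an initialization error through FFA_ERROR
 *	}
 */
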
/*******************************************************************************
 * Jump to the SPM Core for the first time.
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;

	VERBOSE("SPM Core init start.\n");

	/* Primary boot core enters the SPMC for initialization. */
	ctx->state = SPMC_STATE_ON_PENDING;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%llx\n", rc);
		return 0;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("SPM Core init end.\n");

	return 1;
}
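
/*
 * Note: spmd_init() follows the deferred BL32 init convention used with
 * bl31_register_bl32_init() below: a non-zero return value signals success
 * to BL31, while returning 0 reports that SPMC initialization failed.
 */
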
/*******************************************************************************
 * Loads SPMC manifest and inits SPMC.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	cpu_context_t *cpu_ctx;
	unsigned int core_id;
	uint32_t ep_attr;
	int rc;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
		spmc_attrs.minor_version);

	VERBOSE("SPM Core run time EL%x.\n",
		SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID; ensure the high bit is set */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
	     SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}

	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("%s%x.\n", "SPM Core execution state 0x",
		spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * Check that S-EL2 is supported on this system, since S-EL2
	 * is required for SPM in this configuration.
	 */
	if (!is_armv8_4_sel2_present()) {
		WARN("SPM Core run time S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);

	/*
	 * Populate SPSR for SPM Core based upon validated parameters from the
	 * manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {

#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

	/* Set an initial SPMC context state for all cores. */
	for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
		spm_core_context[core_id].state = SPMC_STATE_OFF;

		/* Setup an initial cpu context for the SPMC. */
		cpu_ctx = &spm_core_context[core_id].cpu_ctx;
		cm_setup_context(cpu_ctx, spmc_ep_info);

		/*
		 * Pass the core linear ID to the SPMC through x4.
		 * (TF-A implementation defined behavior helping
		 * a legacy TOS migration to adopt FF-A).
		 */
		write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
	}

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmd_pm);

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spmd_init);

	INFO("SPM Core setup done.\n");

	return 0;
}

/*******************************************************************************
 * Initialize context of SPM Core.
 ******************************************************************************/
int spmd_setup(void)
{
	void *spmc_manifest;
	int rc;

	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (spmc_ep_info == NULL) {
		WARN("No SPM Core image provided by BL2 boot loader.\n");
		return -EINVAL;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(spmc_ep_info->pc != 0ULL);

	/*
	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
	 * be used as a manifest for the SPM Core at the next lower EL/mode.
	 */
	spmc_manifest = (void *)spmc_ep_info->args.arg0;
	if (spmc_manifest == NULL) {
		ERROR("Invalid or absent SPM Core manifest.\n");
		return -EINVAL;
	}

	/* Load manifest, init SPMC */
	rc = spmd_spmc_init(spmc_manifest);
	if (rc != 0) {
		WARN("Booting device without SPM initialization.\n");
	}

	return rc;
}

/*******************************************************************************
 * Forward SMC to the other security state
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle)
{
	unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;

	/* Save incoming security state */
#if SPMD_SPM_AT_SEL2
	if (secure_state_in == NON_SECURE) {
		cm_el1_sysregs_context_save(secure_state_in);
	}
	cm_el2_sysregs_context_save(secure_state_in);
#else
	cm_el1_sysregs_context_save(secure_state_in);
#endif

	/* Restore outgoing security state */
#if SPMD_SPM_AT_SEL2
	if (secure_state_out == NON_SECURE) {
		cm_el1_sysregs_context_restore(secure_state_out);
	}
	cm_el2_sysregs_context_restore(secure_state_out);
#else
	cm_el1_sysregs_context_restore(secure_state_out);
#endif
	cm_set_next_eret_context(secure_state_out);

	SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
		 SMC_GET_GP(handle, CTX_GPREG_X5),
		 SMC_GET_GP(handle, CTX_GPREG_X6),
		 SMC_GET_GP(handle, CTX_GPREG_X7));
}
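
/*
 * Note on spmd_smc_forward(): in the SPMD_SPM_AT_SEL2 build the secure EL1
 * system register context is expected to be managed by the SPMC itself, so
 * the SPMD saves/restores the EL1 context only for the non-secure side (the
 * EL2 context is saved/restored for both sides). With the SPMC at S-EL1,
 * EL3 manages the EL1 context for both worlds.
 */
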
/*******************************************************************************
 * Return FFA_ERROR with specified error code
 ******************************************************************************/
static uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, (uint32_t) FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/*******************************************************************************
 * spmd_check_address_in_binary_image
 ******************************************************************************/
bool spmd_check_address_in_binary_image(uint64_t address)
{
	assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));

	return ((address >= spmc_attrs.load_address) &&
		(address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
}

/******************************************************************************
 * spmd_is_spmc_message
 *****************************************************************************/
static bool spmd_is_spmc_message(unsigned int ep)
{
	return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
		&& (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
}
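
/*
 * Note on spmd_is_spmc_message(): per the FF-A direct messaging ABI, w1 of
 * FFA_MSG_SEND_DIRECT_REQ/RESP packs the sender endpoint ID in bits [31:16]
 * and the receiver endpoint ID in bits [15:0]; ffa_endpoint_source() and
 * ffa_endpoint_destination() extract these fields. A message is treated as
 * SPMC-originated only when it is sourced from the SPMC ID and targeted at
 * the SPMD's own endpoint ID.
 */
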
/******************************************************************************
 * spmd_handle_spmc_message
 *****************************************************************************/
static int spmd_handle_spmc_message(unsigned long long msg,
		unsigned long long parm1, unsigned long long parm2,
		unsigned long long parm3, unsigned long long parm4)
{
	VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
		msg, parm1, parm2, parm3, parm4);

	return -EINVAL;
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for FFA. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	unsigned int linear_id = plat_my_core_pos();
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int32_t ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	VERBOSE("SPM(%u): 0x%x 0x%llx 0x%llx 0x%llx 0x%llx "
		"0x%llx 0x%llx 0x%llx\n",
		linear_id, smc_fid, x1, x2, x3, x4,
		SMC_GET_GP(handle, CTX_GPREG_X5),
		SMC_GET_GP(handle, CTX_GPREG_X6),
		SMC_GET_GP(handle, CTX_GPREG_X7));

	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(x2);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_VERSION:
		input_version = (uint32_t)(0xFFFFFFFF & x1);
		/*
		 * If the caller is secure, return the SPMD's FFA_VERSION.
		 * If the caller is non-secure, return the SPMC's version.
		 * In both cases, first sanity check "input_version" and
		 * require the SPMC to have left the reset state.
		 */
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
		    (ctx->state == SPMC_STATE_RESET)) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
					       spmc_attrs.minor_version);
		} else {
			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					       FFA_VERSION_MINOR);
		}

		SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
			 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break; /* not reached */
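
	/*
	 * Note on FFA_VERSION encoding: the 32-bit version value carries the
	 * major version in bits [30:16] and the minor version in bits [15:0],
	 * with bit 31 reserved (MBZ), hence the FFA_VERSION_BIT31_MASK sanity
	 * check above. For example, MAKE_FFA_VERSION(1, 0) yields 0x10000.
	 */
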
	case FFA_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to SPM Core which will handle it if implemented.
		 */

		/*
		 * Check if x1 holds a valid FFA fid. This is an
		 * optimization.
		 */
		if (!is_ffa_fid(x1)) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Forward SMC from Normal world to the SPM Core */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}

		/*
		 * Return success if the call was from the secure world, i.e.
		 * all FFA functions are supported. This is essentially a
		 * nop.
		 */
		SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));

		break; /* not reached */

	case FFA_ID_GET:
		/*
		 * Returns the ID of the calling FFA component.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}

		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_SECONDARY_EP_REGISTER_SMC64:
		if (secure_origin) {
			ret = spmd_pm_secondary_ep_register(x1);

			if (ret < 0) {
				SMC_RET8(handle, FFA_ERROR_SMC64,
					 FFA_TARGET_INFO_MBZ, ret,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			} else {
				SMC_RET8(handle, FFA_SUCCESS_SMC64,
					 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			}
		}

		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* not reached */

	case FFA_SPM_ID_GET:
		if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		/*
		 * Returns the ID of the SPMC or SPMD depending on the FF-A
		 * instance where this function is invoked.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}
		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
		if (secure_origin && spmd_is_spmc_message(x1)) {
			ret = spmd_handle_spmc_message(x3, x4,
				SMC_GET_GP(handle, CTX_GPREG_X5),
				SMC_GET_GP(handle, CTX_GPREG_X6),
				SMC_GET_GP(handle, CTX_GPREG_X7));

			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, ret,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}
		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
		if (secure_origin && spmd_is_spmc_message(x1)) {
			spmd_spm_core_sync_exit(0);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}
		break; /* not reached */

	case FFA_RX_RELEASE:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_PARTITION_INFO_GET:
#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
	case FFA_NOTIFICATION_BITMAP_CREATE:
	case FFA_NOTIFICATION_BITMAP_DESTROY:
	case FFA_NOTIFICATION_BIND:
	case FFA_NOTIFICATION_UNBIND:
	case FFA_NOTIFICATION_SET:
	case FFA_NOTIFICATION_GET:
	case FFA_NOTIFICATION_INFO_GET:
	case FFA_NOTIFICATION_INFO_GET_SMC64:
#endif
		/*
		 * The above calls should not be forwarded from the Secure
		 * world to the Normal world.
		 *
		 * Fall through to forward the call to the other world.
		 */
	case FFA_MSG_RUN:
		/* This interface must be invoked only by the Normal world */
		if (secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Fall through to forward the call to the other world */
	case FFA_MSG_SEND:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MEM_DONATE_SMC32:
	case FFA_MEM_DONATE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_RECLAIM:
	case FFA_SUCCESS_SMC32:
	case FFA_SUCCESS_SMC64:
		/*
		 * TODO: Assume that no requests originate from EL3 at the
		 * moment. This will change if an SP service is required in
		 * response to secure interrupts targeted to EL3. Until then
		 * simply forward the call to the Normal world.
		 */
		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that the
		 * SPM Core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(0);
		}

		/* Fall through to forward the call to the other world */
	case FFA_INTERRUPT:
	case FFA_MSG_YIELD:
		/* This interface must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}
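
/*
 * Illustrative sketch (hypothetical, not part of this file): a normal world
 * caller exercising the FFA_VERSION path handled above would issue an SMC
 * with FFA_VERSION in w0 and its supported version in w1. Assuming a generic
 * smc() helper and smc_ret_values type in the caller's environment:
 *
 *	uint32_t requested = MAKE_FFA_VERSION(1, 0);	// bit 31 MBZ
 *	smc_ret_values rets = smc(FFA_VERSION, requested, 0, 0, 0, 0, 0, 0);
 *	// rets.ret0 holds the SPMC's version, or FFA_ERROR_NOT_SUPPORTED if
 *	// bit 31 was set or the SPMC has not left the reset state.
 */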