/*
 * Copyright (c) 2020-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch/aarch64/arch_features.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/ffa_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"

/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information read from its manifest.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;

/*******************************************************************************
 * SPM Core context on CPU based on mpidr.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr)
{
	int core_idx = plat_core_pos_by_mpidr(mpidr);

	if (core_idx < 0) {
		ERROR("Invalid mpidr: %" PRIx64 ", returned ID: %d\n", mpidr, core_idx);
		panic();
	}

	return &spm_core_context[core_idx];
}

/*******************************************************************************
 * SPM Core context on current CPU get helper.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
	return spmd_get_context_by_mpidr(read_mpidr());
}

/*******************************************************************************
 * SPM Core ID getter.
 ******************************************************************************/
uint16_t spmd_spmc_id_get(void)
{
	return spmc_attrs.spmc_id;
}

/*******************************************************************************
 * Static function declarations.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);
static uint64_t spmd_ffa_error_return(void *handle,
				      int error_code);
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle);
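
/*
 * Usage sketch for the synchronous entry/exit pair below (illustrative). It
 * assumes the assembly helpers spmd_spm_core_enter()/spmd_spm_core_exit()
 * declared in spmd_private.h save and restore the C runtime (callee-saved
 * registers) through c_rt_ctx:
 *
 *	rc = spmd_spm_core_sync_entry(ctx);
 *		--> ERET into the SPMC with the context set up above.
 *		<-- execution resumes here, with rc in x0, once the SPMD
 *		    calls spmd_spm_core_sync_exit(rc) on the same core.
 */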

/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#else
	cm_el1_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/* Enter SPMC */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#else
	cm_el1_sysregs_context_save(SECURE);
#endif

	return rc;
}

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into the SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	panic();
}

/*******************************************************************************
 * Jump to the SPM Core for the first time.
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;

	VERBOSE("SPM Core init start.\n");

	/* Primary boot core enters the SPMC for initialization. */
	ctx->state = SPMC_STATE_ON_PENDING;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc);
		return 0;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("SPM Core init end.\n");

	/* A non-zero return value signals successful init to BL31. */
	return 1;
}
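
/*
 * For reference, a minimal SPMC manifest 'attribute' node as consumed by
 * plat_spm_core_manifest_load() below (a sketch; the exact properties depend
 * on the platform's parser implementation, and the addresses are
 * hypothetical):
 *
 *	attribute {
 *		spmc_id = <0x8000>;		// high (secure) bit set
 *		maj_ver = <0x1>;
 *		min_ver = <0x0>;
 *		exec_state = <0x0>;		// MODE_RW_64
 *		load_address = <0x0 0x6000000>;
 *		entrypoint = <0x0 0x6000000>;
 *		binary_size = <0x80000>;
 *	};
 */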

/*******************************************************************************
 * Loads the SPMC manifest and initializes the SPMC.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	cpu_context_t *cpu_ctx;
	unsigned int core_id;
	uint32_t ep_attr;
	int rc;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
		spmc_attrs.minor_version);

	VERBOSE("SPM Core run time EL%x.\n",
		SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID. Ensure the high bit is set. */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
	     SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}

	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported SPM Core execution state 0x%x.\n",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("SPM Core execution state 0x%x.\n", spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * Check that S-EL2 is supported on this system, since S-EL2
	 * is required for SPM.
	 */
	if (!is_armv8_4_sel2_present()) {
		WARN("SPM Core run time S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);

	/*
	 * Populate SPSR for SPM Core based upon validated parameters from the
	 * manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {
#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

	/* Set an initial SPMC context state for all cores. */
	for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
		spm_core_context[core_id].state = SPMC_STATE_OFF;

		/* Setup an initial cpu context for the SPMC. */
		cpu_ctx = &spm_core_context[core_id].cpu_ctx;
		cm_setup_context(cpu_ctx, spmc_ep_info);

		/*
		 * Pass the core linear ID to the SPMC through x4.
		 * (TF-A implementation defined behavior helping
		 * a legacy TOS migration to adopt FF-A).
		 */
		write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
	}

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmd_pm);

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spmd_init);

	INFO("SPM Core setup done.\n");

	return 0;
}

/*******************************************************************************
 * Initialize context of SPM Core.
 ******************************************************************************/
int spmd_setup(void)
{
	void *spmc_manifest;
	int rc;

	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (spmc_ep_info == NULL) {
		WARN("No SPM Core image provided by BL2 boot loader.\n");
		return -EINVAL;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(spmc_ep_info->pc != 0ULL);

	/*
	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
	 * be used as a manifest for the SPM Core at the next lower EL/mode.
	 */
	spmc_manifest = (void *)spmc_ep_info->args.arg0;
	if (spmc_manifest == NULL) {
		ERROR("Invalid or absent SPM Core manifest.\n");
		return -EINVAL;
	}

	/* Load manifest, init SPMC */
	rc = spmd_spmc_init(spmc_manifest);
	if (rc != 0) {
		WARN("Booting device without SPM initialization.\n");
	}

	return rc;
}
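
/*
 * Note on the save/restore asymmetry below: with the SPMC at S-EL2
 * (SPMD_SPM_AT_SEL2), the S-EL1 system registers of secure partitions are
 * assumed to be context-switched by the SPMC itself, so EL3 handles the EL2
 * system registers for both worlds and the EL1 system registers only for the
 * Normal world. Without S-EL2, EL3 switches the EL1 system registers directly.
 */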

/*******************************************************************************
 * Forward SMC to the other security state
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle)
{
	unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;

	/* Save incoming security state */
#if SPMD_SPM_AT_SEL2
	if (secure_state_in == NON_SECURE) {
		cm_el1_sysregs_context_save(secure_state_in);
	}
	cm_el2_sysregs_context_save(secure_state_in);
#else
	cm_el1_sysregs_context_save(secure_state_in);
#endif

	/* Restore outgoing security state */
#if SPMD_SPM_AT_SEL2
	if (secure_state_out == NON_SECURE) {
		cm_el1_sysregs_context_restore(secure_state_out);
	}
	cm_el2_sysregs_context_restore(secure_state_out);
#else
	cm_el1_sysregs_context_restore(secure_state_out);
#endif
	cm_set_next_eret_context(secure_state_out);

	SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
		 SMC_GET_GP(handle, CTX_GPREG_X5),
		 SMC_GET_GP(handle, CTX_GPREG_X6),
		 SMC_GET_GP(handle, CTX_GPREG_X7));
}

/*******************************************************************************
 * Return FFA_ERROR with specified error code
 ******************************************************************************/
static uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, (uint32_t)FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/*******************************************************************************
 * spmd_check_address_in_binary_image
 ******************************************************************************/
bool spmd_check_address_in_binary_image(uint64_t address)
{
	assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));

	return ((address >= spmc_attrs.load_address) &&
		(address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
}
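
/*
 * For reference (FF-A direct message convention): w1 encodes the sender
 * endpoint ID in bits [31:16] and the receiver endpoint ID in bits [15:0].
 * A message targeting the SPMD would thus carry:
 *
 *	x1 = ((uint32_t)spmc_attrs.spmc_id << 16) | SPMD_DIRECT_MSG_ENDPOINT_ID;
 *
 * which is exactly the pattern spmd_is_spmc_message() matches below.
 */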

/******************************************************************************
 * spmd_is_spmc_message
 *****************************************************************************/
static bool spmd_is_spmc_message(unsigned int ep)
{
	return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
		&& (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
}

/******************************************************************************
 * spmd_handle_spmc_message
 *****************************************************************************/
static int spmd_handle_spmc_message(unsigned long long msg,
		unsigned long long parm1, unsigned long long parm2,
		unsigned long long parm3, unsigned long long parm4)
{
	VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
		msg, parm1, parm2, parm3, parm4);

	return -EINVAL;
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for FFA. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	unsigned int linear_id = plat_my_core_pos();
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int32_t ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64
		" 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
		linear_id, smc_fid, x1, x2, x3, x4,
		SMC_GET_GP(handle, CTX_GPREG_X5),
		SMC_GET_GP(handle, CTX_GPREG_X6),
		SMC_GET_GP(handle, CTX_GPREG_X7));

	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(x2);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_VERSION:
		input_version = (uint32_t)(0xFFFFFFFF & x1);
		/*
		 * Sanity check "input_version". If the caller is secure and
		 * the SPMC was initialized, return the SPMD's FF-A version.
		 * If the caller is non-secure and the SPMC was initialized,
		 * return the SPMC's version.
		 */
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
		    (ctx->state == SPMC_STATE_RESET)) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
					       spmc_attrs.minor_version);
		} else {
			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					       FFA_VERSION_MINOR);
		}

		SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
			 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break; /* not reached */
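
	/*
	 * Worked example for the version encoding used above, assuming the
	 * standard FF-A layout (bit 31 MBZ, major version in bits [30:16],
	 * minor version in bits [15:0]): MAKE_FFA_VERSION(1, 0) == 0x10000.
	 * An input_version with bit 31 set fails the FFA_VERSION_BIT31_MASK
	 * check above and gets FFA_ERROR_NOT_SUPPORTED back in w0.
	 */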

	case FFA_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to SPM Core which will handle it if implemented.
		 */

		/*
		 * Check if x1 holds a valid FFA fid. This is an
		 * optimization.
		 */
		if (!is_ffa_fid(x1)) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Forward SMC from Normal world to the SPM Core */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}

		/*
		 * Return success if call was from secure world i.e. all
		 * FFA functions are supported. This is essentially a
		 * nop.
		 */
		SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));

		break; /* not reached */

	case FFA_ID_GET:
		/* Returns the ID of the calling FFA component. */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}

		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_SECONDARY_EP_REGISTER_SMC64:
		if (secure_origin) {
			ret = spmd_pm_secondary_ep_register(x1);

			if (ret < 0) {
				SMC_RET8(handle, FFA_ERROR_SMC64,
					 FFA_TARGET_INFO_MBZ, ret,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			} else {
				SMC_RET8(handle, FFA_SUCCESS_SMC64,
					 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			}
		}

		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* not reached */

	case FFA_SPM_ID_GET:
		if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		/*
		 * Returns the ID of the SPMC or SPMD depending on the FF-A
		 * instance where this function is invoked.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}
		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */
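
	/*
	 * For the direct message cases below: x1 carries the sender/receiver
	 * endpoint pair checked by spmd_is_spmc_message(), and x3-x7 carry
	 * the payload. For a message addressed to the SPMD, x3 is treated as
	 * the message ID and x4-x7 as its parameters, matching the
	 * spmd_handle_spmc_message() prototype; x2 is left uninterpreted here.
	 */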

	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
		if (secure_origin && spmd_is_spmc_message(x1)) {
			ret = spmd_handle_spmc_message(x3, x4,
				SMC_GET_GP(handle, CTX_GPREG_X5),
				SMC_GET_GP(handle, CTX_GPREG_X6),
				SMC_GET_GP(handle, CTX_GPREG_X7));

			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, ret,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}
		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
		if (secure_origin && spmd_is_spmc_message(x1)) {
			spmd_spm_core_sync_exit(0);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}
		break; /* not reached */

	case FFA_RX_RELEASE:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_PARTITION_INFO_GET:
#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
	case FFA_NOTIFICATION_BITMAP_CREATE:
	case FFA_NOTIFICATION_BITMAP_DESTROY:
	case FFA_NOTIFICATION_BIND:
	case FFA_NOTIFICATION_UNBIND:
	case FFA_NOTIFICATION_SET:
	case FFA_NOTIFICATION_GET:
	case FFA_NOTIFICATION_INFO_GET:
	case FFA_NOTIFICATION_INFO_GET_SMC64:
#endif
		/*
		 * Above calls should not be forwarded from Secure world to
		 * Normal world.
		 *
		 * Fall through to forward the call to the other world.
		 */
	case FFA_MSG_RUN:
		/* This interface must be invoked only by the Normal world */
		if (secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Fall through to forward the call to the other world */
	case FFA_MSG_SEND:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MEM_DONATE_SMC32:
	case FFA_MEM_DONATE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_RECLAIM:
	case FFA_SUCCESS_SMC32:
	case FFA_SUCCESS_SMC64:
		/*
		 * TODO: Assume that no requests originate from EL3 at the
		 * moment. This will change if a SP service is required in
		 * response to secure interrupts targeted to EL3. Until then
		 * simply forward the call to the Normal world.
		 */
		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that
		 * the SPM Core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(0);
		}

		/* Fall through to forward the call to the other world */
	case FFA_INTERRUPT:
	case FFA_MSG_YIELD:
		/* This interface must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}