1 /* 2 * Copyright (c) 2020-2021, ARM Limited and Contributors. All rights reserved. 3 * 4 * SPDX-License-Identifier: BSD-3-Clause 5 */ 6 7 #include <assert.h> 8 #include <errno.h> 9 #include <string.h> 10 11 #include <arch_helpers.h> 12 #include <arch/aarch64/arch_features.h> 13 #include <bl31/bl31.h> 14 #include <common/debug.h> 15 #include <common/runtime_svc.h> 16 #include <lib/el3_runtime/context_mgmt.h> 17 #include <lib/smccc.h> 18 #include <lib/spinlock.h> 19 #include <lib/utils.h> 20 #include <plat/common/common_def.h> 21 #include <plat/common/platform.h> 22 #include <platform_def.h> 23 #include <services/ffa_svc.h> 24 #include <services/spmd_svc.h> 25 #include <smccc_helpers.h> 26 #include "spmd_private.h" 27 28 /******************************************************************************* 29 * SPM Core context information. 30 ******************************************************************************/ 31 static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT]; 32 33 /******************************************************************************* 34 * SPM Core attribute information read from its manifest. 35 ******************************************************************************/ 36 static spmc_manifest_attribute_t spmc_attrs; 37 38 /******************************************************************************* 39 * SPM Core entry point information. Discovered on the primary core and reused 40 * on secondary cores. 41 ******************************************************************************/ 42 static entry_point_info_t *spmc_ep_info; 43 44 /******************************************************************************* 45 * SPM Core context on CPU based on mpidr. 
46 ******************************************************************************/ 47 spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr) 48 { 49 int core_idx = plat_core_pos_by_mpidr(mpidr); 50 51 if (core_idx < 0) { 52 ERROR("Invalid mpidr: %llx, returned ID: %d\n", mpidr, core_idx); 53 panic(); 54 } 55 56 return &spm_core_context[core_idx]; 57 } 58 59 /******************************************************************************* 60 * SPM Core context on current CPU get helper. 61 ******************************************************************************/ 62 spmd_spm_core_context_t *spmd_get_context(void) 63 { 64 return spmd_get_context_by_mpidr(read_mpidr()); 65 } 66 67 /******************************************************************************* 68 * SPM Core entry point information get helper. 69 ******************************************************************************/ 70 entry_point_info_t *spmd_spmc_ep_info_get(void) 71 { 72 return spmc_ep_info; 73 } 74 75 /******************************************************************************* 76 * SPM Core ID getter. 77 ******************************************************************************/ 78 uint16_t spmd_spmc_id_get(void) 79 { 80 return spmc_attrs.spmc_id; 81 } 82 83 /******************************************************************************* 84 * Static function declaration. 85 ******************************************************************************/ 86 static int32_t spmd_init(void); 87 static int spmd_spmc_init(void *pm_addr); 88 static uint64_t spmd_ffa_error_return(void *handle, 89 int error_code); 90 static uint64_t spmd_smc_forward(uint32_t smc_fid, 91 bool secure_origin, 92 uint64_t x1, 93 uint64_t x2, 94 uint64_t x3, 95 uint64_t x4, 96 void *handle); 97 98 /******************************************************************************* 99 * This function takes an SPMC context pointer and performs a synchronous 100 * SPMC entry. 
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	/* Make this context the active secure-world context on this CPU. */
	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/*
	 * Restore the context assigned above. When the SPMC runs at S-EL2 the
	 * EL2 system registers are switched, otherwise only EL1 state is.
	 */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#else
	cm_el1_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/*
	 * Enter SPMC. Returns here only when the SPMC comes back through
	 * spmd_spm_core_sync_exit(); rc is the value passed to that call.
	 */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state so it can be resumed on the next entry. */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#else
	cm_el1_sysregs_context_save(SECURE);
#endif

	return rc;
}

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * called originally, handing 'rc' back as that call's return value.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/*
	 * Get current CPU context from SPMC context; a mismatch means no
	 * synchronous entry is in progress on this core.
	 */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0;
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	/* spmd_spm_core_exit() does not return. */
	panic();
}

/*******************************************************************************
 * Jump to the SPM Core for the first time.
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;
	unsigned int linear_id = plat_my_core_pos();
	unsigned int core_id;

	VERBOSE("SPM Core init start.\n");
	ctx->state = SPMC_STATE_ON_PENDING;

	/* Set the SPMC context state on other CPUs to OFF */
	for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
		if (core_id != linear_id) {
			spm_core_context[core_id].state = SPMC_STATE_OFF;
		}
	}

	/*
	 * First synchronous entry into the SPMC; it reports its init result
	 * back through spmd_spm_core_sync_exit() (0 means success).
	 */
	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%llx\n", rc);
		/* 0 signals failure to the BL32-init registration below. */
		return 0;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("SPM Core init end.\n");

	/* 1 signals successful deferred initialisation. */
	return 1;
}

/*******************************************************************************
 * Loads SPMC manifest and inits SPMC.
 *
 * pm_addr: address of the SPMC manifest handed over by BL2.
 * Returns 0 on success, a negative errno-style value otherwise.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	spmd_spm_core_context_t *spm_ctx = spmd_get_context();
	uint32_t ep_attr;
	int rc;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
		spmc_attrs.minor_version);

	VERBOSE("SPM Core run time EL%x.\n",
		SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID, Ensure high bit is set */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
	    SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}

	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("%s%x.\n", "SPM Core execution state 0x",
		spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * Check if S-EL2 is supported on this system if S-EL2
	 * is required for SPM
	 */
	if (!is_armv8_4_sel2_present()) {
		WARN("SPM Core run time S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	/* Propagate EL3 endianness (SCTLR_EL3.EE) to the SPMC entry point. */
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);

	/*
	 * Populate SPSR for SPM Core based upon validated parameters from the
	 * manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {

#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

	/* Initialise SPM Core context with this entry point information */
	cm_setup_context(&spm_ctx->cpu_ctx, spmc_ep_info);

	/* Reuse PSCI affinity states to mark this SPMC context as off */
	spm_ctx->state = AFF_STATE_OFF;

	INFO("SPM Core setup done.\n");

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmd_pm);

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spmd_init);

	return 0;
}

/*******************************************************************************
 * Initialize context of SPM Core.
 *
 * Returns 0 on success, a negative errno-style value otherwise.
 ******************************************************************************/
int spmd_setup(void)
{
	void *spmc_manifest;
	int rc;

	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (spmc_ep_info == NULL) {
		WARN("No SPM Core image provided by BL2 boot loader.\n");
		return -EINVAL;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(spmc_ep_info->pc != 0ULL);

	/*
	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
	 * be used as a manifest for the SPM Core at the next lower EL/mode.
	 */
	spmc_manifest = (void *)spmc_ep_info->args.arg0;
	if (spmc_manifest == NULL) {
		ERROR("Invalid or absent SPM Core manifest.\n");
		return -EINVAL;
	}

	/* Load manifest, init SPMC */
	rc = spmd_spmc_init(spmc_manifest);
	if (rc != 0) {
		WARN("Booting device without SPM initialization.\n");
	}

	return rc;
}

/*******************************************************************************
 * Forward SMC to the other security state.
 *
 * Saves the originating world's context, restores the destination world's
 * context and returns to it with the SMC arguments in x0-x7.
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle)
{
	unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;

	/* Save incoming security state */
#if SPMD_SPM_AT_SEL2
	/* With a S-EL2 SPMC, NS EL1 state is saved in addition to EL2 state. */
	if (secure_state_in == NON_SECURE) {
		cm_el1_sysregs_context_save(secure_state_in);
	}
	cm_el2_sysregs_context_save(secure_state_in);
#else
	cm_el1_sysregs_context_save(secure_state_in);
#endif

	/* Restore outgoing security state */
#if SPMD_SPM_AT_SEL2
	if (secure_state_out == NON_SECURE) {
		cm_el1_sysregs_context_restore(secure_state_out);
	}
	cm_el2_sysregs_context_restore(secure_state_out);
#else
	cm_el1_sysregs_context_restore(secure_state_out);
#endif
	cm_set_next_eret_context(secure_state_out);

	/* x5-x7 are carried over unmodified from the caller's context. */
	SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
		 SMC_GET_GP(handle, CTX_GPREG_X5),
		 SMC_GET_GP(handle, CTX_GPREG_X6),
		 SMC_GET_GP(handle, CTX_GPREG_X7));
}

/*******************************************************************************
 * Return FFA_ERROR with specified error code to the caller's context.
 ******************************************************************************/
static uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, (uint32_t) FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/*******************************************************************************
 * spmd_check_address_in_binary_image
 *
 * True when 'address' lies within the SPMC image [load_address,
 * load_address + binary_size) as described by the manifest.
 ******************************************************************************/
bool spmd_check_address_in_binary_image(uint64_t address)
{
	assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));

	return ((address >= spmc_attrs.load_address) &&
		(address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
}

/******************************************************************************
 * spmd_is_spmc_message
 *
 * True when the direct message endpoint field 'ep' targets the SPMD and was
 * sent by the SPMC.
 *****************************************************************************/
static bool spmd_is_spmc_message(unsigned int ep)
{
	return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
		&& (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
}

/******************************************************************************
 * spmd_handle_spmc_message
 *
 * Placeholder handler for SPMC-to-SPMD direct messages: logs the payload and
 * rejects every message with -EINVAL.
 *****************************************************************************/
static int spmd_handle_spmc_message(unsigned long long msg,
		unsigned long long parm1, unsigned long long parm2,
		unsigned long long parm3, unsigned long long parm4)
{
	VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
		msg, parm1, parm2, parm3, parm4);

	return -EINVAL;
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for FFA.
 * Each call is either forwarded to the other security state or handled by the
 * SPM dispatcher itself. Cases that end in SMC_RET8 or
 * spmd_spm_core_sync_exit() do not fall out of the switch.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	unsigned int linear_id = plat_my_core_pos();
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int32_t ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	VERBOSE("SPM(%u): 0x%x 0x%llx 0x%llx 0x%llx 0x%llx "
		"0x%llx 0x%llx 0x%llx\n",
		linear_id, smc_fid, x1, x2, x3, x4,
		SMC_GET_GP(handle, CTX_GPREG_X5),
		SMC_GET_GP(handle, CTX_GPREG_X6),
		SMC_GET_GP(handle, CTX_GPREG_X7));

	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully. x2 carries the SPMC's error code back to
		 * spmd_init() through the synchronous exit.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(x2);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_VERSION:
		input_version = (uint32_t)(0xFFFFFFFF & x1);
		/*
		 * If caller is secure and SPMC was initialized,
		 * return FFA_VERSION of SPMD.
		 * If caller is non secure and SPMC was initialized,
		 * return SPMC's version.
		 * Sanity check to "input_version": bit 31 must be zero and
		 * the SPMC must have left the reset state.
		 */
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
		    (ctx->state == SPMC_STATE_RESET)) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
					       spmc_attrs.minor_version);
		} else {
			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					       FFA_VERSION_MINOR);
		}

		SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
			 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break; /* not reached */

	case FFA_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to SPM Core which will handle it if implemented.
		 */

		/*
		 * Check if x1 holds a valid FFA fid. This is an
		 * optimization.
		 */
		if (!is_ffa_fid(x1)) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Forward SMC from Normal world to the SPM Core */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}

		/*
		 * Return success if call was from secure world i.e. all
		 * FFA functions are supported. This is essentially a
		 * nop.
		 */
		SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));

		break; /* not reached */

	case FFA_ID_GET:
		/*
		 * Returns the ID of the calling FFA component:
		 * the NS-world endpoint ID for a normal-world caller,
		 * the SPMC's manifest ID for a secure caller.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}

		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_SECONDARY_EP_REGISTER_SMC64:
		/* Only the SPMC may register a secondary entry point. */
		if (secure_origin) {
			ret = spmd_pm_secondary_ep_register(x1);

			if (ret < 0) {
				SMC_RET8(handle, FFA_ERROR_SMC64,
					 FFA_TARGET_INFO_MBZ, ret,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			} else {
				SMC_RET8(handle, FFA_SUCCESS_SMC64,
					 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			}
		}

		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* Not reached */

	case FFA_SPM_ID_GET:
		/* Interface exists only from FF-A v1.1 onwards. */
		if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		/*
		 * Returns the ID of the SPMC or SPMD depending on the FF-A
		 * instance where this function is invoked
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}
		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
		/* Messages from the SPMC to the SPMD are handled locally. */
		if (secure_origin && spmd_is_spmc_message(x1)) {
			ret = spmd_handle_spmc_message(x3, x4,
				SMC_GET_GP(handle, CTX_GPREG_X5),
				SMC_GET_GP(handle, CTX_GPREG_X6),
				SMC_GET_GP(handle, CTX_GPREG_X7));

			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, ret,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}
		break; /* Not reached */

	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
		/* An SPMC response to an SPMD request ends a sync entry. */
		if (secure_origin && spmd_is_spmc_message(x1)) {
			spmd_spm_core_sync_exit(0);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}
		break; /* Not reached */

	case FFA_RX_RELEASE:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_PARTITION_INFO_GET:
		/*
		 * Should not be allowed to forward FFA_PARTITION_INFO_GET
		 * from Secure world to Normal world
		 *
		 * Fall through to forward the call to the other world
		 */
	case FFA_MSG_RUN:
		/* This interface must be invoked only by the Normal world */

		if (secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Fall through to forward the call to the other world */
	case FFA_MSG_SEND:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MEM_DONATE_SMC32:
	case FFA_MEM_DONATE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_RECLAIM:
	case FFA_SUCCESS_SMC32:
	case FFA_SUCCESS_SMC64:
		/*
		 * TODO: Assume that no requests originate from EL3 at the
		 * moment. This will change if a SP service is required in
		 * response to secure interrupts targeted to EL3. Until then
		 * simply forward the call to the Normal world.
		 */

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that the
		 * SPM Core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(0);
		}

		/* Fall through to forward the call to the other world */
	case FFA_INTERRUPT:
	case FFA_MSG_YIELD:
		/* This interface must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}