/*
 * Copyright (c) 2020-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch/aarch64/arch_features.h>
#include <bl31/bl31.h>
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <common/tbbr/tbbr_img_def.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/fconf/fconf.h>
#include <lib/fconf/fconf_dyn_cfg_getter.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/ffa_svc.h>
#include <services/spmc_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"

/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information is read from its manifest if the SPMC is not
 * at EL3. Else, it is populated from the SPMC directly.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;

/*******************************************************************************
 * SPM Core context on CPU based on mpidr.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr)
{
	int core_idx = plat_core_pos_by_mpidr(mpidr);

	if (core_idx < 0) {
		ERROR("Invalid mpidr: %" PRIx64 ", returned ID: %d\n", mpidr, core_idx);
		panic();
	}

	return &spm_core_context[core_idx];
}

/*******************************************************************************
 * SPM Core context on current CPU get helper.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
	return spmd_get_context_by_mpidr(read_mpidr());
}

/*******************************************************************************
 * SPM Core ID getter.
 ******************************************************************************/
uint16_t spmd_spmc_id_get(void)
{
	return spmc_attrs.spmc_id;
}
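/*
 * Note: the SPMC ID is an FF-A endpoint ID. For a secure world endpoint the
 * most significant bit is expected to be set, which spmd_spmc_init() checks
 * when validating the manifest-provided value.
 */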
/*******************************************************************************
 * Static function declarations.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);
static uint64_t spmd_ffa_error_return(void *handle,
				      int error_code);
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags);

/******************************************************************************
 * Builds an SPMD to SPMC direct message request.
 *****************************************************************************/
void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target_func,
			     unsigned long long message)
{
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
	write_ctx_reg(gpregs, CTX_GPREG_X1,
		      (SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
		      spmd_spmc_id_get());
	write_ctx_reg(gpregs, CTX_GPREG_X2, BIT(31) | target_func);
	write_ctx_reg(gpregs, CTX_GPREG_X3, message);
}
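/*
 * Register layout of the framework message built above (per the FF-A spec):
 *  x0 - FFA_MSG_SEND_DIRECT_REQ_SMC32 function ID.
 *  x1 - sender (SPMD) endpoint ID in bits [31:16], receiver (SPMC) endpoint
 *       ID in bits [15:0].
 *  x2 - framework message flag (BIT(31)) and the framework function in the
 *       low bits.
 *  x3 - message payload.
 */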
/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#else
	cm_el1_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/* Enter SPMC */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#else
	cm_el1_sysregs_context_save(SECURE);
#endif

	return rc;
}

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	panic();
}

/*******************************************************************************
 * Jump to the SPM Core for the first time.
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;

	VERBOSE("SPM Core init start.\n");

	/* Primary boot core enters the SPMC for initialization. */
	ctx->state = SPMC_STATE_ON_PENDING;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc);
		return 0;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("SPM Core init end.\n");

	return 1;
}
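/*
 * Note: the 0/1 return convention above appears to follow the bl32_init hook
 * registered through bl31_register_bl32_init() in spmd_spmc_init(): a
 * non-zero return reports successful SPMC initialization to BL31, zero
 * reports failure.
 */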
/*******************************************************************************
 * spmd_secure_interrupt_handler
 * Enter the SPMC for further handling of the secure interrupt by the SPMC
 * itself or a Secure Partition.
 ******************************************************************************/
static uint64_t spmd_secure_interrupt_handler(uint32_t id,
					      uint32_t flags,
					      void *handle,
					      void *cookie)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
	unsigned int linear_id = plat_my_core_pos();
	int64_t rc;

	/* Sanity check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering SPMC */
	cm_el1_sysregs_context_save(NON_SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(NON_SECURE);
#endif

	/* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT);
	write_ctx_reg(gpregs, CTX_GPREG_X1, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X2, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X3, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X7, 0);

	/* Mark current core as handling a secure interrupt. */
	ctx->secure_interrupt_ongoing = true;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc, linear_id);
	}

	ctx->secure_interrupt_ongoing = false;

	cm_el1_sysregs_context_restore(NON_SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(NON_SECURE);
#endif
	cm_set_next_eret_context(NON_SECURE);

	SMC_RET0(&ctx->cpu_ctx);
}
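/*
 * The helpers below are only needed when RME is enabled and the SPMC runs at
 * S-EL2: the manifest referenced by TOS_FW_CONFIG_ID then resides in Root PAS
 * memory, which an S-EL2 SPMC cannot access, so the SPMD relocates it to a
 * secure region before handing it over (see the copy in spmd_spmc_init()).
 */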
#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
static int spmd_dynamic_map_mem(uintptr_t base_addr, size_t size,
				unsigned int attr, uintptr_t *align_addr,
				size_t *align_size)
{
	uintptr_t base_addr_align;
	size_t mapped_size_align;
	int rc;

	/* Page-align the address and size if necessary */
	base_addr_align = page_align(base_addr, DOWN);
	mapped_size_align = page_align(size, UP);

	/*
	 * If the base address was rounded down but the size was already
	 * page-aligned, add one page so the mapping still covers the tail
	 * of the region.
	 */
	if ((base_addr != base_addr_align) &&
	    (size == mapped_size_align)) {
		mapped_size_align += PAGE_SIZE;
	}

	/*
	 * Map dynamically given region with its aligned base address and
	 * size
	 */
	rc = mmap_add_dynamic_region((unsigned long long)base_addr_align,
				     base_addr_align,
				     mapped_size_align,
				     attr);
	if (rc == 0) {
		*align_addr = base_addr_align;
		*align_size = mapped_size_align;
	}

	return rc;
}

static void spmd_do_sec_cpy(uintptr_t root_base_addr, uintptr_t sec_base_addr,
			    size_t size)
{
	uintptr_t root_base_addr_align, sec_base_addr_align;
	size_t root_mapped_size_align, sec_mapped_size_align;
	int rc;

	assert(root_base_addr != 0UL);
	assert(sec_base_addr != 0UL);
	assert(size != 0UL);

	/* Map the memory with required attributes */
	rc = spmd_dynamic_map_mem(root_base_addr, size, MT_RO_DATA | MT_ROOT,
				  &root_base_addr_align,
				  &root_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while mapping", "root region",
		      root_base_addr, rc);
		panic();
	}

	rc = spmd_dynamic_map_mem(sec_base_addr, size, MT_RW_DATA | MT_SECURE,
				  &sec_base_addr_align, &sec_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while mapping",
		      "secure region", sec_base_addr, rc);
		panic();
	}

	/* Do copy operation */
	(void)memcpy((void *)sec_base_addr, (void *)root_base_addr, size);

	/* Unmap root memory region */
	rc = mmap_remove_dynamic_region(root_base_addr_align,
					root_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while unmapping",
		      "root region", root_base_addr_align, rc);
		panic();
	}

	/* Unmap secure memory region */
	rc = mmap_remove_dynamic_region(sec_base_addr_align,
					sec_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while unmapping",
		      "secure region", sec_base_addr_align, rc);
		panic();
	}
}
#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */
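/*
 * For reference, a minimal SPMC manifest "attribute" node consumed by
 * plat_spm_core_manifest_load() might look as follows. This is an
 * illustrative sketch modeled on the Arm FVP example manifest; the exact
 * fields and values are platform-defined:
 *
 *	attribute {
 *		spmc_id = <0x8000>;
 *		maj_ver = <0x1>;
 *		min_ver = <0x1>;
 *		exec_state = <0x0>;
 *		load_address = <0x0 0x6000000>;
 *		entrypoint = <0x0 0x6000000>;
 *		binary_size = <0x80000>;
 *	};
 */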
/*******************************************************************************
 * Loads the SPMC manifest and initializes the SPMC.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	cpu_context_t *cpu_ctx;
	unsigned int core_id;
	uint32_t ep_attr, flags;
	int rc;
	const struct dyn_cfg_dtb_info_t *image_info __unused;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
		spmc_attrs.minor_version);

	VERBOSE("SPM Core run time EL%x.\n",
		SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID; ensure the high bit is set */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
	     SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}

	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("%s%x.\n", "SPM Core execution state 0x",
		spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * Check if S-EL2 is supported on this system if S-EL2
	 * is required for SPM
	 */
	if (!is_armv8_4_sel2_present()) {
		WARN("SPM Core run time S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);

	/*
	 * Populate SPSR for SPM Core based upon validated parameters from the
	 * manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {

#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
	image_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TOS_FW_CONFIG_ID);
	assert(image_info != NULL);

	if ((image_info->config_addr == 0UL) ||
	    (image_info->secondary_config_addr == 0UL) ||
	    (image_info->config_max_size == 0UL)) {
		return -EINVAL;
	}

	/* Copy manifest from root->secure region */
	spmd_do_sec_cpy(image_info->config_addr,
			image_info->secondary_config_addr,
			image_info->config_max_size);

	/* Update ep info of BL32 */
	assert(spmc_ep_info != NULL);
	spmc_ep_info->args.arg0 = image_info->secondary_config_addr;
#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */

	/* Set an initial SPMC context state for all cores. */
	for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
		spm_core_context[core_id].state = SPMC_STATE_OFF;

		/* Setup an initial cpu context for the SPMC. */
		cpu_ctx = &spm_core_context[core_id].cpu_ctx;
		cm_setup_context(cpu_ctx, spmc_ep_info);

		/*
		 * Pass the core linear ID to the SPMC through x4.
		 * (TF-A implementation defined behavior helping
		 * a legacy TOS migration to adopt FF-A).
		 */
		write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
	}

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmd_pm);

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spmd_init);

	INFO("SPM Core setup done.\n");

	/*
	 * Register an interrupt handler routing secure interrupts to SPMD
	 * while the NWd is running.
	 */
	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					     spmd_secure_interrupt_handler,
					     flags);
	if (rc != 0) {
		panic();
	}

	return 0;
}

/*******************************************************************************
 * Initialize context of SPM Core.
 ******************************************************************************/
int spmd_setup(void)
{
	int rc;
	void *spmc_manifest;

	/*
	 * If the SPMC is at EL3, then just initialise it directly. The
	 * shenanigans of when it is at a lower EL are not needed.
	 */
	if (is_spmc_at_el3()) {
		/* Allow the SPMC to populate its attributes directly. */
		spmc_populate_attrs(&spmc_attrs);

		rc = spmc_setup();
		if (rc != 0) {
			WARN("SPMC initialisation failed 0x%x.\n", rc);
		}
		return 0;
	}

	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (spmc_ep_info == NULL) {
		WARN("No SPM Core image provided by BL2 boot loader.\n");
		return 0;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(spmc_ep_info->pc != 0ULL);

	/*
	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
	 * be used as a manifest for the SPM Core at the next lower EL/mode.
	 */
	spmc_manifest = (void *)spmc_ep_info->args.arg0;
	if (spmc_manifest == NULL) {
		WARN("Invalid or absent SPM Core manifest.\n");
		return 0;
	}

	/* Load manifest, init SPMC */
	rc = spmd_spmc_init(spmc_manifest);
	if (rc != 0) {
		WARN("Booting device without SPM initialization.\n");
	}

	return 0;
}
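/*
 * Note on the context handling below: when the SPMC runs at S-EL2 it manages
 * the EL1 context of its secure partitions itself, so the SPMD only
 * saves/restores EL1 system registers for the non-secure side while always
 * handling the EL2 registers of both worlds. With the SPMC at S-EL1, the EL1
 * system registers are saved/restored for both worlds.
 */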
/*******************************************************************************
 * Forward FF-A SMCs to the other security state.
 ******************************************************************************/
uint64_t spmd_smc_switch_state(uint32_t smc_fid,
			       bool secure_origin,
			       uint64_t x1,
			       uint64_t x2,
			       uint64_t x3,
			       uint64_t x4,
			       void *handle)
{
	unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;

	/* Save incoming security state */
#if SPMD_SPM_AT_SEL2
	if (secure_state_in == NON_SECURE) {
		cm_el1_sysregs_context_save(secure_state_in);
	}
	cm_el2_sysregs_context_save(secure_state_in);
#else
	cm_el1_sysregs_context_save(secure_state_in);
#endif

	/* Restore outgoing security state */
#if SPMD_SPM_AT_SEL2
	if (secure_state_out == NON_SECURE) {
		cm_el1_sysregs_context_restore(secure_state_out);
	}
	cm_el2_sysregs_context_restore(secure_state_out);
#else
	cm_el1_sysregs_context_restore(secure_state_out);
#endif
	cm_set_next_eret_context(secure_state_out);

#if SPMD_SPM_AT_SEL2
	/*
	 * If SPMC is at SEL2, save additional registers x8-x17, which may
	 * be used in FF-A calls such as FFA_PARTITION_INFO_GET_REGS.
	 * Note that technically, all SPMCs can support this, but this code is
	 * under ifdef to minimize breakage in case other SPMCs do not save
	 * and restore x8-x17.
	 * We also need to pass through these registers since not all FF-A ABIs
	 * modify x8-x17, in which case, SMCCC requires that these registers be
	 * preserved, so the SPMD passes through these registers and expects the
	 * SPMC to save and restore (potentially also modify) them.
	 */
	SMC_RET18(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
		  SMC_GET_GP(handle, CTX_GPREG_X5),
		  SMC_GET_GP(handle, CTX_GPREG_X6),
		  SMC_GET_GP(handle, CTX_GPREG_X7),
		  SMC_GET_GP(handle, CTX_GPREG_X8),
		  SMC_GET_GP(handle, CTX_GPREG_X9),
		  SMC_GET_GP(handle, CTX_GPREG_X10),
		  SMC_GET_GP(handle, CTX_GPREG_X11),
		  SMC_GET_GP(handle, CTX_GPREG_X12),
		  SMC_GET_GP(handle, CTX_GPREG_X13),
		  SMC_GET_GP(handle, CTX_GPREG_X14),
		  SMC_GET_GP(handle, CTX_GPREG_X15),
		  SMC_GET_GP(handle, CTX_GPREG_X16),
		  SMC_GET_GP(handle, CTX_GPREG_X17)
		  );
#else
	SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
		 SMC_GET_GP(handle, CTX_GPREG_X5),
		 SMC_GET_GP(handle, CTX_GPREG_X6),
		 SMC_GET_GP(handle, CTX_GPREG_X7));
#endif
}

/*******************************************************************************
 * Forward SMCs to the other security state.
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	if (is_spmc_at_el3() && !secure_origin) {
		return spmc_smc_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					cookie, handle, flags);
	}

	return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2, x3, x4,
				     handle);
}

/*******************************************************************************
 * Return FFA_ERROR with specified error code
 ******************************************************************************/
static uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, (uint32_t) FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/*******************************************************************************
 * spmd_check_address_in_binary_image
 ******************************************************************************/
bool spmd_check_address_in_binary_image(uint64_t address)
{
	assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));

	return ((address >= spmc_attrs.load_address) &&
		(address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
}

/******************************************************************************
 * spmd_is_spmc_message
 *****************************************************************************/
static bool spmd_is_spmc_message(unsigned int ep)
{
	if (is_spmc_at_el3()) {
		return false;
	}

	return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
		&& (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
}

/******************************************************************************
 * spmd_handle_spmc_message
 *****************************************************************************/
static int spmd_handle_spmc_message(unsigned long long msg,
		unsigned long long parm1, unsigned long long parm2,
		unsigned long long parm3, unsigned long long parm4)
{
	VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
		msg, parm1, parm2, parm3, parm4);

	return -EINVAL;
}
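/*
 * Note: no SPMC to SPMD direct message is currently defined, hence the
 * unconditional -EINVAL above. This is the hook where future framework
 * messages originating from the SPMC would be decoded.
 */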
/*******************************************************************************
 * This function forwards FF-A SMCs to either the main SPMD handler or the
 * SPMC at EL3, depending on the origin security state, if enabled.
 ******************************************************************************/
uint64_t spmd_ffa_smc_handler(uint32_t smc_fid,
			      uint64_t x1,
			      uint64_t x2,
			      uint64_t x3,
			      uint64_t x4,
			      void *cookie,
			      void *handle,
			      uint64_t flags)
{
	if (is_spmc_at_el3()) {
		/*
		 * If we have an SPMC at EL3 allow handling of the SMC first.
		 * The SPMC will call back through to SPMD handler if required.
		 */
		if (is_caller_secure(flags)) {
			return spmc_smc_handler(smc_fid,
						is_caller_secure(flags),
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
	}
	return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
				handle, flags);
}
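/*
 * Note: while a core is still in SPMC_STATE_ON_PENDING, the first
 * FFA_MSG_WAIT (success) or FFA_ERROR (failure) issued by the SPMC completes
 * the synchronous initialization entry via spmd_spm_core_sync_exit(), as
 * handled in the corresponding cases below.
 */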
/*******************************************************************************
 * This function handles all SMCs in the range reserved for FFA. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	unsigned int linear_id = plat_my_core_pos();
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int32_t ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64
		" 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
		linear_id, smc_fid, x1, x2, x3, x4,
		SMC_GET_GP(handle, CTX_GPREG_X5),
		SMC_GET_GP(handle, CTX_GPREG_X6),
		SMC_GET_GP(handle, CTX_GPREG_X7));

	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(x2);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_VERSION:
		input_version = (uint32_t)(0xFFFFFFFF & x1);
		/*
		 * If the caller is secure and the SPMC was initialized,
		 * return the SPMD's FFA_VERSION.
		 * If the caller is non-secure and the SPMC was initialized,
		 * forward to the EL3 SPMC if enabled, otherwise return
		 * the SPMC version if implemented at a lower EL.
		 * Sanity check "input_version".
		 * If the EL3 SPMC is enabled, ignore the SPMC state as
		 * this is not used.
		 */
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
		    (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			if (is_spmc_at_el3()) {
				/*
				 * Forward the call directly to the EL3 SPMC, if
				 * enabled, as we don't need to wrap the call in
				 * a direct request.
				 */
				return spmd_smc_forward(smc_fid, secure_origin,
							x1, x2, x3, x4, cookie,
							handle, flags);
			}

			gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
			uint64_t rc;

			if (spmc_attrs.major_version == 1 &&
			    spmc_attrs.minor_version == 0) {
				ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
						       spmc_attrs.minor_version);
				SMC_RET8(handle, (uint32_t)ret,
					 FFA_TARGET_INFO_MBZ,
					 FFA_TARGET_INFO_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
				break;
			}
			/* Save non-secure system registers context */
			cm_el1_sysregs_context_save(NON_SECURE);
#if SPMD_SPM_AT_SEL2
			cm_el2_sysregs_context_save(NON_SECURE);
#endif

			/*
			 * The incoming request has FFA_VERSION as X0 smc_fid
			 * and requested version in x1. Prepare a direct request
			 * from SPMD to SPMC with FFA_VERSION framework function
			 * identifier in X2 and requested version in X3.
			 */
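			/*
			 * The SPMC is expected to answer with a framework
			 * direct response: x0 holding
			 * FFA_MSG_SEND_DIRECT_RESP_SMC32, x2 holding
			 * FFA_FWK_MSG_BIT | SPMD_FWK_MSG_FFA_VERSION_RESP and
			 * x3 the negotiated version, as checked below.
			 */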
			spmd_build_spmc_message(gpregs,
						SPMD_FWK_MSG_FFA_VERSION_REQ,
						input_version);

			rc = spmd_spm_core_sync_entry(ctx);

			if ((rc != 0ULL) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X0) !=
			     FFA_MSG_SEND_DIRECT_RESP_SMC32) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X2) !=
			     (FFA_FWK_MSG_BIT |
			      SPMD_FWK_MSG_FFA_VERSION_RESP))) {
				ERROR("Failed to forward FFA_VERSION\n");
				ret = FFA_ERROR_NOT_SUPPORTED;
			} else {
				ret = SMC_GET_GP(gpregs, CTX_GPREG_X3);
			}

			/*
			 * Return here after SPMC has handled FFA_VERSION.
			 * The returned SPMC version is held in X3.
			 * Forward this version in X0 to the non-secure caller.
			 */
			return spmd_smc_forward(ret, true, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, cookie, gpregs,
						flags);
		} else {
			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					       FFA_VERSION_MINOR);
		}

		SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
			 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break; /* not reached */

	case FFA_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to SPM Core which will handle it if implemented.
		 */

		/* Forward SMC from Normal world to the SPM Core */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}

		/*
		 * Return success if call was from secure world i.e. all
		 * FFA functions are supported. This is essentially a
		 * nop.
		 */
		SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));

		break; /* not reached */
	case FFA_ID_GET:
		/*
		 * Returns the ID of the calling FFA component.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}

		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_SECONDARY_EP_REGISTER_SMC64:
		if (secure_origin) {
			ret = spmd_pm_secondary_ep_register(x1);

			if (ret < 0) {
				SMC_RET8(handle, FFA_ERROR_SMC64,
					 FFA_TARGET_INFO_MBZ, ret,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			} else {
				SMC_RET8(handle, FFA_SUCCESS_SMC64,
					 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
			}
		}

		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* not reached */

	case FFA_SPM_ID_GET:
		if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		/*
		 * Returns the ID of the SPMC or SPMD depending on the FF-A
		 * instance where this function is invoked
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}
		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */
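	/*
	 * Direct message routing: requests targeting the SPMD itself (source
	 * SPMC, destination SPMD, see spmd_is_spmc_message()) are consumed
	 * here; all other direct requests/responses are forwarded to the
	 * other world.
	 */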
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
		if (!secure_origin) {
			/* Validate source endpoint is non-secure for non-secure caller. */
			if (ffa_is_secure_world_id(ffa_endpoint_source(x1))) {
				return spmd_ffa_error_return(handle,
						FFA_ERROR_INVALID_PARAMETER);
			}
		}
		if (secure_origin && spmd_is_spmc_message(x1)) {
			ret = spmd_handle_spmc_message(x3, x4,
				SMC_GET_GP(handle, CTX_GPREG_X5),
				SMC_GET_GP(handle, CTX_GPREG_X6),
				SMC_GET_GP(handle, CTX_GPREG_X7));

			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, ret,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
		if (secure_origin && spmd_is_spmc_message(x1)) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
		break; /* not reached */

	case FFA_RX_RELEASE:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_PARTITION_INFO_GET:
#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
	case FFA_NOTIFICATION_BITMAP_CREATE:
	case FFA_NOTIFICATION_BITMAP_DESTROY:
	case FFA_NOTIFICATION_BIND:
	case FFA_NOTIFICATION_UNBIND:
	case FFA_NOTIFICATION_SET:
	case FFA_NOTIFICATION_GET:
	case FFA_NOTIFICATION_INFO_GET:
	case FFA_NOTIFICATION_INFO_GET_SMC64:
	case FFA_MSG_SEND2:
	case FFA_RX_ACQUIRE:
#endif
	case FFA_MSG_RUN:
		/*
		 * Above calls should be invoked only by the Normal world and
		 * must not be forwarded from Secure world to Normal world.
		 */
		if (secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Forward the call to the other world */
		/* fallthrough */
	case FFA_MSG_SEND:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MEM_DONATE_SMC32:
	case FFA_MEM_DONATE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_RECLAIM:
	case FFA_MEM_FRAG_TX:
	case FFA_MEM_FRAG_RX:
	case FFA_SUCCESS_SMC32:
	case FFA_SUCCESS_SMC64:
		/*
		 * TODO: Assume that no requests originate from EL3 at the
		 * moment. This will change if a SP service is required in
		 * response to secure interrupts targeted to EL3. Until then
		 * simply forward the call to the Normal world.
		 */
		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that the
		 * SPM Core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(0ULL);
		}

		/* Forward the call to the other world */
		/* fallthrough */
	case FFA_INTERRUPT:
	case FFA_MSG_YIELD:
		/* This interface must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_NORMAL_WORLD_RESUME:
		if (secure_origin && ctx->secure_interrupt_ongoing) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			return spmd_ffa_error_return(handle, FFA_ERROR_DENIED);
		}
		break; /* not reached */

#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
	case FFA_PARTITION_INFO_GET_REGS_SMC64:
		if (secure_origin) {
			/* TODO: Future patches to enable support for this */
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}

		/* Call only supported with SMCCC 1.2+ */
		if (MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION) < 0x10002) {
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */
#endif

	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}