/*
 * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch/aarch64/arch_features.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/ffa_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"

/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information read from its manifest.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;

/*******************************************************************************
 * SPM Core context lookup by MPIDR.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr)
{
	return &spm_core_context[plat_core_pos_by_mpidr(mpidr)];
}

/*******************************************************************************
 * SPM Core context lookup for the current CPU.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
	return spmd_get_context_by_mpidr(read_mpidr());
}

/*******************************************************************************
 * SPM Core entry point information getter.
 ******************************************************************************/
entry_point_info_t *spmd_spmc_ep_info_get(void)
{
	return spmc_ep_info;
}

/*******************************************************************************
 * SPM Core ID getter.
 ******************************************************************************/
uint16_t spmd_spmc_id_get(void)
{
	return spmc_attrs.spmc_id;
}

/*******************************************************************************
 * Static function declarations.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);
static uint64_t spmd_ffa_error_return(void *handle,
				      int error_code);
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle);
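/*
 * Typical synchronous activation pairing (an illustrative sketch only; the
 * authoritative flow is spmd_init() and the FFA_MSG_WAIT/FFA_ERROR cases in
 * spmd_smc_handler() below):
 *
 *	rc = spmd_spm_core_sync_entry(ctx);
 *		// The SPMC runs until it invokes FFA_MSG_WAIT (init success)
 *		// or FFA_ERROR (init failure). The SMC handler then calls
 *		// spmd_spm_core_sync_exit(rc), which resumes execution here
 *		// with rc in x0.
 */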
/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
	cm_el1_sysregs_context_restore(SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/* Enter SPMC */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
	cm_el1_sysregs_context_save(SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#endif

	return rc;
}

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * originally called.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into the SPMC. Jump back to the original C
	 * runtime context with the value of rc in x0.
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	panic();
}

/*******************************************************************************
 * Jump to the SPM Core for the first time.
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;
	unsigned int linear_id = plat_my_core_pos();
	unsigned int core_id;

	VERBOSE("SPM Core init start.\n");
	ctx->state = SPMC_STATE_ON_PENDING;

	/* Set the SPMC context state on other CPUs to OFF */
	for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
		if (core_id != linear_id) {
			spm_core_context[core_id].state = SPMC_STATE_OFF;
			spm_core_context[core_id].secondary_ep.entry_point = 0UL;
		}
	}

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%llx\n", rc);
		return 0;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("SPM Core init end.\n");

	return 1;
}
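/*
 * The SPMC manifest (tos_fw_config) provides the attributes parsed into
 * spmc_manifest_attribute_t by plat_spm_core_manifest_load(). A minimal
 * sketch of the device tree fragment, assuming the Arm FVP binding (the
 * exact node layout is platform-defined):
 *
 *	attribute {
 *		spmc_id = <0x8000>;
 *		maj_ver = <0x1>;
 *		min_ver = <0x0>;
 *		exec_state = <0x0>;		// MODE_RW_64
 *		load_address = <0x0 0x6000000>;
 *		entrypoint = <0x0 0x6000000>;
 *		binary_size = <0x80000>;
 *	};
 */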
/*******************************************************************************
 * Loads the SPMC manifest and initialises the SPMC.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	spmd_spm_core_context_t *spm_ctx = spmd_get_context();
	uint32_t ep_attr;
	int rc;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
		spmc_attrs.minor_version);

	VERBOSE("SPM Core runtime EL%x.\n",
		SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID. Ensure the high bit is set. */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
	     SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}

	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported SPM Core execution state 0x%x.\n",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("SPM Core execution state 0x%x.\n", spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * Check that S-EL2 is supported on this system, since S-EL2 is
	 * required for SPM in this configuration.
	 */
	if (!is_armv8_4_sel2_present()) {
		WARN("SPM Core runtime S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);
	assert(spmc_ep_info->pc == BL32_BASE);

	/*
	 * Populate SPSR for SPM Core based upon validated parameters from the
	 * manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {
#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

	/* Initialise SPM Core context with this entry point information */
	cm_setup_context(&spm_ctx->cpu_ctx, spmc_ep_info);

	/* Mark this SPMC context as not yet initialised */
	spm_ctx->state = SPMC_STATE_RESET;

	INFO("SPM Core setup done.\n");

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmd_pm);

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spmd_init);

	return 0;
}
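/*
 * Boot-flow summary (descriptive, based on the calls in this file): BL2
 * loads the SPMC image (BL32) and its manifest (tos_fw_config), and BL31
 * invokes spmd_setup() during service initialisation. Actual entry into
 * the SPMC is deferred: bl31_register_bl32_init() arranges for spmd_init()
 * to run just before BL31 exits to the normal world.
 */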
/*******************************************************************************
 * Initialise the context of the SPM Core.
 ******************************************************************************/
int spmd_setup(void)
{
	void *spmc_manifest;
	int rc;

	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (spmc_ep_info == NULL) {
		WARN("No SPM Core image provided by BL2 boot loader.\n");
		return -EINVAL;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(spmc_ep_info->pc != 0ULL);

	/*
	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
	 * be used as a manifest for the SPM Core at the next lower EL/mode.
	 */
	spmc_manifest = (void *)spmc_ep_info->args.arg0;
	if (spmc_manifest == NULL) {
		ERROR("Invalid or absent SPM Core manifest.\n");
		return -EINVAL;
	}

	/* Load manifest, init SPMC */
	rc = spmd_spmc_init(spmc_manifest);
	if (rc != 0) {
		WARN("Booting device without SPM initialization.\n");
	}

	return rc;
}

/*******************************************************************************
 * Forward an SMC to the other security state.
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle)
{
	unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;

	/* Save incoming security state */
	cm_el1_sysregs_context_save(secure_state_in);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(secure_state_in);
#endif

	/* Restore outgoing security state */
	cm_el1_sysregs_context_restore(secure_state_out);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(secure_state_out);
#endif
	cm_set_next_eret_context(secure_state_out);

	SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
		 SMC_GET_GP(handle, CTX_GPREG_X5),
		 SMC_GET_GP(handle, CTX_GPREG_X6),
		 SMC_GET_GP(handle, CTX_GPREG_X7));
}

/*******************************************************************************
 * Return FFA_ERROR with the specified error code.
 ******************************************************************************/
static uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/*******************************************************************************
 * Check whether an address lies within the SPMC binary image.
 ******************************************************************************/
bool spmd_check_address_in_binary_image(uint64_t address)
{
	assert(!check_uptr_overflow(spmc_attrs.load_address,
				    spmc_attrs.binary_size));

	return ((address >= spmc_attrs.load_address) &&
		(address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
}

/******************************************************************************
 * Check whether a direct message targets the SPMD and originates from the
 * SPMC.
 *****************************************************************************/
static bool spmd_is_spmc_message(unsigned int ep)
{
	return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
		&& (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
}
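/*
 * For FFA_MSG_SEND_DIRECT_REQ, w1 packs the endpoint IDs: bits [31:16] hold
 * the sender and bits [15:0] the receiver (per the FF-A v1.0 specification).
 * A sketch of an SPMC-to-SPMD message as accepted by spmd_is_spmc_message(),
 * assuming an SPMC ID of 0x8000:
 *
 *	x1 = (0x8000U << 16) | SPMD_DIRECT_MSG_ENDPOINT_ID;
 */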
/******************************************************************************
 * Handle a direct message request targeted to the SPMD.
 *****************************************************************************/
static int spmd_handle_spmc_message(unsigned long long msg,
		unsigned long long parm1, unsigned long long parm2,
		unsigned long long parm3, unsigned long long parm4)
{
	VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
		msg, parm1, parm2, parm3, parm4);

	switch (msg) {
	case SPMD_DIRECT_MSG_SET_ENTRY_POINT:
		return spmd_pm_secondary_core_set_ep(parm1, parm2, parm3);
	default:
		break;
	}

	return -EINVAL;
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for FFA. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int32_t ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	INFO("SPM: 0x%x 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n",
	     smc_fid, x1, x2, x3, x4, SMC_GET_GP(handle, CTX_GPREG_X5),
	     SMC_GET_GP(handle, CTX_GPREG_X6),
	     SMC_GET_GP(handle, CTX_GPREG_X7));

	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(x2);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_VERSION:
		input_version = (uint32_t)(0xFFFFFFFF & x1);
		/*
		 * If the caller is secure and the SPMC was initialised,
		 * return the SPMD's FFA version. If the caller is non-secure
		 * and the SPMC was initialised, return the SPMC's version.
		 * Also sanity check 'input_version'.
		 */
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
		    (ctx->state == SPMC_STATE_RESET)) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
					       spmc_attrs.minor_version);
		} else {
			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					       FFA_VERSION_MINOR);
		}

		SMC_RET8(handle, ret, FFA_TARGET_INFO_MBZ, FFA_TARGET_INFO_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break; /* not reached */
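	/*
	 * FF-A version encoding (per the FF-A specification): bit 31 MBZ,
	 * bits [30:16] major, bits [15:0] minor. MAKE_FFA_VERSION() follows
	 * this layout, e.g. for v1.0:
	 *
	 *	MAKE_FFA_VERSION(1, 0) == 0x00010000
	 */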
	case FFA_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to SPM Core which will handle it if implemented.
		 */

		/*
		 * Check if x1 holds a valid FFA fid. This is an
		 * optimization.
		 */
		if (!is_ffa_fid(x1)) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Forward SMC from Normal world to the SPM Core */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}

		/*
		 * Return success if the call was from the secure world, i.e.
		 * all FFA functions are supported. This is essentially a nop.
		 */
		SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));

		break; /* not reached */

	case FFA_ID_GET:
		/*
		 * Returns the ID of the calling FFA component.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}

		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
		if (secure_origin && spmd_is_spmc_message(x1)) {
			ret = spmd_handle_spmc_message(x3, x4,
					SMC_GET_GP(handle, CTX_GPREG_X5),
					SMC_GET_GP(handle, CTX_GPREG_X6),
					SMC_GET_GP(handle, CTX_GPREG_X7));

			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, ret,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}
		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
		if (secure_origin && spmd_is_spmc_message(x1)) {
			spmd_spm_core_sync_exit(0);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}
		break; /* not reached */

	case FFA_RX_RELEASE:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_PARTITION_INFO_GET:
		/*
		 * FFA_PARTITION_INFO_GET must not be forwarded from the
		 * Secure world to the Normal world; the secure_origin check
		 * below enforces this.
		 *
		 * Fall through to forward the call to the other world.
		 */
	case FFA_MSG_RUN:
		/* These interfaces must be invoked only by the Normal world */
		if (secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Fall through to forward the call to the other world */
	case FFA_MSG_SEND:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MEM_DONATE_SMC32:
	case FFA_MEM_DONATE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_RECLAIM:
	case FFA_SUCCESS_SMC32:
	case FFA_SUCCESS_SMC64:
		/*
		 * TODO: Assume that no requests originate from EL3 at the
		 * moment. This will change if an SP service is required in
		 * response to secure interrupts targeted to EL3. Until then
		 * simply forward the call to the Normal world.
		 */
		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */
	case FFA_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that
		 * the SPM Core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(0);
		}

		/* Fall through to forward the call to the other world */
	case FFA_MSG_YIELD:
		/* These interfaces must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}
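/*
 * Note on dispatch (descriptive; the routing lives outside this file): in
 * the TF-A tree, spmd_smc_handler() is expected to be invoked from the
 * standard service handler in services/std_svc/std_svc_setup.c, which
 * routes function IDs matching is_ffa_fid() here when the SPMD is enabled.
 */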