/*
 * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch/aarch64/arch_features.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/ffa_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"

/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information read from its manifest.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;

/*******************************************************************************
 * Helper to get the SPM Core context on the current CPU.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
        unsigned int linear_id = plat_my_core_pos();

        return &spm_core_context[linear_id];
}

/*******************************************************************************
 * SPM Core entry point information get helper.
 ******************************************************************************/
entry_point_info_t *spmd_spmc_ep_info_get(void)
{
        return spmc_ep_info;
}

/*******************************************************************************
 * SPM Core ID getter.
 ******************************************************************************/
uint16_t spmd_spmc_id_get(void)
{
        return spmc_attrs.spmc_id;
}

/*******************************************************************************
 * Static function declarations.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);
static uint64_t spmd_ffa_error_return(void *handle,
                                      int error_code);
static uint64_t spmd_smc_forward(uint32_t smc_fid,
                                 bool secure_origin,
                                 uint64_t x1,
                                 uint64_t x2,
                                 uint64_t x3,
                                 uint64_t x4,
                                 void *handle);
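/*******************************************************************************
 * Note on the synchronous entry/exit protocol implemented below:
 * spmd_spm_core_sync_entry() saves the SPMD's C runtime context in c_rt_ctx
 * and ERETs into the SPMC. The SPMC later hands control back through an SMC
 * that the handler turns into spmd_spm_core_sync_exit() (FFA_MSG_WAIT or
 * FFA_ERROR during boot), which restores that C runtime context so execution
 * resumes just after the original entry call, with the exit code in x0.
 ******************************************************************************/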
/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
        uint64_t rc;

        assert(spmc_ctx != NULL);

        cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

        /* Restore the context assigned above */
        cm_el1_sysregs_context_restore(SECURE);
#if SPMD_SPM_AT_SEL2
        cm_el2_sysregs_context_restore(SECURE);
#endif
        cm_set_next_eret_context(SECURE);

        /* Enter SPMC */
        rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

        /* Save secure state */
        cm_el1_sysregs_context_save(SECURE);
#if SPMD_SPM_AT_SEL2
        cm_el2_sysregs_context_save(SECURE);
#endif

        return rc;
}

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
        spmd_spm_core_context_t *ctx = spmd_get_context();

        /* Get current CPU context from SPMC context */
        assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

        /*
         * The SPMD must have initiated the original request through a
         * synchronous entry into SPMC. Jump back to the original C runtime
         * context with the value of rc in x0.
         */
        spmd_spm_core_exit(ctx->c_rt_ctx, rc);

        panic();
}

/*******************************************************************************
 * Jump to the SPM Core for the first time.
 ******************************************************************************/
static int32_t spmd_init(void)
{
        spmd_spm_core_context_t *ctx = spmd_get_context();
        uint64_t rc;
        unsigned int linear_id = plat_my_core_pos();
        unsigned int core_id;

        VERBOSE("SPM Core init start.\n");
        ctx->state = SPMC_STATE_ON_PENDING;

        /* Set the SPMC context state on other CPUs to OFF */
        for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
                if (core_id != linear_id) {
                        spm_core_context[core_id].state = SPMC_STATE_OFF;
                }
        }

        rc = spmd_spm_core_sync_entry(ctx);
        if (rc != 0ULL) {
                ERROR("SPMC initialisation failed 0x%llx\n", rc);
                return 0;
        }

        ctx->state = SPMC_STATE_ON;

        VERBOSE("SPM Core init end.\n");

        return 1;
}
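/*******************************************************************************
 * Note on per-CPU SPMC context states: each context starts in
 * SPMC_STATE_RESET (the zero-initialised default of the static array,
 * assuming RESET is the first enumerator). spmd_init() moves the boot CPU to
 * SPMC_STATE_ON_PENDING before the first synchronous entry and to
 * SPMC_STATE_ON once the SPMC signals successful initialisation; the other
 * CPUs are held in SPMC_STATE_OFF until they are powered on.
 ******************************************************************************/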
/*******************************************************************************
 * Loads SPMC manifest and inits SPMC.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
        spmd_spm_core_context_t *spm_ctx = spmd_get_context();
        uint32_t ep_attr;
        int rc;

        /* Load the SPM Core manifest */
        rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
        if (rc != 0) {
                WARN("No or invalid SPM Core manifest image provided by BL2\n");
                return rc;
        }

        /*
         * Ensure that the SPM Core version is compatible with the SPM
         * Dispatcher version.
         */
        if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
            (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
                WARN("Unsupported FFA version (%u.%u)\n",
                     spmc_attrs.major_version, spmc_attrs.minor_version);
                return -EINVAL;
        }

        VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
                spmc_attrs.minor_version);

        VERBOSE("SPM Core run time EL%x.\n",
                SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

        /* Validate the SPMC ID; ensure the high bit is set */
        if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
             SPMC_SECURE_ID_MASK) == 0U) {
                WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
                return -EINVAL;
        }

        /* Validate the SPM Core execution state */
        if ((spmc_attrs.exec_state != MODE_RW_64) &&
            (spmc_attrs.exec_state != MODE_RW_32)) {
                WARN("Unsupported SPM Core execution state 0x%x.\n",
                     spmc_attrs.exec_state);
                return -EINVAL;
        }

        VERBOSE("SPM Core execution state 0x%x.\n", spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
        /* Ensure manifest has not requested AArch32 state in S-EL2 */
        if (spmc_attrs.exec_state == MODE_RW_32) {
                WARN("AArch32 state at S-EL2 is not supported.\n");
                return -EINVAL;
        }

        /*
         * S-EL2 is required for SPM in this configuration; check that it
         * is supported on this system.
         */
        if (!is_armv8_4_sel2_present()) {
                WARN("SPM Core run time S-EL2 is not supported.\n");
                return -EINVAL;
        }
#endif /* SPMD_SPM_AT_SEL2 */

        /* Initialise an entrypoint to set up the CPU context */
        ep_attr = SECURE | EP_ST_ENABLE;
        if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
                ep_attr |= EP_EE_BIG;
        }

        SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);
        assert(spmc_ep_info->pc == BL32_BASE);

        /*
         * Populate SPSR for SPM Core based upon validated parameters from the
         * manifest.
         */
        if (spmc_attrs.exec_state == MODE_RW_32) {
                spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
                                                 SPSR_E_LITTLE,
                                                 DAIF_FIQ_BIT |
                                                 DAIF_IRQ_BIT |
                                                 DAIF_ABT_BIT);
        } else {
#if SPMD_SPM_AT_SEL2
                static const uint32_t runtime_el = MODE_EL2;
#else
                static const uint32_t runtime_el = MODE_EL1;
#endif
                spmc_ep_info->spsr = SPSR_64(runtime_el,
                                             MODE_SP_ELX,
                                             DISABLE_ALL_EXCEPTIONS);
        }

        /* Initialise SPM Core context with this entry point information */
        cm_setup_context(&spm_ctx->cpu_ctx, spmc_ep_info);

        /* Reuse PSCI affinity states to mark this SPMC context as off */
        spm_ctx->state = AFF_STATE_OFF;

        INFO("SPM Core setup done.\n");

        /* Register power management hooks with PSCI */
        psci_register_spd_pm_hook(&spmd_pm);

        /* Register init function for deferred init. */
        bl31_register_bl32_init(&spmd_init);

        return 0;
}

/*******************************************************************************
 * Initialize context of SPM Core.
 ******************************************************************************/
int spmd_setup(void)
{
        void *spmc_manifest;
        int rc;

        spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
        if (spmc_ep_info == NULL) {
                WARN("No SPM Core image provided by BL2 boot loader.\n");
                return -EINVAL;
        }

        /* Under no circumstances will this parameter be 0 */
        assert(spmc_ep_info->pc != 0ULL);

        /*
         * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
         * be used as a manifest for the SPM Core at the next lower EL/mode.
         */
        spmc_manifest = (void *)spmc_ep_info->args.arg0;
        if (spmc_manifest == NULL) {
                ERROR("Invalid or absent SPM Core manifest.\n");
                return -EINVAL;
        }

        /* Load manifest, init SPMC */
        rc = spmd_spmc_init(spmc_manifest);
        if (rc != 0) {
                WARN("Booting device without SPM initialization.\n");
        }

        return rc;
}
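/*******************************************************************************
 * Note: forwarding an SMC to the other world amounts to a full world switch
 * at EL3. The incoming world's EL1 (and, when SPMD_SPM_AT_SEL2 is enabled,
 * EL2) system register context is saved, the outgoing world's context is
 * restored, and the function ID and arguments (x0-x7) are propagated into
 * the outgoing context before ERET.
 ******************************************************************************/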
/*******************************************************************************
 * Forward SMC to the other security state
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
                                 bool secure_origin,
                                 uint64_t x1,
                                 uint64_t x2,
                                 uint64_t x3,
                                 uint64_t x4,
                                 void *handle)
{
        unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
        unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;

        /* Save incoming security state */
        cm_el1_sysregs_context_save(secure_state_in);
#if SPMD_SPM_AT_SEL2
        cm_el2_sysregs_context_save(secure_state_in);
#endif

        /* Restore outgoing security state */
        cm_el1_sysregs_context_restore(secure_state_out);
#if SPMD_SPM_AT_SEL2
        cm_el2_sysregs_context_restore(secure_state_out);
#endif
        cm_set_next_eret_context(secure_state_out);

        SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
                 SMC_GET_GP(handle, CTX_GPREG_X5),
                 SMC_GET_GP(handle, CTX_GPREG_X6),
                 SMC_GET_GP(handle, CTX_GPREG_X7));
}

/*******************************************************************************
 * Return FFA_ERROR with specified error code
 ******************************************************************************/
static uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
        SMC_RET8(handle, FFA_ERROR,
                 FFA_TARGET_INFO_MBZ, error_code,
                 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
                 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/*******************************************************************************
 * spmd_check_address_in_binary_image
 ******************************************************************************/
bool spmd_check_address_in_binary_image(uint64_t address)
{
        assert(!check_uptr_overflow(spmc_attrs.load_address,
                                    spmc_attrs.binary_size));

        return ((address >= spmc_attrs.load_address) &&
                (address < (spmc_attrs.load_address +
                            spmc_attrs.binary_size)));
}

/******************************************************************************
 * spmd_is_spmc_message
 *****************************************************************************/
static bool spmd_is_spmc_message(unsigned int ep)
{
        return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
                && (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
}

/******************************************************************************
 * spmd_handle_spmc_message
 *****************************************************************************/
static int spmd_handle_spmc_message(unsigned long long msg,
                unsigned long long parm1, unsigned long long parm2,
                unsigned long long parm3, unsigned long long parm4)
{
        VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
                msg, parm1, parm2, parm3, parm4);

        switch (msg) {
        case SPMD_DIRECT_MSG_SET_ENTRY_POINT:
                return spmd_pm_secondary_core_set_ep(parm1, parm2, parm3);
        default:
                break;
        }

        return -EINVAL;
}

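/*******************************************************************************
 * Note on SPMD-targeted direct requests: per the FF-A direct messaging
 * convention, w1 of FFA_MSG_SEND_DIRECT_REQ packs the sender endpoint ID in
 * its upper 16 bits and the receiver endpoint ID in its lower 16 bits.
 * spmd_is_spmc_message() therefore matches requests sent by the SPMC
 * (spmc_attrs.spmc_id) to the SPMD itself (SPMD_DIRECT_MSG_ENDPOINT_ID).
 * Such requests carry an SPMD service message in x3, e.g.
 * SPMD_DIRECT_MSG_SET_ENTRY_POINT to register a secondary core entry point.
 ******************************************************************************/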
/*******************************************************************************
 * This function handles all SMCs in the range reserved for FFA. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
                          uint64_t x1,
                          uint64_t x2,
                          uint64_t x3,
                          uint64_t x4,
                          void *cookie,
                          void *handle,
                          uint64_t flags)
{
        spmd_spm_core_context_t *ctx = spmd_get_context();
        bool secure_origin;
        int32_t ret;
        uint32_t input_version;

        /* Determine which security state this SMC originated from */
        secure_origin = is_caller_secure(flags);

        INFO("SPM: 0x%x 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n",
             smc_fid, x1, x2, x3, x4, SMC_GET_GP(handle, CTX_GPREG_X5),
             SMC_GET_GP(handle, CTX_GPREG_X6),
             SMC_GET_GP(handle, CTX_GPREG_X7));

        switch (smc_fid) {
        case FFA_ERROR:
                /*
                 * Check if this is the first invocation of this interface on
                 * this CPU. If so, then indicate that the SPM Core initialised
                 * unsuccessfully.
                 */
                if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
                        spmd_spm_core_sync_exit(x2);
                }

                return spmd_smc_forward(smc_fid, secure_origin,
                                        x1, x2, x3, x4, handle);
                break; /* not reached */

        case FFA_VERSION:
                input_version = (uint32_t)(0xFFFFFFFF & x1);
                /*
                 * Sanity check the input version: a request with bit 31 set,
                 * or one made while the SPMC context is still in its reset
                 * state, is not supported. Otherwise a non-secure caller is
                 * returned the SPMC's version and a secure caller the SPMD's
                 * version.
                 */
                if ((input_version & FFA_VERSION_BIT31_MASK) ||
                    (ctx->state == SPMC_STATE_RESET)) {
                        ret = FFA_ERROR_NOT_SUPPORTED;
                } else if (!secure_origin) {
                        ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
                                               spmc_attrs.minor_version);
                } else {
                        ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
                                               FFA_VERSION_MINOR);
                }

                SMC_RET8(handle, ret, FFA_TARGET_INFO_MBZ, FFA_TARGET_INFO_MBZ,
                         FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
                         FFA_PARAM_MBZ, FFA_PARAM_MBZ);
                break; /* not reached */

        case FFA_FEATURES:
                /*
                 * This is an optional interface. Do the minimal checks and
                 * forward to SPM Core which will handle it if implemented.
                 */

                /*
                 * Check if x1 holds a valid FFA fid. This is an optimization
                 * that rejects obviously invalid function IDs without a
                 * world switch.
                 */
                if (!is_ffa_fid(x1)) {
                        return spmd_ffa_error_return(handle,
                                                     FFA_ERROR_NOT_SUPPORTED);
                }

                /* Forward SMC from Normal world to the SPM Core */
                if (!secure_origin) {
                        return spmd_smc_forward(smc_fid, secure_origin,
                                                x1, x2, x3, x4, handle);
                }

                /*
                 * Return success if call was from secure world i.e. all
                 * FFA functions are supported. This is essentially a
                 * nop.
                 */
                SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
                         SMC_GET_GP(handle, CTX_GPREG_X5),
                         SMC_GET_GP(handle, CTX_GPREG_X6),
                         SMC_GET_GP(handle, CTX_GPREG_X7));

                break; /* not reached */

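        /*
         * The ID reported by FFA_ID_GET depends on the caller's security
         * state: a normal world caller is the NS endpoint, while the only
         * possible secure caller at this level is the SPMC itself.
         */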
        case FFA_ID_GET:
                /*
                 * Returns the ID of the calling FFA component.
                 */
                if (!secure_origin) {
                        SMC_RET8(handle, FFA_SUCCESS_SMC32,
                                 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
                                 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
                                 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
                                 FFA_PARAM_MBZ);
                }

                SMC_RET8(handle, FFA_SUCCESS_SMC32,
                         FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
                         FFA_PARAM_MBZ, FFA_PARAM_MBZ,
                         FFA_PARAM_MBZ, FFA_PARAM_MBZ,
                         FFA_PARAM_MBZ);

                break; /* not reached */

        case FFA_MSG_SEND_DIRECT_REQ_SMC32:
                if (secure_origin && spmd_is_spmc_message(x1)) {
                        ret = spmd_handle_spmc_message(x3, x4,
                                SMC_GET_GP(handle, CTX_GPREG_X5),
                                SMC_GET_GP(handle, CTX_GPREG_X6),
                                SMC_GET_GP(handle, CTX_GPREG_X7));

                        SMC_RET8(handle, FFA_SUCCESS_SMC32,
                                 FFA_TARGET_INFO_MBZ, ret,
                                 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
                                 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
                                 FFA_PARAM_MBZ);
                } else {
                        /* Forward direct message to the other world */
                        return spmd_smc_forward(smc_fid, secure_origin,
                                                x1, x2, x3, x4, handle);
                }
                break; /* not reached */

        case FFA_MSG_SEND_DIRECT_RESP_SMC32:
                if (secure_origin && spmd_is_spmc_message(x1)) {
                        spmd_spm_core_sync_exit(0);
                } else {
                        /* Forward direct message to the other world */
                        return spmd_smc_forward(smc_fid, secure_origin,
                                                x1, x2, x3, x4, handle);
                }
                break; /* not reached */

        case FFA_RX_RELEASE:
        case FFA_RXTX_MAP_SMC32:
        case FFA_RXTX_MAP_SMC64:
        case FFA_RXTX_UNMAP:
        case FFA_MSG_RUN:
                /* These interfaces must be invoked only by the Normal world */
                if (secure_origin) {
                        return spmd_ffa_error_return(handle,
                                                     FFA_ERROR_NOT_SUPPORTED);
                }

                /* Fall through to forward the call to the other world */

        case FFA_PARTITION_INFO_GET:
        case FFA_MSG_SEND:
        case FFA_MSG_SEND_DIRECT_REQ_SMC64:
        case FFA_MSG_SEND_DIRECT_RESP_SMC64:
        case FFA_MEM_DONATE_SMC32:
        case FFA_MEM_DONATE_SMC64:
        case FFA_MEM_LEND_SMC32:
        case FFA_MEM_LEND_SMC64:
        case FFA_MEM_SHARE_SMC32:
        case FFA_MEM_SHARE_SMC64:
        case FFA_MEM_RETRIEVE_REQ_SMC32:
        case FFA_MEM_RETRIEVE_REQ_SMC64:
        case FFA_MEM_RETRIEVE_RESP:
        case FFA_MEM_RELINQUISH:
        case FFA_MEM_RECLAIM:
        case FFA_SUCCESS_SMC32:
        case FFA_SUCCESS_SMC64:
                /*
                 * TODO: Assume that no requests originate from EL3 at the
                 * moment. This will change if a SP service is required in
                 * response to secure interrupts targeted to EL3. Until then
                 * simply forward the call to the Normal world.
                 */

                return spmd_smc_forward(smc_fid, secure_origin,
                                        x1, x2, x3, x4, handle);
                break; /* not reached */

        case FFA_MSG_WAIT:
                /*
                 * Check if this is the first invocation of this interface on
                 * this CPU from the Secure world. If so, then indicate that
                 * the SPM Core initialised successfully.
                 */
                if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
                        spmd_spm_core_sync_exit(0);
                }

                /* Fall through to forward the call to the other world */

        case FFA_MSG_YIELD:
                /* This interface must be invoked only by the Secure world */
                if (!secure_origin) {
                        return spmd_ffa_error_return(handle,
                                                     FFA_ERROR_NOT_SUPPORTED);
                }

                return spmd_smc_forward(smc_fid, secure_origin,
                                        x1, x2, x3, x4, handle);
                break; /* not reached */

        default:
                WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
                return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
        }
}