/*
 * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch/aarch64/arch_features.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/ffa_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"

/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information read from its manifest.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;

/*******************************************************************************
 * SPM Core context on current CPU get helper.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
	unsigned int linear_id = plat_my_core_pos();

	return &spm_core_context[linear_id];
}

/*******************************************************************************
 * SPM Core entry point information get helper.
 ******************************************************************************/
entry_point_info_t *spmd_spmc_ep_info_get(void)
{
	return spmc_ep_info;
}

/*******************************************************************************
 * Static function declarations.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);
static uint64_t spmd_ffa_error_return(void *handle,
				      int error_code);
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle);

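/*
 * Note: spmd_spm_core_enter() and spmd_spm_core_exit() are assembly helpers.
 * The former saves the SPMD's C runtime context (callee-saved registers and
 * stack pointer) into c_rt_ctx before entering the SPMC; the latter restores
 * it, which is what allows spmd_spm_core_sync_exit() to unwind a synchronous
 * entry from a different code path.
 */
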
/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
	cm_el1_sysregs_context_restore(SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/* Enter SPMC */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
	cm_el1_sysregs_context_save(SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#endif

	return rc;
}

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	panic();
}

/*******************************************************************************
 * Jump to the SPM Core for the first time.
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;
	unsigned int linear_id = plat_my_core_pos();
	unsigned int core_id;

	VERBOSE("SPM Core init start.\n");
	ctx->state = SPMC_STATE_ON_PENDING;

	/* Set the SPMC context state on other CPUs to OFF */
	for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
		if (core_id != linear_id) {
			spm_core_context[core_id].state = SPMC_STATE_OFF;
		}
	}

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%llx\n", rc);
		return 0;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("SPM Core init end.\n");

	return 1;
}

/*******************************************************************************
 * Loads SPMC manifest and inits SPMC.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	spmd_spm_core_context_t *spm_ctx = spmd_get_context();
	uint32_t ep_attr;
	int rc;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

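	/*
	 * Note: an FF-A version number packs the major revision in bits
	 * [30:16] and the minor revision in bits [15:0], with bit 31 zero.
	 * The SPMC is compatible when its major revision matches the SPMD's
	 * and its minor revision does not exceed the SPMD's.
	 */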
	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
		spmc_attrs.minor_version);

	VERBOSE("SPM Core run time EL%x.\n",
		SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID. Secure endpoint IDs must have the high bit set. */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
	     SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}

	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported SPM Core execution state 0x%x.\n",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("SPM Core execution state 0x%x.\n", spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * Check that S-EL2 is supported on this system, since SPM requires
	 * the SPM Core to run there.
	 */
	if (!is_armv8_4_sel2_present()) {
		WARN("SPM Core run time S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);
	assert(spmc_ep_info->pc == BL32_BASE);

	/*
	 * Populate SPSR for SPM Core based upon validated parameters from the
	 * manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {
#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

	/* Initialise SPM Core context with this entry point information */
	cm_setup_context(&spm_ctx->cpu_ctx, spmc_ep_info);

	/* Reuse PSCI affinity states to mark this SPMC context as off */
	spm_ctx->state = AFF_STATE_OFF;

	INFO("SPM Core setup done.\n");

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmd_pm);

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spmd_init);

	return 0;
}

/*******************************************************************************
 * Initialize context of SPM Core.
 ******************************************************************************/
int spmd_setup(void)
{
	void *spmc_manifest;
	int rc;

	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (spmc_ep_info == NULL) {
		WARN("No SPM Core image provided by BL2 boot loader.\n");
		return -EINVAL;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(spmc_ep_info->pc != 0ULL);

	/*
	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
	 * be used as a manifest for the SPM Core at the next lower EL/mode.
	 */
	spmc_manifest = (void *)spmc_ep_info->args.arg0;
	if (spmc_manifest == NULL) {
		ERROR("Invalid or absent SPM Core manifest.\n");
		return -EINVAL;
	}

	/* Load manifest, init SPMC */
	rc = spmd_spmc_init(spmc_manifest);
	if (rc != 0) {
		WARN("Booting device without SPM initialization.\n");
	}

	return rc;
}

/*******************************************************************************
 * Forward SMC to the other security state
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle)
{
	unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;

	/* Save incoming security state */
	cm_el1_sysregs_context_save(secure_state_in);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(secure_state_in);
#endif

	/* Restore outgoing security state */
	cm_el1_sysregs_context_restore(secure_state_out);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(secure_state_out);
#endif
	cm_set_next_eret_context(secure_state_out);

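	/*
	 * Note: the generic SMC handler passes only x1-x4 by value; x5-x7
	 * are read from the incoming world's saved GP register context
	 * before being forwarded.
	 */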
	SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
		 SMC_GET_GP(handle, CTX_GPREG_X5),
		 SMC_GET_GP(handle, CTX_GPREG_X6),
		 SMC_GET_GP(handle, CTX_GPREG_X7));
}

/*******************************************************************************
 * Return FFA_ERROR with specified error code
 ******************************************************************************/
static uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/*******************************************************************************
 * spmd_check_address_in_binary_image
 ******************************************************************************/
bool spmd_check_address_in_binary_image(uint64_t address)
{
	assert(!check_uptr_overflow(spmc_attrs.load_address,
				    spmc_attrs.binary_size));

	return ((address >= spmc_attrs.load_address) &&
		(address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
}

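/*
 * Note: for FF-A direct messages, w1 encodes the sender endpoint ID in bits
 * [31:16] and the destination endpoint ID in bits [15:0]. A message targets
 * the SPMD itself when the destination is SPMD_DIRECT_MSG_ENDPOINT_ID and
 * the sender is the SPMC.
 */
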
/******************************************************************************
 * spmd_is_spmc_message
 *****************************************************************************/
static bool spmd_is_spmc_message(unsigned int ep)
{
	return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
		&& (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
}

/******************************************************************************
 * spmd_handle_spmc_message
 *****************************************************************************/
static int32_t spmd_handle_spmc_message(uint64_t msg, uint64_t parm1,
					uint64_t parm2, uint64_t parm3,
					uint64_t parm4)
{
	VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
		msg, parm1, parm2, parm3, parm4);

	switch (msg) {
	case SPMD_DIRECT_MSG_SET_ENTRY_POINT:
		return spmd_pm_secondary_core_set_ep(parm1, parm2, parm3);
	default:
		break;
	}

	return -EINVAL;
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for FFA. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int32_t ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	INFO("SPM: 0x%x 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n",
	     smc_fid, x1, x2, x3, x4, SMC_GET_GP(handle, CTX_GPREG_X5),
	     SMC_GET_GP(handle, CTX_GPREG_X6),
	     SMC_GET_GP(handle, CTX_GPREG_X7));

	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(x2);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_VERSION:
		input_version = (uint32_t)(0xFFFFFFFF & x1);
		/*
		 * Sanity check on "input_version": bit 31 must be zero and
		 * the SPMC must have been initialised.
		 * If the caller is secure, return the SPMD's FFA_VERSION.
		 * If the caller is non-secure, return the SPMC's version.
		 */
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
		    (ctx->state == SPMC_STATE_RESET)) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
					       spmc_attrs.minor_version);
		} else {
			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					       FFA_VERSION_MINOR);
		}

		SMC_RET8(handle, ret, FFA_TARGET_INFO_MBZ, FFA_TARGET_INFO_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break; /* not reached */

	case FFA_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to SPM Core which will handle it if implemented.
		 */

		/*
		 * Check if x1 holds a valid FFA fid. This is an
		 * optimization.
		 */
		if (!is_ffa_fid(x1)) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Forward SMC from Normal world to the SPM Core */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}

		/*
		 * Return success if call was from secure world i.e. all
		 * FFA functions are supported. This is essentially a
		 * nop.
		 */
		SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));

		break; /* not reached */

	case FFA_ID_GET:
		/*
		 * Returns the ID of the calling FFA component.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}

		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
		if (secure_origin && spmd_is_spmc_message(x1)) {
			ret = spmd_handle_spmc_message(x3, x4,
					SMC_GET_GP(handle, CTX_GPREG_X5),
					SMC_GET_GP(handle, CTX_GPREG_X6),
					SMC_GET_GP(handle, CTX_GPREG_X7));

			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, ret,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}
		break; /* Not reached */

	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
		if (secure_origin && spmd_is_spmc_message(x1)) {
			spmd_spm_core_sync_exit(0);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}
		break; /* Not reached */

	case FFA_RX_RELEASE:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_MSG_RUN:
		/* This interface must be invoked only by the Normal world */
		if (secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Fall through to forward the call to the other world */

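	/*
	 * Note: the SPMD does not implement the interfaces below; it merely
	 * relays them between the two security states for the SPM Core or
	 * the Normal world to handle.
	 */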
	case FFA_PARTITION_INFO_GET:
	case FFA_MSG_SEND:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MEM_DONATE_SMC32:
	case FFA_MEM_DONATE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_RECLAIM:
	case FFA_SUCCESS_SMC32:
	case FFA_SUCCESS_SMC64:
		/*
		 * TODO: Assume that no requests originate from EL3 at the
		 * moment. This will change if a SP service is required in
		 * response to secure interrupts targeted to EL3. Until then
		 * simply forward the call to the Normal world.
		 */

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that the
		 * SPM Core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(0);
		}

		/* Fall through to forward the call to the other world */

	case FFA_MSG_YIELD:
		/* This interface must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}