/*
 * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <arch_helpers.h>
#include <bl31/bl31.h>
#include <bl31/ehf.h>
#include <common/debug.h>
#include <common/fdt_wrappers.h>
#include <common/runtime_svc.h>
#include <common/uuid.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/smccc.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <libfdt.h>
#include <plat/common/platform.h>
#include <services/el3_spmc_logical_sp.h>
#include <services/ffa_svc.h>
#include <services/spmc_svc.h>
#include <services/spmd_svc.h>
#include "spmc.h"

#include <platform_def.h>

/* Declare the maximum number of SPs and EL3 LPs. */
#define MAX_SP_LP_PARTITIONS	(SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT)

/*
 * Allocate a secure partition descriptor to describe each SP in the system
 * that does not reside at EL3.
 */
static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];

/*
 * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
 * the system that interacts with a SP. It is used to track the Hypervisor
 * buffer pair, version and ID for now. It could be extended to track VM
 * properties when the SPMC supports indirect messaging.
 */
static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];

/*
 * Helper function to obtain the array storing the EL3
 * Logical Partition descriptors.
 */
struct el3_lp_desc *get_el3_lp_array(void)
{
	return (struct el3_lp_desc *) EL3_LP_DESCS_START;
}

/*
 * Helper function to obtain the descriptor of the last SP to which control was
 * handed on this physical cpu. Currently, we assume there is only one SP.
 * TODO: Expand to track multiple partitions when required.
 */
struct secure_partition_desc *spmc_get_current_sp_ctx(void)
{
	return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
}

/*
 * Helper function to obtain the execution context of an SP on the
 * current physical cpu.
 */
struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
{
	return &(sp->ec[get_ec_index(sp)]);
}

/* Helper function to get pointer to SP context from its ID. */
struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
{
	/* Check for Secure World Partitions. */
	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
		if (sp_desc[i].sp_id == id) {
			return &(sp_desc[i]);
		}
	}
	return NULL;
}

/*
 * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
 * We assume that the first descriptor is reserved for this entity.
 */
struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
{
	return &(ns_ep_desc[0]);
}

/*
 * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
 * or OS kernel in the normal world or the last SP that was run.
 */
struct mailbox *spmc_get_mbox_desc(bool secure_origin)
{
	/* Obtain the RX/TX buffer pair descriptor. */
	if (secure_origin) {
		return &(spmc_get_current_sp_ctx()->mailbox);
	} else {
		return &(spmc_get_hyp_ctx()->mailbox);
	}
}
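
/*
 * Usage sketch (illustrative only, mirroring the RX/TX handlers further
 * down): the mailbox returned by spmc_get_mbox_desc() is shared state, so
 * callers are expected to take the mailbox lock before touching the buffer
 * pointers:
 *
 *	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
 *
 *	spin_lock(&mbox->lock);
 *	... access mbox->rx_buffer / mbox->tx_buffer ...
 *	spin_unlock(&mbox->lock);
 */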
/******************************************************************************
 * This function returns to the place where spmc_sp_synchronous_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
{
	/*
	 * The SPM must have initiated the original request through a
	 * synchronous entry into the secure partition. Jump back to the
	 * original C runtime context with the value of rc in x0.
	 */
	spm_secure_partition_exit(ec->c_rt_ctx, rc);

	panic();
}

/*******************************************************************************
 * Return FFA_ERROR with specified error code.
 ******************************************************************************/
uint64_t spmc_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/******************************************************************************
 * Helper function to validate a secure partition ID to ensure it does not
 * conflict with any other FF-A component and follows the convention to
 * indicate it resides within the secure world.
 ******************************************************************************/
bool is_ffa_secure_id_valid(uint16_t partition_id)
{
	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();

	/* Ensure the ID is not the invalid partition ID. */
	if (partition_id == INV_SP_ID) {
		return false;
	}

	/* Ensure the ID is not the SPMD ID. */
	if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
		return false;
	}

	/*
	 * Ensure the ID follows the convention to indicate it resides
	 * in the secure world.
	 */
	if (!ffa_is_secure_world_id(partition_id)) {
		return false;
	}

	/* Ensure we don't conflict with the SPMC partition ID. */
	if (partition_id == FFA_SPMC_ID) {
		return false;
	}

	/* Ensure we do not already have an SP context with this ID. */
	if (spmc_get_sp_ctx(partition_id)) {
		return false;
	}

	/* Ensure we don't clash with any Logical SPs. */
	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
		if (el3_lp_descs[i].sp_id == partition_id) {
			return false;
		}
	}

	return true;
}
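
/*
 * Illustrative note: the secure world convention checked by
 * ffa_is_secure_world_id() relies on a dedicated bit in the 16-bit FF-A ID
 * space (see ffa_svc.h for the exact mask). Assuming that convention, an ID
 * such as 0x8001 would be accepted above as a secure world SP ID, whereas
 * 0x0001 would be rejected as a normal world ID.
 */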
/*******************************************************************************
 * This function either forwards the request to the other world or returns
 * with an ERET depending on the source of the call.
 * We can assume that the destination is for an entity at a lower exception
 * level as any messages destined for a logical SP resident in EL3 will have
 * already been taken care of by the SPMC before entering this function.
 ******************************************************************************/
static uint64_t spmc_smc_return(uint32_t smc_fid,
				bool secure_origin,
				uint64_t x1,
				uint64_t x2,
				uint64_t x3,
				uint64_t x4,
				void *handle,
				void *cookie,
				uint64_t flags,
				uint16_t dst_id)
{
	/* If the destination is in the normal world always go via the SPMD. */
	if (ffa_is_normal_world_id(dst_id)) {
		return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
					cookie, handle, flags);
	}
	/*
	 * If the caller is secure and we want to return to the secure world,
	 * ERET directly.
	 */
	else if (secure_origin && ffa_is_secure_world_id(dst_id)) {
		SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
	}
	/* If we originated in the normal world then switch contexts. */
	else if (!secure_origin && ffa_is_secure_world_id(dst_id)) {
		return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
					     x3, x4, handle);
	} else {
		/* Unknown state. */
		panic();
	}

	/* Shouldn't be reached. */
	return 0;
}

/*******************************************************************************
 * FF-A ABI Handlers.
 ******************************************************************************/

/*******************************************************************************
 * Helper function to validate arg2 as part of a direct message.
 ******************************************************************************/
static inline bool direct_msg_validate_arg2(uint64_t x2)
{
	/*
	 * We currently only support partition messages, therefore ensure x2 is
	 * not set.
	 */
	if (x2 != (uint64_t) 0) {
		VERBOSE("Arg2 MBZ for partition messages (0x%lx).\n", x2);
		return false;
	}
	return true;
}
/*******************************************************************************
 * Handle direct request messages and route to the appropriate destination.
 ******************************************************************************/
static uint64_t direct_req_smc_handler(uint32_t smc_fid,
				       bool secure_origin,
				       uint64_t x1,
				       uint64_t x2,
				       uint64_t x3,
				       uint64_t x4,
				       void *cookie,
				       void *handle,
				       uint64_t flags)
{
	uint16_t dst_id = ffa_endpoint_destination(x1);
	struct el3_lp_desc *el3_lp_descs;
	struct secure_partition_desc *sp;
	unsigned int idx;

	/* Check if arg2 has been populated correctly based on message type. */
	if (!direct_msg_validate_arg2(x2)) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	el3_lp_descs = get_el3_lp_array();

	/* Check if the request is destined for a Logical Partition. */
	for (unsigned int i = 0U; i < MAX_EL3_LP_DESCS_COUNT; i++) {
		if (el3_lp_descs[i].sp_id == dst_id) {
			return el3_lp_descs[i].direct_req(
					smc_fid, secure_origin, x1, x2, x3, x4,
					cookie, handle, flags);
		}
	}

	/*
	 * If the request was not targeted to a LSP and came from the secure
	 * world then it is invalid, since a SP cannot call into the Normal
	 * world and there is no other SP to call into. If there are other SPs
	 * in future then the partition runtime model would need to be
	 * validated as well.
	 */
	if (secure_origin) {
		VERBOSE("Direct request not supported to the Normal World.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check if the SP ID is valid. */
	sp = spmc_get_sp_ctx(dst_id);
	if (sp == NULL) {
		VERBOSE("Direct request to unknown partition ID (0x%x).\n",
			dst_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Check that the target execution context is in a waiting state before
	 * forwarding the direct request to it.
	 */
	idx = get_ec_index(sp);
	if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
		VERBOSE("SP context on core%u is not waiting (%u).\n",
			idx, sp->ec[idx].rt_model);
		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
	}

	/*
	 * Everything checks out so forward the request to the SP after
	 * updating its state and runtime model.
	 */
	sp->ec[idx].rt_state = RT_STATE_RUNNING;
	sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
			       handle, cookie, flags, dst_id);
}

/*******************************************************************************
 * Handle direct response messages and route to the appropriate destination.
 ******************************************************************************/
static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
					bool secure_origin,
					uint64_t x1,
					uint64_t x2,
					uint64_t x3,
					uint64_t x4,
					void *cookie,
					void *handle,
					uint64_t flags)
{
	uint16_t dst_id = ffa_endpoint_destination(x1);
	struct secure_partition_desc *sp;
	unsigned int idx;

	/* Check if arg2 has been populated correctly based on message type. */
	if (!direct_msg_validate_arg2(x2)) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check that the response did not originate from the Normal world. */
	if (!secure_origin) {
		VERBOSE("Direct Response not supported from Normal World.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Check that the response is either targeted to the Normal world or
	 * the SPMC, e.g. a PM response.
	 */
	if ((dst_id != FFA_SPMC_ID) && ffa_is_secure_world_id(dst_id)) {
		VERBOSE("Direct response to invalid partition ID (0x%x).\n",
			dst_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Obtain the SP descriptor and update its runtime state. */
	sp = spmc_get_sp_ctx(ffa_endpoint_source(x1));
	if (sp == NULL) {
		VERBOSE("Direct response to unknown partition ID (0x%x).\n",
			dst_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Sanity check state is being tracked correctly in the SPMC. */
	idx = get_ec_index(sp);
	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);

	/* Ensure SP execution context was in the right runtime model. */
	if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
		VERBOSE("SP context on core%u not handling direct req (%u).\n",
			idx, sp->ec[idx].rt_model);
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Update the state of the SP execution context. */
	sp->ec[idx].rt_state = RT_STATE_WAITING;

	/*
	 * If the receiver is the SPMC then handle the response with a
	 * synchronous exit, otherwise forward it to the Normal world.
	 */
	if (dst_id == FFA_SPMC_ID) {
		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
		/* Should not get here. */
		panic();
	}

	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
			       handle, cookie, flags, dst_id);
}
/*******************************************************************************
 * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
 * cycles.
 ******************************************************************************/
static uint64_t msg_wait_handler(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	struct secure_partition_desc *sp;
	unsigned int idx;

	/*
	 * Check that the request did not originate from the Normal world as
	 * only the secure world can call this ABI.
	 */
	if (!secure_origin) {
		VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n");
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */
	sp = spmc_get_current_sp_ctx();
	if (sp == NULL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Get the execution context of the SP that invoked FFA_MSG_WAIT.
	 */
	idx = get_ec_index(sp);

	/* Ensure SP execution context was in the right runtime model. */
	if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Sanity check the state is being tracked correctly in the SPMC. */
	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);

	/*
	 * Perform a synchronous exit if the partition was initialising. The
	 * state is updated after the exit.
	 */
	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
		/* Should not get here. */
		panic();
	}

	/* Update the state of the SP execution context. */
	sp->ec[idx].rt_state = RT_STATE_WAITING;

	/* Resume normal world if a secure interrupt was handled. */
	if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
		/* FFA_MSG_WAIT can only be called from the secure world. */
		unsigned int secure_state_in = SECURE;
		unsigned int secure_state_out = NON_SECURE;

		cm_el1_sysregs_context_save(secure_state_in);
		cm_el1_sysregs_context_restore(secure_state_out);
		cm_set_next_eret_context(secure_state_out);
		SMC_RET0(cm_get_context(secure_state_out));
	}

	/* Forward the response to the Normal world. */
	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
			       handle, cookie, flags, FFA_NWD_ID);
}

static uint64_t ffa_error_handler(uint32_t smc_fid,
				  bool secure_origin,
				  uint64_t x1,
				  uint64_t x2,
				  uint64_t x3,
				  uint64_t x4,
				  void *cookie,
				  void *handle,
				  uint64_t flags)
{
	struct secure_partition_desc *sp;
	unsigned int idx;

	/* Check that the call did not originate from the Normal world. */
	if (!secure_origin) {
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/* Get the descriptor of the SP that invoked FFA_ERROR. */
	sp = spmc_get_current_sp_ctx();
	if (sp == NULL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Get the execution context of the SP that invoked FFA_ERROR. */
	idx = get_ec_index(sp);

	/*
	 * We only expect FFA_ERROR to be received during SP initialisation,
	 * otherwise this is an invalid call.
	 */
	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
		ERROR("SP 0x%x failed to initialize.\n", sp->sp_id);
		spmc_sp_synchronous_exit(&sp->ec[idx], x2);
		/* Should not get here. */
		panic();
	}

	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
}
static uint64_t ffa_version_handler(uint32_t smc_fid,
				    bool secure_origin,
				    uint64_t x1,
				    uint64_t x2,
				    uint64_t x3,
				    uint64_t x4,
				    void *cookie,
				    void *handle,
				    uint64_t flags)
{
	uint32_t requested_version = x1 & FFA_VERSION_MASK;

	if (requested_version & FFA_VERSION_BIT31_MASK) {
		/* Invalid encoding, return an error. */
		SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
		/* Execution stops here. */
	}

	/* Determine the caller to store the requested version. */
	if (secure_origin) {
		/*
		 * Ensure that the SP is reporting the same version as
		 * specified in its manifest. If these do not match there is
		 * something wrong with the SP.
		 * TODO: Should we abort the SP? For now assert this is not
		 * the case.
		 */
		assert(requested_version ==
		       spmc_get_current_sp_ctx()->ffa_version);
	} else {
		/*
		 * If this is called by the normal world, record this
		 * information in its descriptor.
		 */
		spmc_get_hyp_ctx()->ffa_version = requested_version;
	}

	SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					  FFA_VERSION_MINOR));
}

/*******************************************************************************
 * Helper function to obtain the FF-A version of the calling partition.
 ******************************************************************************/
uint32_t get_partition_ffa_version(bool secure_origin)
{
	if (secure_origin) {
		return spmc_get_current_sp_ctx()->ffa_version;
	} else {
		return spmc_get_hyp_ctx()->ffa_version;
	}
}
static uint64_t rxtx_map_handler(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	int ret;
	uint32_t error_code;
	uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
	struct mailbox *mbox;
	uintptr_t tx_address = x1;
	uintptr_t rx_address = x2;
	uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
	uint32_t buf_size = page_count * FFA_PAGE_SIZE;

	/*
	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
	 * indirect messaging with SPs. Check if the Hypervisor has invoked
	 * this ABI on behalf of a VM and reject it if this is the case.
	 */
	if (tx_address == 0 || rx_address == 0) {
		WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Ensure the specified buffers are not the same. */
	if (tx_address == rx_address) {
		WARN("TX Buffer must not be the same as RX Buffer.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Ensure the buffer size is not 0. */
	if (buf_size == 0U) {
		WARN("Buffer size must not be 0.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Ensure the buffer size is a multiple of the translation granule size
	 * in TF-A.
	 */
	if (buf_size % PAGE_SIZE != 0U) {
		WARN("Buffer size must be aligned to translation granule.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Obtain the RX/TX buffer pair descriptor. */
	mbox = spmc_get_mbox_desc(secure_origin);

	spin_lock(&mbox->lock);

	/* Check if buffers have already been mapped. */
	if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
		WARN("RX/TX Buffers already mapped (%p/%p).\n",
		     (void *) mbox->rx_buffer, (void *) mbox->tx_buffer);
		error_code = FFA_ERROR_DENIED;
		goto err;
	}

	/* memmap the TX buffer as read only. */
	ret = mmap_add_dynamic_region(tx_address, /* PA */
				      tx_address, /* VA */
				      buf_size,   /* size */
				      mem_atts | MT_RO_DATA); /* attrs */
	if (ret != 0) {
		/* Return the correct error code. */
		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
						FFA_ERROR_INVALID_PARAMETER;
		WARN("Unable to map TX buffer: %d\n", error_code);
		goto err;
	}

	/* memmap the RX buffer as read write. */
	ret = mmap_add_dynamic_region(rx_address, /* PA */
				      rx_address, /* VA */
				      buf_size,   /* size */
				      mem_atts | MT_RW_DATA); /* attrs */

	if (ret != 0) {
		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
						FFA_ERROR_INVALID_PARAMETER;
		WARN("Unable to map RX buffer: %d\n", error_code);
		/* Unmap the TX buffer again. */
		mmap_remove_dynamic_region(tx_address, buf_size);
		goto err;
	}

	mbox->tx_buffer = (void *) tx_address;
	mbox->rx_buffer = (void *) rx_address;
	mbox->rxtx_page_count = page_count;
	spin_unlock(&mbox->lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);
	/* Execution stops here. */
err:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, error_code);
}

static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
				   bool secure_origin,
				   uint64_t x1,
				   uint64_t x2,
				   uint64_t x3,
				   uint64_t x4,
				   void *cookie,
				   void *handle,
				   uint64_t flags)
{
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

	/*
	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
	 * indirect messaging with SPs. Check if the Hypervisor has invoked
	 * this ABI on behalf of a VM and reject it if this is the case.
	 */
	if (x1 != 0UL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&mbox->lock);

	/* Check if buffers are currently mapped. */
	if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
		spin_unlock(&mbox->lock);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Unmap RX Buffer. */
	if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
				       buf_size) != 0) {
		WARN("Unable to unmap RX buffer!\n");
	}

	mbox->rx_buffer = 0;

	/* Unmap TX Buffer. */
	if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
				       buf_size) != 0) {
		WARN("Unable to unmap TX buffer!\n");
	}

	mbox->tx_buffer = 0;
	mbox->rxtx_page_count = 0;

	spin_unlock(&mbox->lock);
	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}
/*
 * Collate the partition information in a v1.1 partition information
 * descriptor format; this will be converted later if required.
 */
static int partition_info_get_handler_v1_1(uint32_t *uuid,
					   struct ffa_partition_info_v1_1
						   *partitions,
					   uint32_t max_partitions,
					   uint32_t *partition_count)
{
	uint32_t index;
	struct ffa_partition_info_v1_1 *desc;
	bool null_uuid = is_null_uuid(uuid);
	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();

	/* Deal with Logical Partitions. */
	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
		if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
			/* Found a matching UUID, populate appropriately. */
			if (*partition_count >= max_partitions) {
				return FFA_ERROR_NO_MEMORY;
			}

			desc = &partitions[*partition_count];
			desc->ep_id = el3_lp_descs[index].sp_id;
			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
			desc->properties = el3_lp_descs[index].properties;
			if (null_uuid) {
				copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
			}
			(*partition_count)++;
		}
	}

	/* Deal with physical SPs. */
	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
			/* Found a matching UUID, populate appropriately. */
			if (*partition_count >= max_partitions) {
				return FFA_ERROR_NO_MEMORY;
			}

			desc = &partitions[*partition_count];
			desc->ep_id = sp_desc[index].sp_id;
			/*
			 * Execution context count must match the number of
			 * cores for S-EL1 SPs.
			 */
			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
			desc->properties = sp_desc[index].properties;
			if (null_uuid) {
				copy_uuid(desc->uuid, sp_desc[index].uuid);
			}
			(*partition_count)++;
		}
	}
	return 0;
}

/*
 * Handle the case where the caller only wants the count of partitions
 * matching a given UUID and does not want the corresponding descriptors
 * populated.
 */
static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
{
	uint32_t index = 0;
	uint32_t partition_count = 0;
	bool null_uuid = is_null_uuid(uuid);
	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();

	/* Deal with Logical Partitions. */
	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
		if (null_uuid ||
		    uuid_match(uuid, el3_lp_descs[index].uuid)) {
			(partition_count)++;
		}
	}

	/* Deal with physical SPs. */
	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
			(partition_count)++;
		}
	}
	return partition_count;
}

/*
 * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
 * the corresponding descriptor format from the v1.1 descriptor array.
 */
static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
					     *partitions,
					     struct mailbox *mbox,
					     int partition_count)
{
	uint32_t index;
	uint32_t buf_size;
	uint32_t descriptor_size;
	struct ffa_partition_info_v1_0 *v1_0_partitions =
		(struct ffa_partition_info_v1_0 *) mbox->rx_buffer;

	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
	descriptor_size = partition_count *
			  sizeof(struct ffa_partition_info_v1_0);

	if (descriptor_size > buf_size) {
		return FFA_ERROR_NO_MEMORY;
	}

	for (index = 0U; index < partition_count; index++) {
		v1_0_partitions[index].ep_id = partitions[index].ep_id;
		v1_0_partitions[index].execution_ctx_count =
			partitions[index].execution_ctx_count;
		v1_0_partitions[index].properties =
			partitions[index].properties;
	}
	return 0;
}
/*
 * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
 * v1.0 implementations.
 */
static uint64_t partition_info_get_handler(uint32_t smc_fid,
					   bool secure_origin,
					   uint64_t x1,
					   uint64_t x2,
					   uint64_t x3,
					   uint64_t x4,
					   void *cookie,
					   void *handle,
					   uint64_t flags)
{
	int ret;
	uint32_t partition_count = 0;
	uint32_t size = 0;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	struct mailbox *mbox;
	uint64_t info_get_flags;
	bool count_only;
	uint32_t uuid[4];

	uuid[0] = x1;
	uuid[1] = x2;
	uuid[2] = x3;
	uuid[3] = x4;

	/* Determine if the Partition descriptors should be populated. */
	info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
	count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);

	/* Handle the case where we don't need to populate the descriptors. */
	if (count_only) {
		partition_count = partition_info_get_handler_count_only(uuid);
		if (partition_count == 0) {
			return spmc_ffa_error_return(handle,
						FFA_ERROR_INVALID_PARAMETER);
		}
	} else {
		struct ffa_partition_info_v1_1 partitions[MAX_SP_LP_PARTITIONS];

		/*
		 * Handle the case where the partition descriptors are
		 * required: check we have the buffers available and populate
		 * the appropriate structure version.
		 */

		/* Obtain the v1.1 format of the descriptors. */
		ret = partition_info_get_handler_v1_1(uuid, partitions,
						      MAX_SP_LP_PARTITIONS,
						      &partition_count);

		/* Check if an error occurred during discovery. */
		if (ret != 0) {
			goto err;
		}

		/* If we didn't find any matches the UUID is unknown. */
		if (partition_count == 0) {
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err;
		}

		/* Obtain the partition mailbox RX/TX buffer pair descriptor. */
		mbox = spmc_get_mbox_desc(secure_origin);

		/*
		 * If the caller has not bothered registering its RX/TX pair
		 * then return an error code.
		 */
		spin_lock(&mbox->lock);
		if (mbox->rx_buffer == NULL) {
			ret = FFA_ERROR_BUSY;
			goto err_unlock;
		}

		/* Ensure the RX buffer is currently free. */
		if (mbox->state != MAILBOX_STATE_EMPTY) {
			ret = FFA_ERROR_BUSY;
			goto err_unlock;
		}

		/* Zero the RX buffer before populating. */
		(void)memset(mbox->rx_buffer, 0,
			     mbox->rxtx_page_count * FFA_PAGE_SIZE);

		/*
		 * Depending on the FF-A version of the requesting partition
		 * we may need to convert to a v1.0 format, otherwise we can
		 * copy directly.
		 */
		if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
			ret = partition_info_populate_v1_0(partitions,
							   mbox,
							   partition_count);
			if (ret != 0) {
				goto err_unlock;
			}
		} else {
			uint32_t buf_size = mbox->rxtx_page_count *
					    FFA_PAGE_SIZE;

			/* Ensure the descriptors will fit in the buffer. */
			size = sizeof(struct ffa_partition_info_v1_1);
			if (partition_count * size > buf_size) {
				ret = FFA_ERROR_NO_MEMORY;
				goto err_unlock;
			}
			memcpy(mbox->rx_buffer, partitions,
			       partition_count * size);
		}

		mbox->state = MAILBOX_STATE_FULL;
		spin_unlock(&mbox->lock);
	}
	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);

err_unlock:
	spin_unlock(&mbox->lock);
err:
	return spmc_ffa_error_return(handle, ret);
}
static uint64_t ffa_features_handler(uint32_t smc_fid,
				     bool secure_origin,
				     uint64_t x1,
				     uint64_t x2,
				     uint64_t x3,
				     uint64_t x4,
				     void *cookie,
				     void *handle,
				     uint64_t flags)
{
	uint32_t function_id = (uint32_t) x1;
	uint32_t input_properties = (uint32_t) x2;

	/*
	 * We don't currently support any additional input properties
	 * for any ABI, therefore ensure this value is always set to 0.
	 */
	if (input_properties != 0) {
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/* Check if a Feature ID was requested. */
	if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) {
		/* We currently don't support any additional features. */
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/* Report if an FF-A ABI is supported. */
	switch (function_id) {
	/* Supported features from both worlds. */
	case FFA_ERROR:
	case FFA_SUCCESS_SMC32:
	case FFA_ID_GET:
	case FFA_FEATURES:
	case FFA_VERSION:
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_PARTITION_INFO_GET:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:

		/*
		 * We are relying on the fact that the other registers
		 * will be set to 0 as these values align with the
		 * currently implemented features of the SPMC. If this
		 * changes this function must be extended to handle
		 * reporting the additional functionality.
		 */

		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	/* Supported ABIs only from the secure world. */
	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MSG_WAIT:

		if (!secure_origin) {
			return spmc_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	default:
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_NOT_SUPPORTED);
	}
}

static uint64_t ffa_id_get_handler(uint32_t smc_fid,
				   bool secure_origin,
				   uint64_t x1,
				   uint64_t x2,
				   uint64_t x3,
				   uint64_t x4,
				   void *cookie,
				   void *handle,
				   uint64_t flags)
{
	if (secure_origin) {
		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
			 spmc_get_current_sp_ctx()->sp_id);
	} else {
		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
			 spmc_get_hyp_ctx()->ns_ep_id);
	}
}
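
/*
 * Illustrative SP manifest fragment for the parser below (all property
 * values are hypothetical; the exact encodings are defined by the FF-A
 * manifest binding used by the platform):
 *
 *	/ {
 *		compatible = "arm,ffa-manifest-1.0";
 *		ffa-version = <0x00010001>;
 *		uuid = <0xdeadbeef 0x11223344 0x55667788 0x99aabbcc>;
 *		id = <0x8001>;
 *		exception-level = <2>;
 *		execution-state = <0>;
 *		execution-ctx-count = <8>;
 *		messaging-method = <3>;
 *	};
 *
 * The mandatory properties cause an error if absent, while a missing "id"
 * only produces a warning.
 */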
/*******************************************************************************
 * This function parses the Secure Partition Manifest. From the manifest, it
 * fetches the details required to prepare the Secure Partition image context
 * and the Secure Partition image boot arguments, if any.
 ******************************************************************************/
static int sp_manifest_parse(void *sp_manifest, int offset,
			     struct secure_partition_desc *sp,
			     entry_point_info_t *ep_info)
{
	int32_t ret, node;
	uint32_t config_32;

	/*
	 * Look for the mandatory fields that are expected to be present in
	 * the SP manifests.
	 */
	node = fdt_path_offset(sp_manifest, "/");
	if (node < 0) {
		ERROR("Did not find root node.\n");
		return node;
	}

	ret = fdt_read_uint32_array(sp_manifest, node, "uuid",
				    ARRAY_SIZE(sp->uuid), sp->uuid);
	if (ret != 0) {
		ERROR("Missing Secure Partition UUID.\n");
		return ret;
	}

	ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
	if (ret != 0) {
		ERROR("Missing SP Exception Level information.\n");
		return ret;
	}

	sp->runtime_el = config_32;

	ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
	if (ret != 0) {
		ERROR("Missing Secure Partition FF-A Version.\n");
		return ret;
	}

	sp->ffa_version = config_32;

	ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
	if (ret != 0) {
		ERROR("Missing Secure Partition Execution State.\n");
		return ret;
	}

	sp->execution_state = config_32;

	ret = fdt_read_uint32(sp_manifest, node,
			      "messaging-method", &config_32);
	if (ret != 0) {
		ERROR("Missing Secure Partition messaging method.\n");
		return ret;
	}

	/* Validate this entry, we currently only support direct messaging. */
	if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
			   FFA_PARTITION_DIRECT_REQ_SEND)) != 0U) {
		WARN("Invalid Secure Partition messaging method (0x%x)\n",
		     config_32);
		return -EINVAL;
	}

	sp->properties = config_32;

	ret = fdt_read_uint32(sp_manifest, node,
			      "execution-ctx-count", &config_32);

	if (ret != 0) {
		ERROR("Missing SP Execution Context Count.\n");
		return ret;
	}

	/*
	 * Ensure this field is set correctly in the manifest. However, since
	 * this is currently a hardcoded value for S-EL1 partitions, we don't
	 * need to save it here, just validate it.
	 */
	if (config_32 != PLATFORM_CORE_COUNT) {
		ERROR("SP Execution Context Count (%u) must be %u.\n",
		      config_32, PLATFORM_CORE_COUNT);
		return -EINVAL;
	}

	/*
	 * Look for the optional fields that are expected to be present in
	 * an SP manifest.
	 */
	ret = fdt_read_uint32(sp_manifest, node, "id", &config_32);
	if (ret != 0) {
		WARN("Missing Secure Partition ID.\n");
	} else {
		if (!is_ffa_secure_id_valid(config_32)) {
			ERROR("Invalid Secure Partition ID (0x%x).\n",
			      config_32);
			return -EINVAL;
		}
		sp->sp_id = config_32;
	}

	return 0;
}
/*******************************************************************************
 * This function gets the Secure Partition Manifest base and maps the manifest
 * region.
 * Currently only one Secure Partition manifest is considered, which is used to
 * prepare the context for the single Secure Partition.
 ******************************************************************************/
static int find_and_prepare_sp_context(void)
{
	void *sp_manifest;
	uintptr_t manifest_base;
	uintptr_t manifest_base_align;
	entry_point_info_t *next_image_ep_info;
	int32_t ret;
	struct secure_partition_desc *sp;

	next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (next_image_ep_info == NULL) {
		WARN("No Secure Partition image provided by BL2.\n");
		return -ENOENT;
	}

	sp_manifest = (void *)next_image_ep_info->args.arg0;
	if (sp_manifest == NULL) {
		WARN("Secure Partition manifest absent.\n");
		return -ENOENT;
	}

	manifest_base = (uintptr_t)sp_manifest;
	manifest_base_align = page_align(manifest_base, DOWN);

	/*
	 * Map the secure partition manifest region in the EL3 translation
	 * regime.
	 * Map an area equal to (2 * PAGE_SIZE) for now. After aligning the
	 * manifest base down to a page boundary, a single PAGE_SIZE region
	 * starting at the aligned base may not completely cover the secure
	 * partition manifest.
	 */
	ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
				      manifest_base_align,
				      PAGE_SIZE * 2,
				      MT_RO_DATA);
	if (ret != 0) {
		ERROR("Error while mapping SP manifest (%d).\n", ret);
		return ret;
	}

	ret = fdt_node_offset_by_compatible(sp_manifest, -1,
					    "arm,ffa-manifest-1.0");
	if (ret < 0) {
		ERROR("Error while reading SP manifest.\n");
		return -EINVAL;
	}

	/*
	 * Store the size of the manifest so that it can be used later to pass
	 * the manifest as boot information to the SP.
	 */
	next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest);
	INFO("Manifest size = %lu bytes.\n", next_image_ep_info->args.arg1);

	/*
	 * Select an SP descriptor for initialising the partition's execution
	 * context on the primary CPU.
	 */
	sp = spmc_get_current_sp_ctx();

	/* Initialize entry point information for the SP. */
	SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1,
		       SECURE | EP_ST_ENABLE);

	/* Parse the SP manifest. */
	ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info);
	if (ret != 0) {
		ERROR("Error in Secure Partition manifest parsing.\n");
		return ret;
	}

	/* Check that the runtime EL in the manifest was correct. */
	if (sp->runtime_el != S_EL1) {
		ERROR("Unexpected runtime EL: %d\n", sp->runtime_el);
		return -EINVAL;
	}

	/* Perform any common initialisation. */
	spmc_sp_common_setup(sp, next_image_ep_info);

	/* Perform any initialisation specific to S-EL1 SPs. */
	spmc_el1_sp_setup(sp, next_image_ep_info);

	/* Initialize the SP context with the required ep info. */
	spmc_sp_common_ep_commit(sp, next_image_ep_info);

	return 0;
}
/*******************************************************************************
 * This function validates and initialises the EL3 Logical Partitions.
 ******************************************************************************/
static int32_t logical_sp_init(void)
{
	int32_t rc = 0;
	struct el3_lp_desc *el3_lp_descs;

	/* Perform initial validation of the Logical Partitions. */
	rc = el3_sp_desc_validate();
	if (rc != 0) {
		ERROR("Logical Partition validation failed!\n");
		return rc;
	}

	el3_lp_descs = get_el3_lp_array();

	INFO("Logical Secure Partition init start.\n");
	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
		rc = el3_lp_descs[i].init();
		if (rc != 0) {
			ERROR("Logical SP (0x%x) failed to initialize.\n",
			      el3_lp_descs[i].sp_id);
			return rc;
		}
		VERBOSE("Logical SP (0x%x) initialized.\n",
			el3_lp_descs[i].sp_id);
	}

	INFO("Logical Secure Partition init completed.\n");

	return rc;
}

/*******************************************************************************
 * This function takes an SP execution context pointer and performs a
 * synchronous entry into it.
 ******************************************************************************/
uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
{
	uint64_t rc;

	assert(ec != NULL);

	/* Assign the context of the SP to this CPU. */
	cm_set_context(&(ec->cpu_ctx), SECURE);

	/* Restore the context assigned above. */
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	/* Invalidate TLBs at EL1. */
	tlbivmalle1();
	dsbish();

	/* Enter the Secure Partition. */
	rc = spm_secure_partition_enter(&ec->c_rt_ctx);

	/* Save the secure state. */
	cm_el1_sysregs_context_save(SECURE);

	return rc;
}

/*******************************************************************************
 * SPMC Helper Functions.
 ******************************************************************************/
static int32_t sp_init(void)
{
	uint64_t rc;
	struct secure_partition_desc *sp;
	struct sp_exec_ctx *ec;

	sp = spmc_get_current_sp_ctx();
	ec = spmc_get_sp_ec(sp);
	ec->rt_model = RT_MODEL_INIT;
	ec->rt_state = RT_STATE_RUNNING;

	INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);

	rc = spmc_sp_synchronous_entry(ec);
	if (rc != 0) {
		/* Indicate SP init was not successful. */
		ERROR("SP (0x%x) failed to initialize (%lu).\n",
		      sp->sp_id, rc);
		return 0;
	}

	ec->rt_state = RT_STATE_WAITING;
	INFO("Secure Partition initialized.\n");

	return 1;
}

static void initialize_sp_descs(void)
{
	struct secure_partition_desc *sp;

	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
		sp = &sp_desc[i];
		sp->sp_id = INV_SP_ID;
		sp->mailbox.rx_buffer = NULL;
		sp->mailbox.tx_buffer = NULL;
		sp->mailbox.state = MAILBOX_STATE_EMPTY;
		sp->secondary_ep = 0;
	}
}

static void initialize_ns_ep_descs(void)
{
	struct ns_endpoint_desc *ns_ep;

	for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
		ns_ep = &ns_ep_desc[i];
		/*
		 * Clashes with the Hypervisor ID but will not be a
		 * problem in practice.
		 */
		ns_ep->ns_ep_id = 0;
		ns_ep->ffa_version = 0;
		ns_ep->mailbox.rx_buffer = NULL;
		ns_ep->mailbox.tx_buffer = NULL;
		ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
	}
}
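
/*
 * Note on the boot flow implemented below: spmc_setup() runs as part of BL31
 * initialisation. It validates the Logical Partitions, prepares the physical
 * SP context from its manifest and then registers sp_init() via
 * bl31_register_bl32_init(), so the first synchronous entry into the SP is
 * deferred until BL31 has completed its own initialisation.
 */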
/*******************************************************************************
 * Initialize SPMC attributes for the SPMD.
 ******************************************************************************/
void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
{
	spmc_attrs->major_version = FFA_VERSION_MAJOR;
	spmc_attrs->minor_version = FFA_VERSION_MINOR;
	spmc_attrs->exec_state = MODE_RW_64;
	spmc_attrs->spmc_id = FFA_SPMC_ID;
}

/*******************************************************************************
 * Initialize contexts of all Secure Partitions.
 ******************************************************************************/
int32_t spmc_setup(void)
{
	int32_t ret;

	/* Initialize endpoint descriptors. */
	initialize_sp_descs();
	initialize_ns_ep_descs();

	/* Setup logical SPs. */
	ret = logical_sp_init();
	if (ret != 0) {
		ERROR("Failed to initialize Logical Partitions.\n");
		return ret;
	}

	/* Perform physical SP setup. */

	/* Disable MMU at EL1 (initialized by BL2). */
	disable_mmu_icache_el1();

	/* Initialize the context of the SP. */
	INFO("Secure Partition context setup start.\n");

	ret = find_and_prepare_sp_context();
	if (ret != 0) {
		ERROR("Error in SP finding and context preparation.\n");
		return ret;
	}

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&sp_init);

	INFO("Secure Partition setup done.\n");

	return 0;
}

/*******************************************************************************
 * Secure Partition Manager SMC handler.
 ******************************************************************************/
uint64_t spmc_smc_handler(uint32_t smc_fid,
			  bool secure_origin,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	switch (smc_fid) {

	case FFA_VERSION:
		return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
					   x4, cookie, handle, flags);

	case FFA_ID_GET:
		return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3,
					  x4, cookie, handle, flags);

	case FFA_FEATURES:
		return ffa_features_handler(smc_fid, secure_origin, x1, x2, x3,
					    x4, cookie, handle, flags);

	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
		return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
					      x3, x4, cookie, handle, flags);

	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
		return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
					       x3, x4, cookie, handle, flags);

	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
		return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					cookie, handle, flags);

	case FFA_RXTX_UNMAP:
		return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3,
					  x4, cookie, handle, flags);

	case FFA_PARTITION_INFO_GET:
		return partition_info_get_handler(smc_fid, secure_origin, x1,
						  x2, x3, x4, cookie, handle,
						  flags);

	case FFA_MSG_WAIT:
		return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					cookie, handle, flags);

	case FFA_ERROR:
		return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					 cookie, handle, flags);

	default:
		WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
		break;
	}
	return spmc_ffa_error_return(handle,
				     FFA_ERROR_NOT_SUPPORTED);
}