/*
 * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <arch_helpers.h>
#include <bl31/bl31.h>
#include <bl31/ehf.h>
#include <common/debug.h>
#include <common/fdt_wrappers.h>
#include <common/runtime_svc.h>
#include <common/uuid.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/smccc.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <libfdt.h>
#include <plat/common/platform.h>
#include <services/el3_spmc_logical_sp.h>
#include <services/ffa_svc.h>
#include <services/spmc_svc.h>
#include <services/spmd_svc.h>
#include "spmc.h"

#include <platform_def.h>

/* Declare the maximum number of SPs and EL3 LPs. */
#define MAX_SP_LP_PARTITIONS (SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT)

/*
 * Allocate a secure partition descriptor to describe each SP in the system
 * that does not reside at EL3.
 */
static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];

/*
 * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
 * the system that interacts with a SP. It is used to track the Hypervisor
 * buffer pair, version and ID for now. It could be extended to track VM
 * properties when the SPMC supports indirect messaging.
 */
static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];

/*
 * Helper function to obtain the array storing the EL3
 * Logical Partition descriptors.
 */
struct el3_lp_desc *get_el3_lp_array(void)
{
	return (struct el3_lp_desc *) EL3_LP_DESCS_START;
}

/*
 * Helper function to obtain the descriptor of the last SP to which control was
 * handed on this physical cpu. Currently, we assume there is only one SP.
 * TODO: Expand to track multiple partitions when required.
 */
struct secure_partition_desc *spmc_get_current_sp_ctx(void)
{
	return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
}

/*
 * Helper function to obtain the execution context of an SP on the
 * current physical cpu.
 */
struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
{
	return &(sp->ec[get_ec_index(sp)]);
}

/* Helper function to get pointer to SP context from its ID. */
struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
{
	/* Check for Secure World Partitions. */
	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
		if (sp_desc[i].sp_id == id) {
			return &(sp_desc[i]);
		}
	}
	return NULL;
}

/*
 * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
 * We assume that the first descriptor is reserved for this entity.
 */
struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
{
	return &(ns_ep_desc[0]);
}

/*
 * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
 * or OS kernel in the normal world or the last SP that was run.
 */
struct mailbox *spmc_get_mbox_desc(bool secure_origin)
{
	/* Obtain the RX/TX buffer pair descriptor. */
	if (secure_origin) {
		return &(spmc_get_current_sp_ctx()->mailbox);
	} else {
		return &(spmc_get_hyp_ctx()->mailbox);
	}
}

/*******************************************************************************
 * This function returns to the place where spmc_sp_synchronous_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
{
	/*
	 * The SPM must have initiated the original request through a
	 * synchronous entry into the secure partition. Jump back to the
	 * original C runtime context with the value of rc in x0.
	 */
	spm_secure_partition_exit(ec->c_rt_ctx, rc);

	panic();
}

/*******************************************************************************
 * Return FFA_ERROR with specified error code.
 ******************************************************************************/
uint64_t spmc_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}
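
/*
 * Illustrative note (hedged, not derived from this file alone): FF-A endpoint
 * IDs are 16-bit values. The helpers ffa_is_secure_world_id() and
 * ffa_is_normal_world_id() partition this ID space, while FFA_SPMC_ID,
 * SPMD_DIRECT_MSG_ENDPOINT_ID and INV_SP_ID are reserved. For example, a
 * hypothetical S-EL1 SP might be assigned 0x8001 (accepted by
 * is_ffa_secure_id_valid() below), whereas 0x0001 would be rejected as a
 * normal world ID.
 */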

/*******************************************************************************
 * Helper function to validate a secure partition ID to ensure it does not
 * conflict with any other FF-A component and follows the convention to
 * indicate it resides within the secure world.
 ******************************************************************************/
bool is_ffa_secure_id_valid(uint16_t partition_id)
{
	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();

	/* Ensure the ID is not the invalid partition ID. */
	if (partition_id == INV_SP_ID) {
		return false;
	}

	/* Ensure the ID is not the SPMD ID. */
	if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
		return false;
	}

	/*
	 * Ensure the ID follows the convention to indicate it resides
	 * in the secure world.
	 */
	if (!ffa_is_secure_world_id(partition_id)) {
		return false;
	}

	/* Ensure we don't conflict with the SPMC partition ID. */
	if (partition_id == FFA_SPMC_ID) {
		return false;
	}

	/* Ensure we do not already have an SP context with this ID. */
	if (spmc_get_sp_ctx(partition_id)) {
		return false;
	}

	/* Ensure we don't clash with any Logical SPs. */
	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
		if (el3_lp_descs[i].sp_id == partition_id) {
			return false;
		}
	}

	return true;
}

/*******************************************************************************
 * This function either forwards the request to the other world or returns
 * with an ERET depending on the source of the call.
 * We can assume that the destination is for an entity at a lower exception
 * level as any messages destined for a logical SP resident in EL3 will have
 * already been taken care of by the SPMC before entering this function.
 ******************************************************************************/
static uint64_t spmc_smc_return(uint32_t smc_fid,
				bool secure_origin,
				uint64_t x1,
				uint64_t x2,
				uint64_t x3,
				uint64_t x4,
				void *handle,
				void *cookie,
				uint64_t flags,
				uint16_t dst_id)
{
	/* If the destination is in the normal world always go via the SPMD. */
	if (ffa_is_normal_world_id(dst_id)) {
		return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
					cookie, handle, flags);
	}
	/*
	 * If the caller is secure and we want to return to the secure world,
	 * ERET directly.
	 */
	else if (secure_origin && ffa_is_secure_world_id(dst_id)) {
		SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
	}
	/* If we originated in the normal world then switch contexts. */
	else if (!secure_origin && ffa_is_secure_world_id(dst_id)) {
		return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
					     x3, x4, handle);
	} else {
		/* Unknown state. */
		panic();
	}

	/* Should not be reached. */
	return 0;
}

/*******************************************************************************
 * FF-A ABI Handlers.
 ******************************************************************************/

/*******************************************************************************
 * Helper function to validate arg2 as part of a direct message.
 ******************************************************************************/
static inline bool direct_msg_validate_arg2(uint64_t x2)
{
	/*
	 * We currently only support partition messages, therefore ensure x2 is
	 * not set.
	 */
	if (x2 != (uint64_t) 0) {
		VERBOSE("Arg2 MBZ for partition messages (0x%lx).\n", x2);
		return false;
	}
	return true;
}
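
/*
 * Reference sketch of the direct message register convention assumed by the
 * handlers below (based on the FF-A specification; the field accessor macros
 * live in ffa_svc.h):
 *   x1[31:16] - sender endpoint ID   (ffa_endpoint_source(x1))
 *   x1[15:0]  - receiver endpoint ID (ffa_endpoint_destination(x1))
 *   x2        - message flags, MBZ for the partition messages supported here
 *   x3-x4     - message payload, forwarded untouched by spmc_smc_return()
 * For example, a normal world caller targeting a hypothetical SP 0x8001 would
 * set x1 = (0x0000 << 16) | 0x8001.
 */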

/*******************************************************************************
 * Handle direct request messages and route to the appropriate destination.
 ******************************************************************************/
static uint64_t direct_req_smc_handler(uint32_t smc_fid,
				       bool secure_origin,
				       uint64_t x1,
				       uint64_t x2,
				       uint64_t x3,
				       uint64_t x4,
				       void *cookie,
				       void *handle,
				       uint64_t flags)
{
	uint16_t dst_id = ffa_endpoint_destination(x1);
	struct el3_lp_desc *el3_lp_descs;
	struct secure_partition_desc *sp;
	unsigned int idx;

	/* Check if arg2 has been populated correctly based on message type. */
	if (!direct_msg_validate_arg2(x2)) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	el3_lp_descs = get_el3_lp_array();

	/* Check if the request is destined for a Logical Partition. */
	for (unsigned int i = 0U; i < MAX_EL3_LP_DESCS_COUNT; i++) {
		if (el3_lp_descs[i].sp_id == dst_id) {
			return el3_lp_descs[i].direct_req(
					smc_fid, secure_origin, x1, x2, x3, x4,
					cookie, handle, flags);
		}
	}

	/*
	 * If the request was not targeted to a LSP and came from the secure
	 * world then it is invalid, since an SP cannot call into the Normal
	 * world and there is no other SP to call into. If there are other SPs
	 * in future then the partition runtime model would need to be
	 * validated as well.
	 */
	if (secure_origin) {
		VERBOSE("Direct request not supported to the Normal World.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check if the SP ID is valid. */
	sp = spmc_get_sp_ctx(dst_id);
	if (sp == NULL) {
		VERBOSE("Direct request to unknown partition ID (0x%x).\n",
			dst_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Check that the target execution context is in a waiting state before
	 * forwarding the direct request to it.
	 */
	idx = get_ec_index(sp);
	if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
		VERBOSE("SP context on core%u is not waiting (%u).\n",
			idx, sp->ec[idx].rt_model);
		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
	}

	/*
	 * Everything checks out, so forward the request to the SP after
	 * updating its state and runtime model.
	 */
	sp->ec[idx].rt_state = RT_STATE_RUNNING;
	sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
			       handle, cookie, flags, dst_id);
}

/*******************************************************************************
 * Handle direct response messages and route to the appropriate destination.
 ******************************************************************************/
static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
					bool secure_origin,
					uint64_t x1,
					uint64_t x2,
					uint64_t x3,
					uint64_t x4,
					void *cookie,
					void *handle,
					uint64_t flags)
{
	uint16_t dst_id = ffa_endpoint_destination(x1);
	struct secure_partition_desc *sp;
	unsigned int idx;

	/* Check if arg2 has been populated correctly based on message type. */
	if (!direct_msg_validate_arg2(x2)) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check that the response did not originate from the Normal world. */
	if (!secure_origin) {
		VERBOSE("Direct Response not supported from Normal World.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Check that the response is either targeted to the Normal world or
	 * the SPMC, e.g. a PM response.
	 */
	if ((dst_id != FFA_SPMC_ID) && ffa_is_secure_world_id(dst_id)) {
		VERBOSE("Direct response to invalid partition ID (0x%x).\n",
			dst_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Obtain the SP descriptor and update its runtime state. */
	sp = spmc_get_sp_ctx(ffa_endpoint_source(x1));
	if (sp == NULL) {
		VERBOSE("Direct response to unknown partition ID (0x%x).\n",
			dst_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Sanity check state is being tracked correctly in the SPMC. */
	idx = get_ec_index(sp);
	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);

	/* Ensure SP execution context was in the right runtime model. */
	if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
		VERBOSE("SP context on core%u not handling direct req (%u).\n",
			idx, sp->ec[idx].rt_model);
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Update the state of the SP execution context. */
	sp->ec[idx].rt_state = RT_STATE_WAITING;

	/*
	 * If the response targets the SPMC (e.g. a PM response), complete the
	 * synchronous exit. Otherwise forward the response to the Normal
	 * world.
	 */
	if (dst_id == FFA_SPMC_ID) {
		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
		/* Should not get here. */
		panic();
	}

	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
			       handle, cookie, flags, dst_id);
}
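
/*
 * Summary of the execution context state transitions driven by the handlers
 * in this file (descriptive only):
 *   FFA_MSG_SEND_DIRECT_REQ:  RT_STATE_WAITING -> RT_STATE_RUNNING
 *                             (rt_model = RT_MODEL_DIR_REQ)
 *   FFA_MSG_SEND_DIRECT_RESP / FFA_MSG_WAIT:
 *                             RT_STATE_RUNNING -> RT_STATE_WAITING
 *   FFA_RUN:                  RT_STATE_WAITING/PREEMPTED/BLOCKED ->
 *                             RT_STATE_RUNNING (rt_model = RT_MODEL_RUN when
 *                             the context was waiting)
 */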

/*******************************************************************************
 * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
 * cycles.
 ******************************************************************************/
static uint64_t msg_wait_handler(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	struct secure_partition_desc *sp;
	unsigned int idx;

	/*
	 * Check that the call did not originate from the Normal world, as
	 * only the secure world can invoke this ABI.
	 */
	if (!secure_origin) {
		VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n");
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */
	sp = spmc_get_current_sp_ctx();
	if (sp == NULL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Get the execution context of the SP that invoked FFA_MSG_WAIT. */
	idx = get_ec_index(sp);

	/* Ensure SP execution context was in the right runtime model. */
	if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Sanity check the state is being tracked correctly in the SPMC. */
	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);

	/*
	 * Perform a synchronous exit if the partition was initialising. The
	 * state is updated after the exit.
	 */
	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
		/* Should not get here. */
		panic();
	}

	/* Update the state of the SP execution context. */
	sp->ec[idx].rt_state = RT_STATE_WAITING;

	/* Resume normal world if a secure interrupt was handled. */
	if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
		/* FFA_MSG_WAIT can only be called from the secure world. */
		unsigned int secure_state_in = SECURE;
		unsigned int secure_state_out = NON_SECURE;

		cm_el1_sysregs_context_save(secure_state_in);
		cm_el1_sysregs_context_restore(secure_state_out);
		cm_set_next_eret_context(secure_state_out);
		SMC_RET0(cm_get_context(secure_state_out));
	}

	/* Forward the response to the Normal world. */
	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
			       handle, cookie, flags, FFA_NWD_ID);
}

static uint64_t ffa_error_handler(uint32_t smc_fid,
				  bool secure_origin,
				  uint64_t x1,
				  uint64_t x2,
				  uint64_t x3,
				  uint64_t x4,
				  void *cookie,
				  void *handle,
				  uint64_t flags)
{
	struct secure_partition_desc *sp;
	unsigned int idx;

	/* Check that the call did not originate from the Normal world. */
	if (!secure_origin) {
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/* Get the descriptor of the SP that invoked FFA_ERROR. */
	sp = spmc_get_current_sp_ctx();
	if (sp == NULL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Get the execution context of the SP that invoked FFA_ERROR. */
	idx = get_ec_index(sp);

	/*
	 * We only expect FFA_ERROR to be received during SP initialisation,
	 * otherwise this is an invalid call.
	 */
	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
		ERROR("SP 0x%x failed to initialize.\n", sp->sp_id);
		spmc_sp_synchronous_exit(&sp->ec[idx], x2);
		/* Should not get here. */
		panic();
	}

	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
}

static uint64_t ffa_version_handler(uint32_t smc_fid,
				    bool secure_origin,
				    uint64_t x1,
				    uint64_t x2,
				    uint64_t x3,
				    uint64_t x4,
				    void *cookie,
				    void *handle,
				    uint64_t flags)
{
	uint32_t requested_version = x1 & FFA_VERSION_MASK;

	if (requested_version & FFA_VERSION_BIT31_MASK) {
		/* Invalid encoding, return an error. */
		SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
		/* Execution stops here. */
	}

	/* Determine the caller to store the requested version. */
	if (secure_origin) {
		/*
		 * Ensure that the SP is reporting the same version as
		 * specified in its manifest. If these do not match there is
		 * something wrong with the SP.
		 * TODO: Should we abort the SP? For now assert this is not
		 * the case.
		 */
		assert(requested_version ==
		       spmc_get_current_sp_ctx()->ffa_version);
	} else {
		/*
		 * If this is called by the normal world, record this
		 * information in its descriptor.
		 */
		spmc_get_hyp_ctx()->ffa_version = requested_version;
	}

	SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					  FFA_VERSION_MINOR));
}

/*******************************************************************************
 * Helper function to obtain the FF-A version of the calling partition.
 ******************************************************************************/
uint32_t get_partition_ffa_version(bool secure_origin)
{
	if (secure_origin) {
		return spmc_get_current_sp_ctx()->ffa_version;
	} else {
		return spmc_get_hyp_ctx()->ffa_version;
	}
}
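
/*
 * For reference, the FFA_VERSION encoding used above packs the major version
 * in bits [30:16] and the minor version in bits [15:0], with bit 31 MBZ.
 * For example, MAKE_FFA_VERSION(1, 1) evaluates to 0x00010001 (FF-A v1.1).
 */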

static uint64_t rxtx_map_handler(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	int ret;
	uint32_t error_code;
	uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
	struct mailbox *mbox;
	uintptr_t tx_address = x1;
	uintptr_t rx_address = x2;
	uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
	uint32_t buf_size = page_count * FFA_PAGE_SIZE;

	/*
	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
	 * indirect messaging with SPs. Check if the Hypervisor has invoked
	 * this ABI on behalf of a VM and reject it if this is the case.
	 */
	if (tx_address == 0 || rx_address == 0) {
		WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Ensure the specified buffers are not the same. */
	if (tx_address == rx_address) {
		WARN("TX Buffer must not be the same as RX Buffer.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Ensure the buffer size is not 0. */
	if (buf_size == 0U) {
		WARN("Buffer size must not be 0.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Ensure the buffer size is a multiple of the translation granule size
	 * in TF-A.
	 */
	if (buf_size % PAGE_SIZE != 0U) {
		WARN("Buffer size must be aligned to translation granule.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Obtain the RX/TX buffer pair descriptor. */
	mbox = spmc_get_mbox_desc(secure_origin);

	spin_lock(&mbox->lock);

	/* Check if buffers have already been mapped. */
	if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
		WARN("RX/TX Buffers already mapped (%p/%p)\n",
		     (void *) mbox->rx_buffer, (void *) mbox->tx_buffer);
		error_code = FFA_ERROR_DENIED;
		goto err;
	}

	/* Map the TX buffer as read-only. */
	ret = mmap_add_dynamic_region(tx_address, /* PA */
				      tx_address, /* VA */
				      buf_size, /* size */
				      mem_atts | MT_RO_DATA); /* attrs */
	if (ret != 0) {
		/* Return the correct error code. */
		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
						FFA_ERROR_INVALID_PARAMETER;
		WARN("Unable to map TX buffer: %d\n", error_code);
		goto err;
	}

	/* Map the RX buffer as read-write. */
	ret = mmap_add_dynamic_region(rx_address, /* PA */
				      rx_address, /* VA */
				      buf_size, /* size */
				      mem_atts | MT_RW_DATA); /* attrs */

	if (ret != 0) {
		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
						FFA_ERROR_INVALID_PARAMETER;
		WARN("Unable to map RX buffer: %d\n", error_code);
		/* Unmap the TX buffer again. */
		mmap_remove_dynamic_region(tx_address, buf_size);
		goto err;
	}

	mbox->tx_buffer = (void *) tx_address;
	mbox->rx_buffer = (void *) rx_address;
	mbox->rxtx_page_count = page_count;
	spin_unlock(&mbox->lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);
	/* Execution stops here. */
err:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, error_code);
}

static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
				   bool secure_origin,
				   uint64_t x1,
				   uint64_t x2,
				   uint64_t x3,
				   uint64_t x4,
				   void *cookie,
				   void *handle,
				   uint64_t flags)
{
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

	/*
	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
	 * indirect messaging with SPs. Check if the Hypervisor has invoked
	 * this ABI on behalf of a VM and reject it if this is the case.
	 */
	if (x1 != 0UL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&mbox->lock);

	/* Check if buffers are currently mapped. */
	if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
		spin_unlock(&mbox->lock);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Unmap the RX buffer. */
	if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
				       buf_size) != 0) {
		WARN("Unable to unmap RX buffer!\n");
	}

	mbox->rx_buffer = 0;

	/* Unmap the TX buffer. */
	if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
				       buf_size) != 0) {
		WARN("Unable to unmap TX buffer!\n");
	}

	mbox->tx_buffer = 0;
	mbox->rxtx_page_count = 0;

	spin_unlock(&mbox->lock);
	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}
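
/*
 * Illustrative FFA_RXTX_MAP invocation handled by rxtx_map_handler() above
 * (the addresses are placeholders, not real platform values):
 *   x0 = FFA_RXTX_MAP_SMC64
 *   x1 = 0x880000000        - physical address of the caller's TX buffer
 *   x2 = 0x880001000        - physical address of the caller's RX buffer
 *   x3 = 1                  - buffer size in FFA_PAGE_SIZE pages (bits [5:0])
 * On success the SPMC returns FFA_SUCCESS_SMC32 in x0.
 */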

/*
 * Collate the partition information in a v1.1 partition information
 * descriptor format; this will be converted later if required.
 */
static int partition_info_get_handler_v1_1(uint32_t *uuid,
					   struct ffa_partition_info_v1_1
					   *partitions,
					   uint32_t max_partitions,
					   uint32_t *partition_count)
{
	uint32_t index;
	struct ffa_partition_info_v1_1 *desc;
	bool null_uuid = is_null_uuid(uuid);
	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();

	/* Deal with Logical Partitions. */
	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
		if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
			/* Found a matching UUID, populate appropriately. */
			if (*partition_count >= max_partitions) {
				return FFA_ERROR_NO_MEMORY;
			}

			desc = &partitions[*partition_count];
			desc->ep_id = el3_lp_descs[index].sp_id;
			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
			desc->properties = el3_lp_descs[index].properties;
			if (null_uuid) {
				copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
			}
			(*partition_count)++;
		}
	}

	/* Deal with physical SPs. */
	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
			/* Found a matching UUID, populate appropriately. */
			if (*partition_count >= max_partitions) {
				return FFA_ERROR_NO_MEMORY;
			}

			desc = &partitions[*partition_count];
			desc->ep_id = sp_desc[index].sp_id;
			/*
			 * Execution context count must match the number of
			 * cores for S-EL1 SPs.
			 */
			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
			desc->properties = sp_desc[index].properties;
			if (null_uuid) {
				copy_uuid(desc->uuid, sp_desc[index].uuid);
			}
			(*partition_count)++;
		}
	}
	return 0;
}

/*
 * Handle the case where the caller only wants the count of partitions
 * matching a given UUID and does not want the corresponding descriptors
 * populated.
 */
static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
{
	uint32_t index = 0;
	uint32_t partition_count = 0;
	bool null_uuid = is_null_uuid(uuid);
	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();

	/* Deal with Logical Partitions. */
	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
		if (null_uuid ||
		    uuid_match(uuid, el3_lp_descs[index].uuid)) {
			(partition_count)++;
		}
	}

	/* Deal with physical SPs. */
	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
			(partition_count)++;
		}
	}
	return partition_count;
}

/*
 * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
 * the corresponding descriptor format from the v1.1 descriptor array.
 */
static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
					     *partitions,
					     struct mailbox *mbox,
					     int partition_count)
{
	uint32_t index;
	uint32_t buf_size;
	uint32_t descriptor_size;
	struct ffa_partition_info_v1_0 *v1_0_partitions =
		(struct ffa_partition_info_v1_0 *) mbox->rx_buffer;

	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
	descriptor_size = partition_count *
			  sizeof(struct ffa_partition_info_v1_0);

	if (descriptor_size > buf_size) {
		return FFA_ERROR_NO_MEMORY;
	}

	for (index = 0U; index < partition_count; index++) {
		v1_0_partitions[index].ep_id = partitions[index].ep_id;
		v1_0_partitions[index].execution_ctx_count =
			partitions[index].execution_ctx_count;
		v1_0_partitions[index].properties =
			partitions[index].properties;
	}
	return 0;
}
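
/*
 * Note for the reader: the v1.0 and v1.1 partition information descriptors
 * share the endpoint ID, execution context count and properties fields; the
 * v1.1 format additionally carries the partition UUID. This is why the v1.0
 * conversion above copies only the three common fields.
 */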

/*
 * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
 * v1.0 implementations.
 */
static uint64_t partition_info_get_handler(uint32_t smc_fid,
					   bool secure_origin,
					   uint64_t x1,
					   uint64_t x2,
					   uint64_t x3,
					   uint64_t x4,
					   void *cookie,
					   void *handle,
					   uint64_t flags)
{
	int ret;
	uint32_t partition_count = 0;
	uint32_t size = 0;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	struct mailbox *mbox;
	uint64_t info_get_flags;
	bool count_only;
	uint32_t uuid[4];

	uuid[0] = x1;
	uuid[1] = x2;
	uuid[2] = x3;
	uuid[3] = x4;

	/* Determine if the Partition descriptors should be populated. */
	info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
	count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);

	/* Handle the case where we don't need to populate the descriptors. */
	if (count_only) {
		partition_count = partition_info_get_handler_count_only(uuid);
		if (partition_count == 0) {
			return spmc_ffa_error_return(handle,
						FFA_ERROR_INVALID_PARAMETER);
		}
	} else {
		struct ffa_partition_info_v1_1 partitions[MAX_SP_LP_PARTITIONS];

		/*
		 * Handle the case where the partition descriptors are
		 * required: check we have the buffers available and populate
		 * the appropriate structure version.
		 */

		/* Obtain the v1.1 format of the descriptors. */
		ret = partition_info_get_handler_v1_1(uuid, partitions,
						      MAX_SP_LP_PARTITIONS,
						      &partition_count);

		/* Check if an error occurred during discovery. */
		if (ret != 0) {
			goto err;
		}

		/* If we didn't find any matches the UUID is unknown. */
		if (partition_count == 0) {
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err;
		}

		/* Obtain the partition mailbox RX/TX buffer pair descriptor. */
		mbox = spmc_get_mbox_desc(secure_origin);

		/*
		 * If the caller has not bothered registering its RX/TX pair
		 * then return an error code.
		 */
		spin_lock(&mbox->lock);
		if (mbox->rx_buffer == NULL) {
			ret = FFA_ERROR_BUSY;
			goto err_unlock;
		}

		/* Ensure the RX buffer is currently free. */
		if (mbox->state != MAILBOX_STATE_EMPTY) {
			ret = FFA_ERROR_BUSY;
			goto err_unlock;
		}

		/* Zero the RX buffer before populating. */
		(void)memset(mbox->rx_buffer, 0,
			     mbox->rxtx_page_count * FFA_PAGE_SIZE);

		/*
		 * Depending on the FF-A version of the requesting partition
		 * we may need to convert to a v1.0 format, otherwise we can
		 * copy directly.
		 */
		if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
			ret = partition_info_populate_v1_0(partitions,
							   mbox,
							   partition_count);
			if (ret != 0) {
				goto err_unlock;
			}
		} else {
			uint32_t buf_size = mbox->rxtx_page_count *
					    FFA_PAGE_SIZE;

			/* Ensure the descriptors will fit in the buffer. */
			size = sizeof(struct ffa_partition_info_v1_1);
			if (partition_count * size > buf_size) {
				ret = FFA_ERROR_NO_MEMORY;
				goto err_unlock;
			}
			memcpy(mbox->rx_buffer, partitions,
			       partition_count * size);
		}

		mbox->state = MAILBOX_STATE_FULL;
		spin_unlock(&mbox->lock);
	}
	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);

err_unlock:
	spin_unlock(&mbox->lock);
err:
	return spmc_ffa_error_return(handle, ret);
}
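
/*
 * Illustrative FFA_PARTITION_INFO_GET invocation (values are placeholders):
 *   x1-x4 = the target UUID words, or all zeros to discover every partition
 *   x5    = bit 0 set to request only the partition count
 * On success x0 = FFA_SUCCESS_SMC32 and x2 = the number of matching
 * partitions; when descriptors were requested they are written to the
 * caller's RX buffer, which must be returned afterwards with FFA_RX_RELEASE.
 */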

static uint64_t ffa_features_handler(uint32_t smc_fid,
				     bool secure_origin,
				     uint64_t x1,
				     uint64_t x2,
				     uint64_t x3,
				     uint64_t x4,
				     void *cookie,
				     void *handle,
				     uint64_t flags)
{
	uint32_t function_id = (uint32_t) x1;
	uint32_t input_properties = (uint32_t) x2;

	/*
	 * We don't currently support any additional input properties
	 * for any ABI, therefore ensure this value is always set to 0.
	 */
	if (input_properties != 0) {
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/* Check if a Feature ID was requested. */
	if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) {
		/* We currently don't support any additional features. */
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/* Report if an FF-A ABI is supported. */
	switch (function_id) {
	/* Supported features from both worlds. */
	case FFA_ERROR:
	case FFA_SUCCESS_SMC32:
	case FFA_ID_GET:
	case FFA_FEATURES:
	case FFA_VERSION:
	case FFA_RX_RELEASE:
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_PARTITION_INFO_GET:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_MSG_RUN:

		/*
		 * We are relying on the fact that the other registers
		 * will be set to 0 as these values align with the
		 * currently implemented features of the SPMC. If this
		 * changes this function must be extended to handle
		 * reporting the additional functionality.
		 */

		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	/* Supported ABIs only from the secure world. */
	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MSG_WAIT:

		if (!secure_origin) {
			return spmc_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	default:
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_NOT_SUPPORTED);
	}
}
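
/*
 * Example FFA_FEATURES query resolved by the handler above (descriptive
 * sketch only): a caller passing x1 = FFA_RXTX_MAP_SMC64 and x2 = 0 receives
 * FFA_SUCCESS_SMC32, whereas querying an unimplemented ABI or a feature ID
 * (bit 31 of x1 clear) yields FFA_ERROR_NOT_SUPPORTED.
 */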

static uint64_t ffa_id_get_handler(uint32_t smc_fid,
				   bool secure_origin,
				   uint64_t x1,
				   uint64_t x2,
				   uint64_t x3,
				   uint64_t x4,
				   void *cookie,
				   void *handle,
				   uint64_t flags)
{
	if (secure_origin) {
		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
			 spmc_get_current_sp_ctx()->sp_id);
	} else {
		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
			 spmc_get_hyp_ctx()->ns_ep_id);
	}
}
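
/*
 * For reference, ffa_run_handler() below expects the FFA_MSG_RUN target in
 * x1, with the endpoint ID in bits [31:16] (FFA_RUN_EP_ID) and the vCPU ID in
 * bits [15:0] (FFA_RUN_VCPU_ID). For example, running vCPU 0 of a hypothetical
 * SP 0x8001 would use x1 = (0x8001 << 16) | 0x0.
 */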

static uint64_t ffa_run_handler(uint32_t smc_fid,
				bool secure_origin,
				uint64_t x1,
				uint64_t x2,
				uint64_t x3,
				uint64_t x4,
				void *cookie,
				void *handle,
				uint64_t flags)
{
	struct secure_partition_desc *sp;
	uint16_t target_id = FFA_RUN_EP_ID(x1);
	uint16_t vcpu_id = FFA_RUN_VCPU_ID(x1);
	unsigned int idx;
	unsigned int *rt_state;
	unsigned int *rt_model;

	/* Can only be called from the normal world. */
	if (secure_origin) {
		ERROR("FFA_RUN can only be called from NWd.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Cannot run a Normal world partition. */
	if (ffa_is_normal_world_id(target_id)) {
		ERROR("Cannot run a NWd partition (0x%x).\n", target_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check that the target SP exists. */
	sp = spmc_get_sp_ctx(target_id);
	if (sp == NULL) {
		ERROR("Unknown partition ID (0x%x).\n", target_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	idx = get_ec_index(sp);
	if (idx != vcpu_id) {
		ERROR("Cannot run vcpu %d != %d.\n", idx, vcpu_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}
	rt_state = &((sp->ec[idx]).rt_state);
	rt_model = &((sp->ec[idx]).rt_model);
	if (*rt_state == RT_STATE_RUNNING) {
		ERROR("Partition (0x%x) is already running.\n", target_id);
		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
	}

	/*
	 * Sanity check that if the execution context was not waiting then it
	 * was either in the direct request or the run partition runtime model.
	 */
	if (*rt_state == RT_STATE_PREEMPTED || *rt_state == RT_STATE_BLOCKED) {
		assert(*rt_model == RT_MODEL_RUN ||
		       *rt_model == RT_MODEL_DIR_REQ);
	}

	/*
	 * If the context was waiting then update the partition runtime model.
	 */
	if (*rt_state == RT_STATE_WAITING) {
		*rt_model = RT_MODEL_RUN;
	}

	/*
	 * Forward the request to the correct SP vCPU after updating
	 * its state.
	 */
	*rt_state = RT_STATE_RUNNING;

	return spmc_smc_return(smc_fid, secure_origin, x1, 0, 0, 0,
			       handle, cookie, flags, target_id);
}

static uint64_t rx_release_handler(uint32_t smc_fid,
				   bool secure_origin,
				   uint64_t x1,
				   uint64_t x2,
				   uint64_t x3,
				   uint64_t x4,
				   void *cookie,
				   void *handle,
				   uint64_t flags)
{
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);

	spin_lock(&mbox->lock);

	if (mbox->state != MAILBOX_STATE_FULL) {
		spin_unlock(&mbox->lock);
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	mbox->state = MAILBOX_STATE_EMPTY;
	spin_unlock(&mbox->lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}
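
/*
 * Descriptive note on RX buffer ownership as implemented here: when the SPMC
 * writes into a caller's RX buffer (e.g. in partition_info_get_handler()) the
 * mailbox state moves to MAILBOX_STATE_FULL and the contents belong to the
 * receiver; the receiver hands the buffer back with FFA_RX_RELEASE, handled
 * above, which moves the state back to MAILBOX_STATE_EMPTY.
 */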

/*******************************************************************************
 * This function parses the Secure Partition Manifest and fetches the details
 * required to prepare the Secure Partition image context and any boot
 * arguments.
 ******************************************************************************/
static int sp_manifest_parse(void *sp_manifest, int offset,
			     struct secure_partition_desc *sp,
			     entry_point_info_t *ep_info)
{
	int32_t ret, node;
	uint32_t config_32;

	/*
	 * Look for the mandatory fields that are expected to be present in
	 * the SP manifests.
	 */
	node = fdt_path_offset(sp_manifest, "/");
	if (node < 0) {
		ERROR("Did not find root node.\n");
		return node;
	}

	ret = fdt_read_uint32_array(sp_manifest, node, "uuid",
				    ARRAY_SIZE(sp->uuid), sp->uuid);
	if (ret != 0) {
		ERROR("Missing Secure Partition UUID.\n");
		return ret;
	}

	ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
	if (ret != 0) {
		ERROR("Missing SP Exception Level information.\n");
		return ret;
	}

	sp->runtime_el = config_32;

	ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
	if (ret != 0) {
		ERROR("Missing Secure Partition FF-A Version.\n");
		return ret;
	}

	sp->ffa_version = config_32;

	ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
	if (ret != 0) {
		ERROR("Missing Secure Partition Execution State.\n");
		return ret;
	}

	sp->execution_state = config_32;

	ret = fdt_read_uint32(sp_manifest, node,
			      "messaging-method", &config_32);
	if (ret != 0) {
		ERROR("Missing Secure Partition messaging method.\n");
		return ret;
	}

	/* Validate this entry, we currently only support direct messaging. */
	if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
			   FFA_PARTITION_DIRECT_REQ_SEND)) != 0U) {
		WARN("Invalid Secure Partition messaging method (0x%x)\n",
		     config_32);
		return -EINVAL;
	}

	sp->properties = config_32;

	ret = fdt_read_uint32(sp_manifest, node,
			      "execution-ctx-count", &config_32);

	if (ret != 0) {
		ERROR("Missing SP Execution Context Count.\n");
		return ret;
	}

	/*
	 * Ensure this field is set correctly in the manifest. However, since
	 * this is currently a hardcoded value for S-EL1 partitions, we don't
	 * need to save it here, just validate it.
	 */
	if (config_32 != PLATFORM_CORE_COUNT) {
		ERROR("SP Execution Context Count (%u) must be %u.\n",
		      config_32, PLATFORM_CORE_COUNT);
		return -EINVAL;
	}

	/*
	 * Look for the optional fields that are expected to be present in
	 * an SP manifest.
	 */
	ret = fdt_read_uint32(sp_manifest, node, "id", &config_32);
	if (ret != 0) {
		WARN("Missing Secure Partition ID.\n");
	} else {
		if (!is_ffa_secure_id_valid(config_32)) {
			ERROR("Invalid Secure Partition ID (0x%x).\n",
			      config_32);
			return -EINVAL;
		}
		sp->sp_id = config_32;
	}

	return 0;
}
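
/*
 * Purely illustrative manifest snippet showing the properties consumed by
 * sp_manifest_parse() above. All values are placeholders and must follow the
 * bindings and enums used by this SPMC (e.g. S_EL1, PLATFORM_CORE_COUNT);
 * refer to the TF-A SP manifest binding documentation for real values.
 *
 *   / {
 *           compatible = "arm,ffa-manifest-1.0";
 *           ffa-version = <0x00010001>;
 *           uuid = <0xaabbccdd 0x11223344 0x55667788 0x99aabbcc>;
 *           id = <0x8001>;
 *           execution-ctx-count = <8>;
 *           exception-level = <...>;    placeholder: value mapping to S_EL1
 *           execution-state = <...>;    placeholder: AArch64 encoding
 *           messaging-method = <...>;   placeholder: direct req send/recv bits
 *   };
 */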

/*******************************************************************************
 * This function gets the Secure Partition Manifest base and maps the manifest
 * region.
 * Currently only one Secure Partition manifest is considered, which is used to
 * prepare the context for the single Secure Partition.
 ******************************************************************************/
static int find_and_prepare_sp_context(void)
{
	void *sp_manifest;
	uintptr_t manifest_base;
	uintptr_t manifest_base_align;
	entry_point_info_t *next_image_ep_info;
	int32_t ret;
	struct secure_partition_desc *sp;

	next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (next_image_ep_info == NULL) {
		WARN("No Secure Partition image provided by BL2.\n");
		return -ENOENT;
	}

	sp_manifest = (void *)next_image_ep_info->args.arg0;
	if (sp_manifest == NULL) {
		WARN("Secure Partition manifest absent.\n");
		return -ENOENT;
	}

	manifest_base = (uintptr_t)sp_manifest;
	manifest_base_align = page_align(manifest_base, DOWN);

	/*
	 * Map the secure partition manifest region in the EL3 translation
	 * regime.
	 * Map an area equal to (2 * PAGE_SIZE) for now. After aligning the
	 * manifest base down to a page boundary, a single page may not
	 * completely accommodate the secure partition manifest region.
	 */
	ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
				      manifest_base_align,
				      PAGE_SIZE * 2,
				      MT_RO_DATA);
	if (ret != 0) {
		ERROR("Error while mapping SP manifest (%d).\n", ret);
		return ret;
	}

	ret = fdt_node_offset_by_compatible(sp_manifest, -1,
					    "arm,ffa-manifest-1.0");
	if (ret < 0) {
		ERROR("Error while reading SP manifest.\n");
		return -EINVAL;
	}

	/*
	 * Store the size of the manifest so that it can be used to pass the
	 * manifest as boot information later.
	 */
	next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest);
	INFO("Manifest size = %lu bytes.\n", next_image_ep_info->args.arg1);

	/*
	 * Select an SP descriptor for initialising the partition's execution
	 * context on the primary CPU.
	 */
	sp = spmc_get_current_sp_ctx();

	/* Initialize entry point information for the SP. */
	SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1,
		       SECURE | EP_ST_ENABLE);

	/* Parse the SP manifest. */
	ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info);
	if (ret != 0) {
		ERROR("Error in Secure Partition manifest parsing.\n");
		return ret;
	}

	/* Check that the runtime EL in the manifest was correct. */
	if (sp->runtime_el != S_EL1) {
		ERROR("Unexpected runtime EL: %d\n", sp->runtime_el);
		return -EINVAL;
	}

	/* Perform any common initialisation. */
	spmc_sp_common_setup(sp, next_image_ep_info);

	/* Perform any initialisation specific to S-EL1 SPs. */
	spmc_el1_sp_setup(sp, next_image_ep_info);

	/* Initialize the SP context with the required ep info. */
	spmc_sp_common_ep_commit(sp, next_image_ep_info);

	return 0;
}

/*******************************************************************************
 * This function validates the EL3 Logical Partition descriptors and then
 * initialises each Logical Partition in turn.
 ******************************************************************************/
static int32_t logical_sp_init(void)
{
	int32_t rc = 0;
	struct el3_lp_desc *el3_lp_descs;

	/* Perform initial validation of the Logical Partitions. */
	rc = el3_sp_desc_validate();
	if (rc != 0) {
		ERROR("Logical Partition validation failed!\n");
		return rc;
	}

	el3_lp_descs = get_el3_lp_array();

	INFO("Logical Secure Partition init start.\n");
	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
		rc = el3_lp_descs[i].init();
		if (rc != 0) {
			ERROR("Logical SP (0x%x) failed to initialize.\n",
			      el3_lp_descs[i].sp_id);
			return rc;
		}
		VERBOSE("Logical SP (0x%x) initialized.\n",
			el3_lp_descs[i].sp_id);
	}

	INFO("Logical Secure Partition init completed.\n");

	return rc;
}

uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
{
	uint64_t rc;

	assert(ec != NULL);

	/* Assign the context of the SP to this CPU. */
	cm_set_context(&(ec->cpu_ctx), SECURE);

	/* Restore the context assigned above. */
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	/* Invalidate TLBs at EL1. */
	tlbivmalle1();
	dsbish();

	/* Enter the Secure Partition. */
	rc = spm_secure_partition_enter(&ec->c_rt_ctx);

	/* Save secure state. */
	cm_el1_sysregs_context_save(SECURE);

	return rc;
}
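
/*
 * Descriptive note: spmc_sp_synchronous_entry() and spmc_sp_synchronous_exit()
 * form a pair. The entry saves the EL3 C runtime context via
 * spm_secure_partition_enter() before handing control to the SP; the SP later
 * causes spmc_sp_synchronous_exit() to run (e.g. via FFA_MSG_WAIT or FFA_ERROR
 * while in RT_MODEL_INIT), which resumes that saved context with the exit code
 * in x0, making the entry call appear to return.
 */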

/*******************************************************************************
 * SPMC Helper Functions.
 ******************************************************************************/
static int32_t sp_init(void)
{
	uint64_t rc;
	struct secure_partition_desc *sp;
	struct sp_exec_ctx *ec;

	sp = spmc_get_current_sp_ctx();
	ec = spmc_get_sp_ec(sp);
	ec->rt_model = RT_MODEL_INIT;
	ec->rt_state = RT_STATE_RUNNING;

	INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);

	rc = spmc_sp_synchronous_entry(ec);
	if (rc != 0) {
		/* Indicate SP init was not successful. */
		ERROR("SP (0x%x) failed to initialize (%lu).\n",
		      sp->sp_id, rc);
		return 0;
	}

	ec->rt_state = RT_STATE_WAITING;
	INFO("Secure Partition initialized.\n");

	return 1;
}

static void initalize_sp_descs(void)
{
	struct secure_partition_desc *sp;

	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
		sp = &sp_desc[i];
		sp->sp_id = INV_SP_ID;
		sp->mailbox.rx_buffer = NULL;
		sp->mailbox.tx_buffer = NULL;
		sp->mailbox.state = MAILBOX_STATE_EMPTY;
		sp->secondary_ep = 0;
	}
}

static void initalize_ns_ep_descs(void)
{
	struct ns_endpoint_desc *ns_ep;

	for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
		ns_ep = &ns_ep_desc[i];
		/*
		 * Clashes with the Hypervisor ID but will not be a
		 * problem in practice.
		 */
		ns_ep->ns_ep_id = 0;
		ns_ep->ffa_version = 0;
		ns_ep->mailbox.rx_buffer = NULL;
		ns_ep->mailbox.tx_buffer = NULL;
		ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
	}
}

/*******************************************************************************
 * Initialize SPMC attributes for the SPMD.
 ******************************************************************************/
void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
{
	spmc_attrs->major_version = FFA_VERSION_MAJOR;
	spmc_attrs->minor_version = FFA_VERSION_MINOR;
	spmc_attrs->exec_state = MODE_RW_64;
	spmc_attrs->spmc_id = FFA_SPMC_ID;
}

/*******************************************************************************
 * Initialize contexts of all Secure Partitions.
 ******************************************************************************/
int32_t spmc_setup(void)
{
	int32_t ret;

	/* Initialize endpoint descriptors. */
	initalize_sp_descs();
	initalize_ns_ep_descs();

	/* Setup logical SPs. */
	ret = logical_sp_init();
	if (ret != 0) {
		ERROR("Failed to initialize Logical Partitions.\n");
		return ret;
	}

	/* Perform physical SP setup. */

	/* Disable MMU at EL1 (initialized by BL2). */
	disable_mmu_icache_el1();

	/* Initialize the context of the SP. */
	INFO("Secure Partition context setup start.\n");

	ret = find_and_prepare_sp_context();
	if (ret != 0) {
		ERROR("Error in SP finding and context preparation.\n");
		return ret;
	}

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&sp_init);

	INFO("Secure Partition setup done.\n");

	return 0;
}
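
/*
 * Descriptive summary of the boot flow implemented above: spmc_setup()
 * initialises the endpoint descriptors, initialises the EL3 Logical
 * Partitions, parses the SP manifest via find_and_prepare_sp_context() and
 * registers sp_init() for deferred execution. BL31 later invokes sp_init(),
 * which performs the first synchronous entry into the SP; the SP signals the
 * end of its initialisation with FFA_MSG_WAIT (or failure with FFA_ERROR).
 */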

/*******************************************************************************
 * Secure Partition Manager SMC handler.
 ******************************************************************************/
uint64_t spmc_smc_handler(uint32_t smc_fid,
			  bool secure_origin,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	switch (smc_fid) {

	case FFA_VERSION:
		return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
					   x4, cookie, handle, flags);

	case FFA_ID_GET:
		return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3,
					  x4, cookie, handle, flags);

	case FFA_FEATURES:
		return ffa_features_handler(smc_fid, secure_origin, x1, x2, x3,
					    x4, cookie, handle, flags);

	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
		return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
					      x3, x4, cookie, handle, flags);

	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
		return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
					       x3, x4, cookie, handle, flags);

	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
		return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					cookie, handle, flags);

	case FFA_RXTX_UNMAP:
		return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3,
					  x4, cookie, handle, flags);

	case FFA_PARTITION_INFO_GET:
		return partition_info_get_handler(smc_fid, secure_origin, x1,
						  x2, x3, x4, cookie, handle,
						  flags);

	case FFA_RX_RELEASE:
		return rx_release_handler(smc_fid, secure_origin, x1, x2, x3,
					  x4, cookie, handle, flags);

	case FFA_MSG_WAIT:
		return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					cookie, handle, flags);

	case FFA_ERROR:
		return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					 cookie, handle, flags);

	case FFA_MSG_RUN:
		return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4,
				       cookie, handle, flags);

	default:
		WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
		break;
	}
	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
}