/*
 * Copyright (c) 2022-2024, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <stdio.h>

#include <arch_helpers.h>
#include <bl31/bl31.h>
#include <bl31/ehf.h>
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <common/fdt_wrappers.h>
#include <common/runtime_svc.h>
#include <common/uuid.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/smccc.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <libfdt.h>
#include <plat/common/platform.h>
#include <services/el3_spmc_logical_sp.h>
#include <services/ffa_svc.h>
#include <services/spmc_svc.h>
#include <services/spmd_svc.h>
#include "spmc.h"
#include "spmc_shared_mem.h"

#include <platform_def.h>

/* FFA_MEM_PERM_* helpers */
#define FFA_MEM_PERM_MASK		U(7)
#define FFA_MEM_PERM_DATA_MASK		U(3)
#define FFA_MEM_PERM_DATA_SHIFT		U(0)
#define FFA_MEM_PERM_DATA_NA		U(0)
#define FFA_MEM_PERM_DATA_RW		U(1)
#define FFA_MEM_PERM_DATA_RES		U(2)
#define FFA_MEM_PERM_DATA_RO		U(3)
#define FFA_MEM_PERM_INST_EXEC		(U(0) << 2)
#define FFA_MEM_PERM_INST_NON_EXEC	(U(1) << 2)

/* Declare the maximum number of SPs and EL3 LPs. */
#define MAX_SP_LP_PARTITIONS	(SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT)

/*
 * Allocate a secure partition descriptor to describe each SP in the system
 * that does not reside at EL3.
 */
static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];

/*
 * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
 * the system that interacts with a SP. It is used to track the Hypervisor
 * buffer pair, version and ID for now. It could be extended to track VM
 * properties when the SPMC supports indirect messaging.
 */
static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];

static uint64_t spmc_sp_interrupt_handler(uint32_t id,
					  uint32_t flags,
					  void *handle,
					  void *cookie);

/*
 * Helper function to obtain the array storing the EL3
 * Logical Partition descriptors.
 */
struct el3_lp_desc *get_el3_lp_array(void)
{
	return (struct el3_lp_desc *) EL3_LP_DESCS_START;
}

/*
 * Helper function to obtain the descriptor of the last SP to which control was
 * handed on this physical cpu. Currently, we assume there is only one SP.
 * TODO: Expand to track multiple partitions when required.
 */
struct secure_partition_desc *spmc_get_current_sp_ctx(void)
{
	return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
}

/*
 * Helper function to obtain the execution context of an SP on the
 * current physical cpu.
 */
struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
{
	return &(sp->ec[get_ec_index(sp)]);
}

/* Helper function to get pointer to SP context from its ID. */
struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
{
	/* Check for Secure World Partitions. */
	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
		if (sp_desc[i].sp_id == id) {
			return &(sp_desc[i]);
		}
	}
	return NULL;
}

/*
 * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
 * We assume that the first descriptor is reserved for this entity.
 */
struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
{
	return &(ns_ep_desc[0]);
}

/*
 * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
 * or OS kernel in the normal world or the last SP that was run.
 */
struct mailbox *spmc_get_mbox_desc(bool secure_origin)
{
	/* Obtain the RX/TX buffer pair descriptor. */
	if (secure_origin) {
		return &(spmc_get_current_sp_ctx()->mailbox);
	} else {
		return &(spmc_get_hyp_ctx()->mailbox);
	}
}

/******************************************************************************
 * This function returns to the place where spmc_sp_synchronous_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
{
	/*
	 * The SPM must have initiated the original request through a
	 * synchronous entry into the secure partition. Jump back to the
	 * original C runtime context with the value of rc in x0.
	 */
	spm_secure_partition_exit(ec->c_rt_ctx, rc);

	panic();
}

/*******************************************************************************
 * Return FFA_ERROR with specified error code.
 ******************************************************************************/
uint64_t spmc_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/******************************************************************************
 * Helper function to validate a secure partition ID to ensure it does not
 * conflict with any other FF-A component and follows the convention to
 * indicate it resides within the secure world.
 ******************************************************************************/
bool is_ffa_secure_id_valid(uint16_t partition_id)
{
	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();

	/* Ensure the ID is not the invalid partition ID. */
	if (partition_id == INV_SP_ID) {
		return false;
	}

	/* Ensure the ID is not the SPMD ID. */
	if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
		return false;
	}

	/*
	 * Ensure the ID follows the convention to indicate it resides
	 * in the secure world.
	 */
	if (!ffa_is_secure_world_id(partition_id)) {
		return false;
	}

	/* Ensure we don't conflict with the SPMC partition ID. */
	if (partition_id == FFA_SPMC_ID) {
		return false;
	}

	/* Ensure we do not already have an SP context with this ID. */
	if (spmc_get_sp_ctx(partition_id)) {
		return false;
	}

	/* Ensure we don't clash with any Logical SPs. */
	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
		if (el3_lp_descs[i].sp_id == partition_id) {
			return false;
		}
	}

	return true;
}

/*******************************************************************************
 * This function either forwards the request to the other world or returns
 * with an ERET depending on the source of the call.
 * We can assume that the destination is for an entity at a lower exception
 * level as any messages destined for a logical SP resident in EL3 will have
 * already been taken care of by the SPMC before entering this function.
 ******************************************************************************/
static uint64_t spmc_smc_return(uint32_t smc_fid,
				bool secure_origin,
				uint64_t x1,
				uint64_t x2,
				uint64_t x3,
				uint64_t x4,
				void *handle,
				void *cookie,
				uint64_t flags,
				uint16_t dst_id)
{
	/* If the destination is in the normal world always go via the SPMD. */
	if (ffa_is_normal_world_id(dst_id)) {
		return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
					cookie, handle, flags);
	}
	/*
	 * If the caller is secure and we want to return to the secure world,
	 * ERET directly.
	 */
	else if (secure_origin && ffa_is_secure_world_id(dst_id)) {
		SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
	}
	/* If we originated in the normal world then switch contexts. */
	else if (!secure_origin && ffa_is_secure_world_id(dst_id)) {
		return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
					     x3, x4, handle, flags);
	} else {
		/* Unknown state. */
		panic();
	}

	/* Should not be reached. */
	return 0;
}

/*******************************************************************************
 * FF-A ABI Handlers.
 ******************************************************************************/

/*******************************************************************************
 * Helper function to validate arg2 as part of a direct message.
 ******************************************************************************/
static inline bool direct_msg_validate_arg2(uint64_t x2)
{
	/* Check message type. */
	if (x2 & FFA_FWK_MSG_BIT) {
		/* We have a framework message, ensure it is a known message. */
		if (x2 & ~(FFA_FWK_MSG_MASK | FFA_FWK_MSG_BIT)) {
			VERBOSE("Invalid message format 0x%lx.\n", x2);
			return false;
		}
	} else {
		/* We have a partition message, ensure x2 is not set. */
		if (x2 != (uint64_t) 0) {
			VERBOSE("Arg2 MBZ for partition messages. (0x%lx).\n",
				x2);
			return false;
		}
	}
	return true;
}

/*******************************************************************************
 * Helper function to validate the destination ID of a direct response.
 ******************************************************************************/
static bool direct_msg_validate_dst_id(uint16_t dst_id)
{
	struct secure_partition_desc *sp;

	/* Check if we're targeting a normal world partition. */
	if (ffa_is_normal_world_id(dst_id)) {
		return true;
	}

	/* Or directed to the SPMC itself. */
	if (dst_id == FFA_SPMC_ID) {
		return true;
	}

	/* Otherwise ensure the SP exists. */
	sp = spmc_get_sp_ctx(dst_id);
	if (sp != NULL) {
		return true;
	}

	return false;
}

/*******************************************************************************
 * Helper function to validate the response from a Logical Partition.
 ******************************************************************************/
static bool direct_msg_validate_lp_resp(uint16_t origin_id, uint16_t lp_id,
					void *handle)
{
	/* Retrieve populated Direct Response Arguments. */
	uint64_t x1 = SMC_GET_GP(handle, CTX_GPREG_X1);
	uint64_t x2 = SMC_GET_GP(handle, CTX_GPREG_X2);
	uint16_t src_id = ffa_endpoint_source(x1);
	uint16_t dst_id = ffa_endpoint_destination(x1);

	if (src_id != lp_id) {
		ERROR("Invalid EL3 LP source ID (0x%x).\n", src_id);
		return false;
	}

	/*
	 * Check the destination ID is valid and ensure the LP is responding to
	 * the original request.
	 */
	if ((!direct_msg_validate_dst_id(dst_id)) || (dst_id != origin_id)) {
		ERROR("Invalid EL3 LP destination ID (0x%x).\n", dst_id);
		return false;
	}

	if (!direct_msg_validate_arg2(x2)) {
		ERROR("Invalid EL3 LP message encoding.\n");
		return false;
	}
	return true;
}

/*******************************************************************************
 * Handle direct request messages and route to the appropriate destination.
 ******************************************************************************/
static uint64_t direct_req_smc_handler(uint32_t smc_fid,
				       bool secure_origin,
				       uint64_t x1,
				       uint64_t x2,
				       uint64_t x3,
				       uint64_t x4,
				       void *cookie,
				       void *handle,
				       uint64_t flags)
{
	uint16_t src_id = ffa_endpoint_source(x1);
	uint16_t dst_id = ffa_endpoint_destination(x1);
	struct el3_lp_desc *el3_lp_descs;
	struct secure_partition_desc *sp;
	unsigned int idx;

	/* Check if arg2 has been populated correctly based on message type. */
	if (!direct_msg_validate_arg2(x2)) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Validate the sender is either the current SP or from the normal world. */
	if ((secure_origin && src_id != spmc_get_current_sp_ctx()->sp_id) ||
	    (!secure_origin && !ffa_is_normal_world_id(src_id))) {
		ERROR("Invalid direct request source ID (0x%x).\n", src_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	el3_lp_descs = get_el3_lp_array();

	/* Check if the request is destined for a Logical Partition. */
	for (unsigned int i = 0U; i < MAX_EL3_LP_DESCS_COUNT; i++) {
		if (el3_lp_descs[i].sp_id == dst_id) {
			uint64_t ret = el3_lp_descs[i].direct_req(
						smc_fid, secure_origin, x1, x2,
						x3, x4, cookie, handle, flags);
			if (!direct_msg_validate_lp_resp(src_id, dst_id,
							 handle)) {
				panic();
			}

			/* Message checks out. */
			return ret;
		}
	}

	/*
	 * If the request was not targeted to a LSP and originated from the
	 * secure world then it is invalid, since an SP cannot call into the
	 * Normal world and there is no other SP to call into. If there are
	 * other SPs in future then the partition runtime model would need to
	 * be validated as well.
	 */
	if (secure_origin) {
		VERBOSE("Direct request not supported to the Normal World.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check if the SP ID is valid. */
	sp = spmc_get_sp_ctx(dst_id);
	if (sp == NULL) {
		VERBOSE("Direct request to unknown partition ID (0x%x).\n",
			dst_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Protect the runtime state of a UP S-EL0 SP with a lock. */
	if (sp->runtime_el == S_EL0) {
		spin_lock(&sp->rt_state_lock);
	}

	/*
	 * Check that the target execution context is in a waiting state before
	 * forwarding the direct request to it.
413 */ 414 idx = get_ec_index(sp); 415 if (sp->ec[idx].rt_state != RT_STATE_WAITING) { 416 VERBOSE("SP context on core%u is not waiting (%u).\n", 417 idx, sp->ec[idx].rt_model); 418 419 if (sp->runtime_el == S_EL0) { 420 spin_unlock(&sp->rt_state_lock); 421 } 422 423 return spmc_ffa_error_return(handle, FFA_ERROR_BUSY); 424 } 425 426 /* 427 * Everything checks out so forward the request to the SP after updating 428 * its state and runtime model. 429 */ 430 sp->ec[idx].rt_state = RT_STATE_RUNNING; 431 sp->ec[idx].rt_model = RT_MODEL_DIR_REQ; 432 sp->ec[idx].dir_req_origin_id = src_id; 433 434 if (sp->runtime_el == S_EL0) { 435 spin_unlock(&sp->rt_state_lock); 436 } 437 438 return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4, 439 handle, cookie, flags, dst_id); 440 } 441 442 /******************************************************************************* 443 * Handle direct response messages and route to the appropriate destination. 444 ******************************************************************************/ 445 static uint64_t direct_resp_smc_handler(uint32_t smc_fid, 446 bool secure_origin, 447 uint64_t x1, 448 uint64_t x2, 449 uint64_t x3, 450 uint64_t x4, 451 void *cookie, 452 void *handle, 453 uint64_t flags) 454 { 455 uint16_t dst_id = ffa_endpoint_destination(x1); 456 struct secure_partition_desc *sp; 457 unsigned int idx; 458 459 /* Check if arg2 has been populated correctly based on message type. */ 460 if (!direct_msg_validate_arg2(x2)) { 461 return spmc_ffa_error_return(handle, 462 FFA_ERROR_INVALID_PARAMETER); 463 } 464 465 /* Check that the response did not originate from the Normal world. */ 466 if (!secure_origin) { 467 VERBOSE("Direct Response not supported from Normal World.\n"); 468 return spmc_ffa_error_return(handle, 469 FFA_ERROR_INVALID_PARAMETER); 470 } 471 472 /* 473 * Check that the response is either targeted to the Normal world or the 474 * SPMC e.g. a PM response. 475 */ 476 if (!direct_msg_validate_dst_id(dst_id)) { 477 VERBOSE("Direct response to invalid partition ID (0x%x).\n", 478 dst_id); 479 return spmc_ffa_error_return(handle, 480 FFA_ERROR_INVALID_PARAMETER); 481 } 482 483 /* Obtain the SP descriptor and update its runtime state. */ 484 sp = spmc_get_sp_ctx(ffa_endpoint_source(x1)); 485 if (sp == NULL) { 486 VERBOSE("Direct response to unknown partition ID (0x%x).\n", 487 dst_id); 488 return spmc_ffa_error_return(handle, 489 FFA_ERROR_INVALID_PARAMETER); 490 } 491 492 if (sp->runtime_el == S_EL0) { 493 spin_lock(&sp->rt_state_lock); 494 } 495 496 /* Sanity check state is being tracked correctly in the SPMC. */ 497 idx = get_ec_index(sp); 498 assert(sp->ec[idx].rt_state == RT_STATE_RUNNING); 499 500 /* Ensure SP execution context was in the right runtime model. */ 501 if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) { 502 VERBOSE("SP context on core%u not handling direct req (%u).\n", 503 idx, sp->ec[idx].rt_model); 504 if (sp->runtime_el == S_EL0) { 505 spin_unlock(&sp->rt_state_lock); 506 } 507 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED); 508 } 509 510 if (sp->ec[idx].dir_req_origin_id != dst_id) { 511 WARN("Invalid direct resp partition ID 0x%x != 0x%x on core%u.\n", 512 dst_id, sp->ec[idx].dir_req_origin_id, idx); 513 if (sp->runtime_el == S_EL0) { 514 spin_unlock(&sp->rt_state_lock); 515 } 516 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED); 517 } 518 519 /* Update the state of the SP execution context. */ 520 sp->ec[idx].rt_state = RT_STATE_WAITING; 521 522 /* Clear the ongoing direct request ID. 
	sp->ec[idx].dir_req_origin_id = INV_SP_ID;

	if (sp->runtime_el == S_EL0) {
		spin_unlock(&sp->rt_state_lock);
	}

	/*
	 * If the receiver is not the SPMC then forward the response to the
	 * Normal world.
	 */
	if (dst_id == FFA_SPMC_ID) {
		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
		/* Should not get here. */
		panic();
	}

	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
			       handle, cookie, flags, dst_id);
}

/*******************************************************************************
 * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
 * cycles.
 ******************************************************************************/
static uint64_t msg_wait_handler(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	struct secure_partition_desc *sp;
	unsigned int idx;

	/*
	 * Check that the response did not originate from the Normal world as
	 * only the secure world can call this ABI.
	 */
	if (!secure_origin) {
		VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n");
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */
	sp = spmc_get_current_sp_ctx();
	if (sp == NULL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Get the execution context of the SP that invoked FFA_MSG_WAIT.
	 */
	idx = get_ec_index(sp);
	if (sp->runtime_el == S_EL0) {
		spin_lock(&sp->rt_state_lock);
	}

	/* Ensure SP execution context was in the right runtime model. */
	if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Sanity check the state is being tracked correctly in the SPMC. */
	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);

	/*
	 * Perform a synchronous exit if the partition was initialising. The
	 * state is updated after the exit.
	 */
	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
		/* Should not get here */
		panic();
	}

	/* Update the state of the SP execution context. */
	sp->ec[idx].rt_state = RT_STATE_WAITING;

	/* Resume normal world if a secure interrupt was handled. */
	if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
		/* FFA_MSG_WAIT can only be called from the secure world. */
		unsigned int secure_state_in = SECURE;
		unsigned int secure_state_out = NON_SECURE;

		cm_el1_sysregs_context_save(secure_state_in);
		cm_el1_sysregs_context_restore(secure_state_out);
		cm_set_next_eret_context(secure_state_out);

		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}

		SMC_RET0(cm_get_context(secure_state_out));
	}

	/* Protect the runtime state of a S-EL0 SP with a lock. */
	if (sp->runtime_el == S_EL0) {
		spin_unlock(&sp->rt_state_lock);
	}

	/* Forward the response to the Normal world. */
	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
			       handle, cookie, flags, FFA_NWD_ID);
}

static uint64_t ffa_error_handler(uint32_t smc_fid,
				  bool secure_origin,
				  uint64_t x1,
				  uint64_t x2,
				  uint64_t x3,
				  uint64_t x4,
				  void *cookie,
				  void *handle,
				  uint64_t flags)
{
	struct secure_partition_desc *sp;
	unsigned int idx;

	/* Check that the response did not originate from the Normal world. */
	if (!secure_origin) {
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/* Get the descriptor of the SP that invoked FFA_ERROR. */
	sp = spmc_get_current_sp_ctx();
	if (sp == NULL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Get the execution context of the SP that invoked FFA_ERROR. */
	idx = get_ec_index(sp);

	/*
	 * We only expect FFA_ERROR to be received during SP initialisation,
	 * otherwise this is an invalid call.
	 */
	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
		ERROR("SP 0x%x failed to initialize.\n", sp->sp_id);
		spmc_sp_synchronous_exit(&sp->ec[idx], x2);
		/* Should not get here. */
		panic();
	}

	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
}

static uint64_t ffa_version_handler(uint32_t smc_fid,
				    bool secure_origin,
				    uint64_t x1,
				    uint64_t x2,
				    uint64_t x3,
				    uint64_t x4,
				    void *cookie,
				    void *handle,
				    uint64_t flags)
{
	uint32_t requested_version = x1 & FFA_VERSION_MASK;

	if (requested_version & FFA_VERSION_BIT31_MASK) {
		/* Invalid encoding, return an error. */
		SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
		/* Execution stops here. */
	}

	/* Determine the caller to store the requested version. */
	if (secure_origin) {
		/*
		 * Ensure that the SP is reporting the same version as
		 * specified in its manifest. If these do not match there is
		 * something wrong with the SP.
		 * TODO: Should we abort the SP? For now assert this is not the
		 * case.
		 */
		assert(requested_version ==
		       spmc_get_current_sp_ctx()->ffa_version);
	} else {
		/*
		 * If this is called by the normal world, record this
		 * information in its descriptor.
		 */
		spmc_get_hyp_ctx()->ffa_version = requested_version;
	}

	SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					  FFA_VERSION_MINOR));
}

/*******************************************************************************
 * Helper function to obtain the FF-A version of the calling partition.
 ******************************************************************************/
uint32_t get_partition_ffa_version(bool secure_origin)
{
	if (secure_origin) {
		return spmc_get_current_sp_ctx()->ffa_version;
	} else {
		return spmc_get_hyp_ctx()->ffa_version;
	}
}

static uint64_t rxtx_map_handler(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	int ret;
	uint32_t error_code;
	uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
	struct mailbox *mbox;
	uintptr_t tx_address = x1;
	uintptr_t rx_address = x2;
	uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
	uint32_t buf_size = page_count * FFA_PAGE_SIZE;

	/*
	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
	 * indirect messaging with SPs.
	 * Check if the Hypervisor has invoked this ABI on behalf of a VM and
	 * reject it if this is the case.
	 */
	if (tx_address == 0 || rx_address == 0) {
		WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Ensure the specified buffers are not the same. */
	if (tx_address == rx_address) {
		WARN("TX Buffer must not be the same as RX Buffer.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Ensure the buffer size is not 0. */
	if (buf_size == 0U) {
		WARN("Buffer size must not be 0\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Ensure the buffer size is a multiple of the translation granule size
	 * in TF-A.
	 */
	if (buf_size % PAGE_SIZE != 0U) {
		WARN("Buffer size must be aligned to translation granule.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Obtain the RX/TX buffer pair descriptor. */
	mbox = spmc_get_mbox_desc(secure_origin);

	spin_lock(&mbox->lock);

	/* Check if buffers have already been mapped. */
	if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
		WARN("RX/TX Buffers already mapped (%p/%p)\n",
		     (void *) mbox->rx_buffer, (void *)mbox->tx_buffer);
		error_code = FFA_ERROR_DENIED;
		goto err;
	}

	/* memmap the TX buffer as read only. */
	ret = mmap_add_dynamic_region(tx_address, /* PA */
				      tx_address, /* VA */
				      buf_size, /* size */
				      mem_atts | MT_RO_DATA); /* attrs */
	if (ret != 0) {
		/* Return the correct error code. */
		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
						FFA_ERROR_INVALID_PARAMETER;
		WARN("Unable to map TX buffer: %d\n", error_code);
		goto err;
	}

	/* memmap the RX buffer as read write. */
	ret = mmap_add_dynamic_region(rx_address, /* PA */
				      rx_address, /* VA */
				      buf_size, /* size */
				      mem_atts | MT_RW_DATA); /* attrs */

	if (ret != 0) {
		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
						FFA_ERROR_INVALID_PARAMETER;
		WARN("Unable to map RX buffer: %d\n", error_code);
		/* Unmap the TX buffer again. */
		mmap_remove_dynamic_region(tx_address, buf_size);
		goto err;
	}

	mbox->tx_buffer = (void *) tx_address;
	mbox->rx_buffer = (void *) rx_address;
	mbox->rxtx_page_count = page_count;
	spin_unlock(&mbox->lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);
	/* Execution stops here. */
err:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, error_code);
}

static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
				   bool secure_origin,
				   uint64_t x1,
				   uint64_t x2,
				   uint64_t x3,
				   uint64_t x4,
				   void *cookie,
				   void *handle,
				   uint64_t flags)
{
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

	/*
	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
	 * ABI on behalf of a VM and reject it if this is the case.
	 */
	if (x1 != 0UL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&mbox->lock);

	/* Check if buffers are currently mapped. */
	if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
		spin_unlock(&mbox->lock);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Unmap RX Buffer */
	if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
				       buf_size) != 0) {
		WARN("Unable to unmap RX buffer!\n");
	}

	mbox->rx_buffer = 0;

	/* Unmap TX Buffer */
	if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
				       buf_size) != 0) {
		WARN("Unable to unmap TX buffer!\n");
	}

	mbox->tx_buffer = 0;
	mbox->rxtx_page_count = 0;

	spin_unlock(&mbox->lock);
	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}

/*
 * Helper function to populate the properties field of a Partition Info Get
 * descriptor.
 */
static uint32_t
partition_info_get_populate_properties(uint32_t sp_properties,
				       enum sp_execution_state sp_ec_state)
{
	uint32_t properties = sp_properties;
	uint32_t ec_state;

	/* Determine the execution state of the SP. */
	ec_state = sp_ec_state == SP_STATE_AARCH64 ?
		   FFA_PARTITION_INFO_GET_AARCH64_STATE :
		   FFA_PARTITION_INFO_GET_AARCH32_STATE;

	properties |= ec_state << FFA_PARTITION_INFO_GET_EXEC_STATE_SHIFT;

	return properties;
}

/*
 * Collate the partition information in a v1.1 partition information
 * descriptor format; this will be converted later if required.
 */
static int partition_info_get_handler_v1_1(uint32_t *uuid,
					   struct ffa_partition_info_v1_1
						  *partitions,
					   uint32_t max_partitions,
					   uint32_t *partition_count)
{
	uint32_t index;
	struct ffa_partition_info_v1_1 *desc;
	bool null_uuid = is_null_uuid(uuid);
	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();

	/* Deal with Logical Partitions. */
	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
		if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
			/* Found a matching UUID, populate appropriately. */
			if (*partition_count >= max_partitions) {
				return FFA_ERROR_NO_MEMORY;
			}

			desc = &partitions[*partition_count];
			desc->ep_id = el3_lp_descs[index].sp_id;
			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
			/* LSPs must be AArch64. */
			desc->properties =
				partition_info_get_populate_properties(
					el3_lp_descs[index].properties,
					SP_STATE_AARCH64);

			if (null_uuid) {
				copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
			}
			(*partition_count)++;
		}
	}

	/* Deal with physical SPs. */
	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
			/* Found a matching UUID, populate appropriately. */
			if (*partition_count >= max_partitions) {
				return FFA_ERROR_NO_MEMORY;
			}

			desc = &partitions[*partition_count];
			desc->ep_id = sp_desc[index].sp_id;
			/*
			 * Execution context count must match No. cores for
			 * S-EL1 SPs.
			 */
			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
			desc->properties =
				partition_info_get_populate_properties(
					sp_desc[index].properties,
					sp_desc[index].execution_state);

			if (null_uuid) {
				copy_uuid(desc->uuid, sp_desc[index].uuid);
			}
			(*partition_count)++;
		}
	}
	return 0;
}

/*
 * Handle the case where the caller only wants the count of partitions
 * matching a given UUID and does not want the corresponding descriptors
 * populated.
 */
static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
{
	uint32_t index = 0;
	uint32_t partition_count = 0;
	bool null_uuid = is_null_uuid(uuid);
	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();

	/* Deal with Logical Partitions. */
	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
		if (null_uuid ||
		    uuid_match(uuid, el3_lp_descs[index].uuid)) {
			(partition_count)++;
		}
	}

	/* Deal with physical SPs. */
	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
			(partition_count)++;
		}
	}
	return partition_count;
}

/*
 * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
 * the corresponding descriptor format from the v1.1 descriptor array.
 */
static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
					     *partitions,
					     struct mailbox *mbox,
					     int partition_count)
{
	uint32_t index;
	uint32_t buf_size;
	uint32_t descriptor_size;
	struct ffa_partition_info_v1_0 *v1_0_partitions =
		(struct ffa_partition_info_v1_0 *) mbox->rx_buffer;

	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
	descriptor_size = partition_count *
			  sizeof(struct ffa_partition_info_v1_0);

	if (descriptor_size > buf_size) {
		return FFA_ERROR_NO_MEMORY;
	}

	for (index = 0U; index < partition_count; index++) {
		v1_0_partitions[index].ep_id = partitions[index].ep_id;
		v1_0_partitions[index].execution_ctx_count =
			partitions[index].execution_ctx_count;
		/* Only report v1.0 properties. */
		v1_0_partitions[index].properties =
			(partitions[index].properties &
			 FFA_PARTITION_INFO_GET_PROPERTIES_V1_0_MASK);
	}
	return 0;
}

/*
 * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
 * v1.0 implementations.
 */
static uint64_t partition_info_get_handler(uint32_t smc_fid,
					   bool secure_origin,
					   uint64_t x1,
					   uint64_t x2,
					   uint64_t x3,
					   uint64_t x4,
					   void *cookie,
					   void *handle,
					   uint64_t flags)
{
	int ret;
	uint32_t partition_count = 0;
	uint32_t size = 0;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	struct mailbox *mbox;
	uint64_t info_get_flags;
	bool count_only;
	uint32_t uuid[4];

	uuid[0] = x1;
	uuid[1] = x2;
	uuid[2] = x3;
	uuid[3] = x4;

	/* Determine if the Partition descriptors should be populated. */
	info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
	count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);

	/* Handle the case where we don't need to populate the descriptors. */
	if (count_only) {
		partition_count = partition_info_get_handler_count_only(uuid);
		if (partition_count == 0) {
			return spmc_ffa_error_return(handle,
						FFA_ERROR_INVALID_PARAMETER);
		}
	} else {
		struct ffa_partition_info_v1_1 partitions[MAX_SP_LP_PARTITIONS];

		/*
		 * Handle the case where the partition descriptors are required,
		 * check we have the buffers available and populate the
		 * appropriate structure version.
		 */

		/* Obtain the v1.1 format of the descriptors. */
		ret = partition_info_get_handler_v1_1(uuid, partitions,
						      MAX_SP_LP_PARTITIONS,
						      &partition_count);

		/* Check if an error occurred during discovery. */
		if (ret != 0) {
			goto err;
		}

		/* If we didn't find any matches the UUID is unknown. */
		if (partition_count == 0) {
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err;
		}

		/* Obtain the partition mailbox RX/TX buffer pair descriptor. */
		mbox = spmc_get_mbox_desc(secure_origin);

		/*
		 * If the caller has not bothered registering its RX/TX pair
		 * then return an error code.
		 */
		spin_lock(&mbox->lock);
		if (mbox->rx_buffer == NULL) {
			ret = FFA_ERROR_BUSY;
			goto err_unlock;
		}

		/* Ensure the RX buffer is currently free. */
		if (mbox->state != MAILBOX_STATE_EMPTY) {
			ret = FFA_ERROR_BUSY;
			goto err_unlock;
		}

		/* Zero the RX buffer before populating. */
		(void)memset(mbox->rx_buffer, 0,
			     mbox->rxtx_page_count * FFA_PAGE_SIZE);

		/*
		 * Depending on the FF-A version of the requesting partition
		 * we may need to convert to a v1.0 format otherwise we can copy
		 * directly.
		 */
		if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
			ret = partition_info_populate_v1_0(partitions,
							   mbox,
							   partition_count);
			if (ret != 0) {
				goto err_unlock;
			}
		} else {
			uint32_t buf_size = mbox->rxtx_page_count *
					    FFA_PAGE_SIZE;

			/* Ensure the descriptor will fit in the buffer. */
			size = sizeof(struct ffa_partition_info_v1_1);
			if (partition_count * size > buf_size) {
				ret = FFA_ERROR_NO_MEMORY;
				goto err_unlock;
			}
			memcpy(mbox->rx_buffer, partitions,
			       partition_count * size);
		}

		mbox->state = MAILBOX_STATE_FULL;
		spin_unlock(&mbox->lock);
	}
	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);

err_unlock:
	spin_unlock(&mbox->lock);
err:
	return spmc_ffa_error_return(handle, ret);
}

static uint64_t ffa_feature_success(void *handle, uint32_t arg2)
{
	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, arg2);
}

static uint64_t ffa_features_retrieve_request(bool secure_origin,
					      uint32_t input_properties,
					      void *handle)
{
	/*
	 * If we're called by the normal world we don't support any
	 * additional features.
	 */
	if (!secure_origin) {
		if ((input_properties & FFA_FEATURES_RET_REQ_NS_BIT) != 0U) {
			return spmc_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

	} else {
		struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
		/*
		 * If v1.1 the NS bit must be set otherwise it is an invalid
		 * call. If v1.0 check and store whether the SP has requested
		 * the use of the NS bit.
		 */
		if (sp->ffa_version == MAKE_FFA_VERSION(1, 1)) {
			if ((input_properties &
			     FFA_FEATURES_RET_REQ_NS_BIT) == 0U) {
				return spmc_ffa_error_return(handle,
						       FFA_ERROR_NOT_SUPPORTED);
			}
			return ffa_feature_success(handle,
						   FFA_FEATURES_RET_REQ_NS_BIT);
		} else {
			sp->ns_bit_requested = (input_properties &
						FFA_FEATURES_RET_REQ_NS_BIT) !=
						0U;
		}
		if (sp->ns_bit_requested) {
			return ffa_feature_success(handle,
						   FFA_FEATURES_RET_REQ_NS_BIT);
		}
	}
	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}

static uint64_t ffa_features_handler(uint32_t smc_fid,
				     bool secure_origin,
				     uint64_t x1,
				     uint64_t x2,
				     uint64_t x3,
				     uint64_t x4,
				     void *cookie,
				     void *handle,
				     uint64_t flags)
{
	uint32_t function_id = (uint32_t) x1;
	uint32_t input_properties = (uint32_t) x2;

	/* Check if a Feature ID was requested. */
	if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) {
		/* We currently don't support any additional features. */
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/*
	 * Handle the cases where we have separate handlers due to additional
	 * properties.
	 */
	switch (function_id) {
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
		return ffa_features_retrieve_request(secure_origin,
						     input_properties,
						     handle);
	}

	/*
	 * We don't currently support additional input properties for these
	 * other ABIs therefore ensure this value is set to 0.
	 */
	if (input_properties != 0U) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_NOT_SUPPORTED);
	}

	/* Report if any other FF-A ABI is supported. */
	switch (function_id) {
	/* Supported features from both worlds. */
	case FFA_ERROR:
	case FFA_SUCCESS_SMC32:
	case FFA_INTERRUPT:
	case FFA_SPM_ID_GET:
	case FFA_ID_GET:
	case FFA_FEATURES:
	case FFA_VERSION:
	case FFA_RX_RELEASE:
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_PARTITION_INFO_GET:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_MEM_FRAG_TX:
	case FFA_MSG_RUN:

		/*
		 * We are relying on the fact that the other registers
		 * will be set to 0 as these values align with the
		 * currently implemented features of the SPMC. If this
		 * changes this function must be extended to handle
		 * reporting the additional functionality.
		 */

		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	/* Supported ABIs only from the secure world. */
	case FFA_SECONDARY_EP_REGISTER_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MEM_RELINQUISH:
	case FFA_MSG_WAIT:
	case FFA_CONSOLE_LOG_SMC32:
	case FFA_CONSOLE_LOG_SMC64:

		if (!secure_origin) {
			return spmc_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	/* Supported features only from the normal world. */
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_RECLAIM:
	case FFA_MEM_FRAG_RX:

		if (secure_origin) {
			return spmc_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	default:
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_NOT_SUPPORTED);
	}
}

static uint64_t ffa_id_get_handler(uint32_t smc_fid,
				   bool secure_origin,
				   uint64_t x1,
				   uint64_t x2,
				   uint64_t x3,
				   uint64_t x4,
				   void *cookie,
				   void *handle,
				   uint64_t flags)
{
	if (secure_origin) {
		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
			 spmc_get_current_sp_ctx()->sp_id);
	} else {
		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
			 spmc_get_hyp_ctx()->ns_ep_id);
	}
}

/*
 * Enable an SP to query the ID assigned to the SPMC.
 */
static uint64_t ffa_spm_id_get_handler(uint32_t smc_fid,
				       bool secure_origin,
				       uint64_t x1,
				       uint64_t x2,
				       uint64_t x3,
				       uint64_t x4,
				       void *cookie,
				       void *handle,
				       uint64_t flags)
{
	assert(x1 == 0UL);
	assert(x2 == 0UL);
	assert(x3 == 0UL);
	assert(x4 == 0UL);
	assert(SMC_GET_GP(handle, CTX_GPREG_X5) == 0UL);
	assert(SMC_GET_GP(handle, CTX_GPREG_X6) == 0UL);
	assert(SMC_GET_GP(handle, CTX_GPREG_X7) == 0UL);

	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, FFA_SPMC_ID);
}

static uint64_t ffa_run_handler(uint32_t smc_fid,
				bool secure_origin,
				uint64_t x1,
				uint64_t x2,
				uint64_t x3,
				uint64_t x4,
				void *cookie,
				void *handle,
				uint64_t flags)
{
	struct secure_partition_desc *sp;
	uint16_t target_id = FFA_RUN_EP_ID(x1);
	uint16_t vcpu_id = FFA_RUN_VCPU_ID(x1);
	unsigned int idx;
	unsigned int *rt_state;
	unsigned int *rt_model;

	/* Can only be called from the normal world. */
	if (secure_origin) {
		ERROR("FFA_RUN can only be called from NWd.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Cannot run a Normal world partition. */
	if (ffa_is_normal_world_id(target_id)) {
		ERROR("Cannot run a NWd partition (0x%x).\n", target_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check that the target SP exists. */
	sp = spmc_get_sp_ctx(target_id);
	if (sp == NULL) {
		ERROR("Unknown partition ID (0x%x).\n", target_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	idx = get_ec_index(sp);

	if (idx != vcpu_id) {
		ERROR("Cannot run vcpu %d != %d.\n", idx, vcpu_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}
	if (sp->runtime_el == S_EL0) {
		spin_lock(&sp->rt_state_lock);
	}
	rt_state = &((sp->ec[idx]).rt_state);
	rt_model = &((sp->ec[idx]).rt_model);
	if (*rt_state == RT_STATE_RUNNING) {
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		ERROR("Partition (0x%x) is already running.\n", target_id);
		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
	}

	/*
	 * Sanity check that if the execution context was not waiting then it
	 * was either in the direct request or the run partition runtime model.
	 */
	if (*rt_state == RT_STATE_PREEMPTED || *rt_state == RT_STATE_BLOCKED) {
		assert(*rt_model == RT_MODEL_RUN ||
		       *rt_model == RT_MODEL_DIR_REQ);
	}

	/*
	 * If the context was waiting then update the partition runtime model.
	 */
	if (*rt_state == RT_STATE_WAITING) {
		*rt_model = RT_MODEL_RUN;
	}

	/*
	 * Forward the request to the correct SP vCPU after updating
	 * its state.
	 */
	*rt_state = RT_STATE_RUNNING;

	if (sp->runtime_el == S_EL0) {
		spin_unlock(&sp->rt_state_lock);
	}

	return spmc_smc_return(smc_fid, secure_origin, x1, 0, 0, 0,
			       handle, cookie, flags, target_id);
}

static uint64_t rx_release_handler(uint32_t smc_fid,
				   bool secure_origin,
				   uint64_t x1,
				   uint64_t x2,
				   uint64_t x3,
				   uint64_t x4,
				   void *cookie,
				   void *handle,
				   uint64_t flags)
{
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);

	spin_lock(&mbox->lock);

	if (mbox->state != MAILBOX_STATE_FULL) {
		spin_unlock(&mbox->lock);
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	mbox->state = MAILBOX_STATE_EMPTY;
	spin_unlock(&mbox->lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}

static uint64_t spmc_ffa_console_log(uint32_t smc_fid,
				     bool secure_origin,
				     uint64_t x1,
				     uint64_t x2,
				     uint64_t x3,
				     uint64_t x4,
				     void *cookie,
				     void *handle,
				     uint64_t flags)
{
	/* Maximum number of characters is 48: 6 registers of 8 bytes each. */
	char chars[48] = {0};
	size_t chars_max;
	size_t chars_count = x1;

	/* Does not support request from NWd. */
	if (!secure_origin) {
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	assert(smc_fid == FFA_CONSOLE_LOG_SMC32 || smc_fid == FFA_CONSOLE_LOG_SMC64);
	if (smc_fid == FFA_CONSOLE_LOG_SMC32) {
		uint32_t *registers = (uint32_t *)chars;
		registers[0] = (uint32_t)x2;
		registers[1] = (uint32_t)x3;
		registers[2] = (uint32_t)x4;
		registers[3] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X5);
		registers[4] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X6);
		registers[5] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X7);
		chars_max = 6 * sizeof(uint32_t);
	} else {
		uint64_t *registers = (uint64_t *)chars;
		registers[0] = x2;
		registers[1] = x3;
		registers[2] = x4;
		registers[3] = SMC_GET_GP(handle, CTX_GPREG_X5);
		registers[4] = SMC_GET_GP(handle, CTX_GPREG_X6);
		registers[5] = SMC_GET_GP(handle, CTX_GPREG_X7);
		chars_max = 6 * sizeof(uint64_t);
	}

	if ((chars_count == 0) || (chars_count > chars_max)) {
		return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
	}

	for (size_t i = 0; (i < chars_count) && (chars[i] != '\0'); i++) {
		putchar(chars[i]);
	}

	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}

/*
 * Perform initial validation on the provided secondary entry point.
 * For now ensure it does not lie within the BL31 Image or the SP's
 * RX/TX buffers as these are mapped within EL3.
 * TODO: perform validation for additional invalid memory regions.
 */
static int validate_secondary_ep(uintptr_t ep, struct secure_partition_desc *sp)
{
	struct mailbox *mb;
	uintptr_t buffer_size;
	uintptr_t sp_rx_buffer;
	uintptr_t sp_tx_buffer;
	uintptr_t sp_rx_buffer_limit;
	uintptr_t sp_tx_buffer_limit;

	mb = &sp->mailbox;
	buffer_size = (uintptr_t) (mb->rxtx_page_count * FFA_PAGE_SIZE);
	sp_rx_buffer = (uintptr_t) mb->rx_buffer;
	sp_tx_buffer = (uintptr_t) mb->tx_buffer;
	sp_rx_buffer_limit = sp_rx_buffer + buffer_size;
	sp_tx_buffer_limit = sp_tx_buffer + buffer_size;

	/*
	 * Check if the entry point lies within BL31, or the
	 * SP's RX or TX buffer.
	 */
	if ((ep >= BL31_BASE && ep < BL31_LIMIT) ||
	    (ep >= sp_rx_buffer && ep < sp_rx_buffer_limit) ||
	    (ep >= sp_tx_buffer && ep < sp_tx_buffer_limit)) {
		return -EINVAL;
	}
	return 0;
}

/*******************************************************************************
 * This function handles the FFA_SECONDARY_EP_REGISTER SMC to allow an SP to
 * register an entry point for initialization during a secondary cold boot.
 ******************************************************************************/
static uint64_t ffa_sec_ep_register_handler(uint32_t smc_fid,
					    bool secure_origin,
					    uint64_t x1,
					    uint64_t x2,
					    uint64_t x3,
					    uint64_t x4,
					    void *cookie,
					    void *handle,
					    uint64_t flags)
{
	struct secure_partition_desc *sp;
	struct sp_exec_ctx *sp_ctx;

	/* This request cannot originate from the Normal world. */
	if (!secure_origin) {
		WARN("%s: Can only be called from SWd.\n", __func__);
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/* Get the context of the current SP. */
	sp = spmc_get_current_sp_ctx();
	if (sp == NULL) {
		WARN("%s: Cannot find SP context.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Only an S-EL1 SP should be invoking this ABI. */
	if (sp->runtime_el != S_EL1) {
		WARN("%s: Can only be called for a S-EL1 SP.\n", __func__);
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Ensure the SP is in its initialization state. */
	sp_ctx = spmc_get_sp_ec(sp);
	if (sp_ctx->rt_model != RT_MODEL_INIT) {
		WARN("%s: Can only be called during SP initialization.\n",
		     __func__);
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Perform initial validation of the secondary entry point. */
	if (validate_secondary_ep(x1, sp)) {
		WARN("%s: Invalid entry point provided (0x%lx).\n",
		     __func__, x1);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Update the secondary entrypoint in SP context.
	 * We don't need a lock here as during partition initialization there
	 * will only be a single core online.
	 */
	sp->secondary_ep = x1;
	VERBOSE("%s: 0x%lx\n", __func__, sp->secondary_ep);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}

/*******************************************************************************
 * Permissions are encoded using a different format in the FFA_MEM_PERM_* ABIs
 * than in the Trusted Firmware, where the mmap_attr_t enum type is used.
 * This function converts a permission value from the FF-A format to the
 * mmap_attr_t format by setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and
 * MT_EXECUTE/MT_EXECUTE_NEVER. The other fields are left as 0 because they are
 * ignored by the function xlat_change_mem_attributes_ctx().
 ******************************************************************************/
static unsigned int ffa_perm_to_mmap_perm(unsigned int perms)
{
	unsigned int tf_attr = 0U;
	unsigned int access;

	/* Deal with data access permissions first. */
	access = (perms & FFA_MEM_PERM_DATA_MASK) >> FFA_MEM_PERM_DATA_SHIFT;

	switch (access) {
	case FFA_MEM_PERM_DATA_RW:
		/* Return 0 if the execute is set with RW. */
		if ((perms & FFA_MEM_PERM_INST_NON_EXEC) != 0) {
			tf_attr |= MT_RW | MT_USER | MT_EXECUTE_NEVER;
		}
		break;

	case FFA_MEM_PERM_DATA_RO:
		tf_attr |= MT_RO | MT_USER;
		/* Deal with the instruction access permissions next. */
		if ((perms & FFA_MEM_PERM_INST_NON_EXEC) == 0) {
			tf_attr |= MT_EXECUTE;
		} else {
			tf_attr |= MT_EXECUTE_NEVER;
		}
		break;

	case FFA_MEM_PERM_DATA_NA:
	default:
		return tf_attr;
	}

	return tf_attr;
}

/*******************************************************************************
 * Handler to set the permissions of a set of contiguous pages of a S-EL0 SP
 ******************************************************************************/
static uint64_t ffa_mem_perm_set_handler(uint32_t smc_fid,
					 bool secure_origin,
					 uint64_t x1,
					 uint64_t x2,
					 uint64_t x3,
					 uint64_t x4,
					 void *cookie,
					 void *handle,
					 uint64_t flags)
{
	struct secure_partition_desc *sp;
	unsigned int idx;
	uintptr_t base_va = (uintptr_t) x1;
	size_t size = (size_t)(x2 * PAGE_SIZE);
	uint32_t tf_attr;
	int ret;

	/* This request cannot originate from the Normal world. */
	if (!secure_origin) {
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	if (size == 0) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Get the context of the current SP. */
	sp = spmc_get_current_sp_ctx();
	if (sp == NULL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* A S-EL1 SP has no business invoking this ABI. */
	if (sp->runtime_el == S_EL1) {
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	if ((x3 & ~((uint64_t)FFA_MEM_PERM_MASK)) != 0) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Get the execution context of the calling SP. */
	idx = get_ec_index(sp);

	/*
	 * Ensure that the S-EL0 SP is initialising itself. We do not need to
	 * synchronise this operation through a spinlock since a S-EL0 SP is UP
	 * and can only be initialising on this cpu.
	 */
	if (sp->ec[idx].rt_model != RT_MODEL_INIT) {
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	VERBOSE("Setting memory permissions:\n");
	VERBOSE("  Start address  : 0x%lx\n", base_va);
	VERBOSE("  Number of pages: %lu (%zu bytes)\n", x2, size);
	VERBOSE("  Attributes     : 0x%x\n", (uint32_t)x3);

	/* Convert inbound permissions to TF-A permission attributes */
	tf_attr = ffa_perm_to_mmap_perm((unsigned int)x3);
	if (tf_attr == 0U) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Request the change in permissions */
	ret = xlat_change_mem_attributes_ctx(sp->xlat_ctx_handle,
					     base_va, size, tf_attr);
	if (ret != 0) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}

/*******************************************************************************
 * Permissions are encoded using a different format in the FFA_MEM_PERM_* ABIs
 * than in the Trusted Firmware, where the mmap_attr_t enum type is used. This
 * function converts a permission value from the mmap_attr_t format to the FF-A
 * format.
 ******************************************************************************/
static unsigned int mmap_perm_to_ffa_perm(unsigned int attr)
{
	unsigned int perms = 0U;
	unsigned int data_access;

	if ((attr & MT_USER) == 0) {
		/* No access from EL0. */
		data_access = FFA_MEM_PERM_DATA_NA;
	} else {
		if ((attr & MT_RW) != 0) {
			data_access = FFA_MEM_PERM_DATA_RW;
		} else {
			data_access = FFA_MEM_PERM_DATA_RO;
		}
	}

	perms |= (data_access & FFA_MEM_PERM_DATA_MASK)
		 << FFA_MEM_PERM_DATA_SHIFT;

	if ((attr & MT_EXECUTE_NEVER) != 0U) {
		perms |= FFA_MEM_PERM_INST_NON_EXEC;
	}

	return perms;
}

/*******************************************************************************
 * Handler to get the permissions of a set of contiguous pages of a S-EL0 SP
 ******************************************************************************/
static uint64_t ffa_mem_perm_get_handler(uint32_t smc_fid,
					 bool secure_origin,
					 uint64_t x1,
					 uint64_t x2,
					 uint64_t x3,
					 uint64_t x4,
					 void *cookie,
					 void *handle,
					 uint64_t flags)
{
	struct secure_partition_desc *sp;
	unsigned int idx;
	uintptr_t base_va = (uintptr_t)x1;
	uint32_t tf_attr = 0;
	int ret;

	/* This request cannot originate from the Normal world. */
	if (!secure_origin) {
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/* Get the context of the current SP. */
	sp = spmc_get_current_sp_ctx();
	if (sp == NULL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* A S-EL1 SP has no business invoking this ABI. */
	if (sp->runtime_el == S_EL1) {
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Get the execution context of the calling SP. */
	idx = get_ec_index(sp);

	/*
	 * Ensure that the S-EL0 SP is initialising itself. We do not need to
	 * synchronise this operation through a spinlock since a S-EL0 SP is UP
	 * and can only be initialising on this cpu.
         */
        if (sp->ec[idx].rt_model != RT_MODEL_INIT) {
                return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
        }

        /* Request the permissions. */
        ret = xlat_get_mem_attributes_ctx(sp->xlat_ctx_handle, base_va,
                                          &tf_attr);
        if (ret != 0) {
                return spmc_ffa_error_return(handle,
                                             FFA_ERROR_INVALID_PARAMETER);
        }

        /* Convert the TF-A permissions into FF-A permission attributes. */
        x2 = mmap_perm_to_ffa_perm(tf_attr);

        SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, x2);
}

/*******************************************************************************
 * This function parses the Secure Partition manifest and extracts the details
 * needed to prepare the Secure Partition image context and, if present, the
 * Secure Partition image boot arguments.
 ******************************************************************************/
static int sp_manifest_parse(void *sp_manifest, int offset,
                             struct secure_partition_desc *sp,
                             entry_point_info_t *ep_info,
                             int32_t *boot_info_reg)
{
        int32_t ret, node;
        uint32_t config_32;

        /*
         * Look for the mandatory fields that are expected to be present in
         * the SP manifests.
         */
        node = fdt_path_offset(sp_manifest, "/");
        if (node < 0) {
                ERROR("Did not find root node.\n");
                return node;
        }

        ret = fdt_read_uint32_array(sp_manifest, node, "uuid",
                                    ARRAY_SIZE(sp->uuid), sp->uuid);
        if (ret != 0) {
                ERROR("Missing Secure Partition UUID.\n");
                return ret;
        }

        ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
        if (ret != 0) {
                ERROR("Missing SP Exception Level information.\n");
                return ret;
        }

        sp->runtime_el = config_32;

        ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
        if (ret != 0) {
                ERROR("Missing Secure Partition FF-A Version.\n");
                return ret;
        }

        sp->ffa_version = config_32;

        ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
        if (ret != 0) {
                ERROR("Missing Secure Partition Execution State.\n");
                return ret;
        }

        sp->execution_state = config_32;

        ret = fdt_read_uint32(sp_manifest, node,
                              "messaging-method", &config_32);
        if (ret != 0) {
                ERROR("Missing Secure Partition messaging method.\n");
                return ret;
        }

        /* Validate this entry; we currently only support direct messaging. */
        if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
                           FFA_PARTITION_DIRECT_REQ_SEND)) != 0U) {
                WARN("Invalid Secure Partition messaging method (0x%x)\n",
                     config_32);
                return -EINVAL;
        }

        sp->properties = config_32;

        ret = fdt_read_uint32(sp_manifest, node,
                              "execution-ctx-count", &config_32);

        if (ret != 0) {
                ERROR("Missing SP Execution Context Count.\n");
                return ret;
        }

        /*
         * Ensure this field is set correctly in the manifest. Since the value
         * is currently hardcoded for S-EL1 partitions, it only needs to be
         * validated here rather than saved.
         */
        if ((sp->runtime_el == S_EL1) && (config_32 != PLATFORM_CORE_COUNT)) {
                ERROR("SP Execution Context Count (%u) must be %u.\n",
                      config_32, PLATFORM_CORE_COUNT);
                return -EINVAL;
        }

        /*
         * Look for the optional fields that are expected to be present in
         * an SP manifest.
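         *
         * As an illustration only, these optional properties might appear in
         * an SP manifest as follows (all values are placeholders):
         *
         *     id = <0x8001>;
         *     power-management-messages = <0x7>;
         *     gp-register-num = <0x0>;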
1942 */ 1943 ret = fdt_read_uint32(sp_manifest, node, "id", &config_32); 1944 if (ret != 0) { 1945 WARN("Missing Secure Partition ID.\n"); 1946 } else { 1947 if (!is_ffa_secure_id_valid(config_32)) { 1948 ERROR("Invalid Secure Partition ID (0x%x).\n", 1949 config_32); 1950 return -EINVAL; 1951 } 1952 sp->sp_id = config_32; 1953 } 1954 1955 ret = fdt_read_uint32(sp_manifest, node, 1956 "power-management-messages", &config_32); 1957 if (ret != 0) { 1958 WARN("Missing Power Management Messages entry.\n"); 1959 } else { 1960 if ((sp->runtime_el == S_EL0) && (config_32 != 0)) { 1961 ERROR("Power messages not supported for S-EL0 SP\n"); 1962 return -EINVAL; 1963 } 1964 1965 /* 1966 * Ensure only the currently supported power messages have 1967 * been requested. 1968 */ 1969 if (config_32 & ~(FFA_PM_MSG_SUB_CPU_OFF | 1970 FFA_PM_MSG_SUB_CPU_SUSPEND | 1971 FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME)) { 1972 ERROR("Requested unsupported PM messages (%x)\n", 1973 config_32); 1974 return -EINVAL; 1975 } 1976 sp->pwr_mgmt_msgs = config_32; 1977 } 1978 1979 ret = fdt_read_uint32(sp_manifest, node, 1980 "gp-register-num", &config_32); 1981 if (ret != 0) { 1982 WARN("Missing boot information register.\n"); 1983 } else { 1984 /* Check if a register number between 0-3 is specified. */ 1985 if (config_32 < 4) { 1986 *boot_info_reg = config_32; 1987 } else { 1988 WARN("Incorrect boot information register (%u).\n", 1989 config_32); 1990 } 1991 } 1992 1993 return 0; 1994 } 1995 1996 /******************************************************************************* 1997 * This function gets the Secure Partition Manifest base and maps the manifest 1998 * region. 1999 * Currently only one Secure Partition manifest is considered which is used to 2000 * prepare the context for the single Secure Partition. 2001 ******************************************************************************/ 2002 static int find_and_prepare_sp_context(void) 2003 { 2004 void *sp_manifest; 2005 uintptr_t manifest_base; 2006 uintptr_t manifest_base_align; 2007 entry_point_info_t *next_image_ep_info; 2008 int32_t ret, boot_info_reg = -1; 2009 struct secure_partition_desc *sp; 2010 2011 next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE); 2012 if (next_image_ep_info == NULL) { 2013 WARN("No Secure Partition image provided by BL2.\n"); 2014 return -ENOENT; 2015 } 2016 2017 sp_manifest = (void *)next_image_ep_info->args.arg0; 2018 if (sp_manifest == NULL) { 2019 WARN("Secure Partition manifest absent.\n"); 2020 return -ENOENT; 2021 } 2022 2023 manifest_base = (uintptr_t)sp_manifest; 2024 manifest_base_align = page_align(manifest_base, DOWN); 2025 2026 /* 2027 * Map the secure partition manifest region in the EL3 translation 2028 * regime. 2029 * Map an area equal to (2 * PAGE_SIZE) for now. During manifest base 2030 * alignment the region of 1 PAGE_SIZE from manifest align base may 2031 * not completely accommodate the secure partition manifest region. 2032 */ 2033 ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align, 2034 manifest_base_align, 2035 PAGE_SIZE * 2, 2036 MT_RO_DATA); 2037 if (ret != 0) { 2038 ERROR("Error while mapping SP manifest (%d).\n", ret); 2039 return ret; 2040 } 2041 2042 ret = fdt_node_offset_by_compatible(sp_manifest, -1, 2043 "arm,ffa-manifest-1.0"); 2044 if (ret < 0) { 2045 ERROR("Error happened in SP manifest reading.\n"); 2046 return -EINVAL; 2047 } 2048 2049 /* 2050 * Store the size of the manifest so that it can be used later to pass 2051 * the manifest as boot information later. 
         */
        next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest);
        INFO("Manifest address = %lx, size = %lu bytes\n", manifest_base,
             next_image_ep_info->args.arg1);

        /*
         * Select an SP descriptor for initialising the partition's execution
         * context on the primary CPU.
         */
        sp = spmc_get_current_sp_ctx();

#if SPMC_AT_EL3_SEL0_SP
        /* Assign translation tables context. */
        sp_desc->xlat_ctx_handle = spm_get_sp_xlat_context();

#endif /* SPMC_AT_EL3_SEL0_SP */
        /* Initialize entry point information for the SP. */
        SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1,
                       SECURE | EP_ST_ENABLE);

        /* Parse the SP manifest. */
        ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info,
                                &boot_info_reg);
        if (ret != 0) {
                ERROR("Error in Secure Partition manifest parsing.\n");
                return ret;
        }

        /* Check that the runtime EL in the manifest was correct. */
        if (sp->runtime_el != S_EL0 && sp->runtime_el != S_EL1) {
                ERROR("Unexpected runtime EL: %d\n", sp->runtime_el);
                return -EINVAL;
        }

        /* Perform any common initialisation. */
        spmc_sp_common_setup(sp, next_image_ep_info, boot_info_reg);

        /* Perform any initialisation specific to S-EL1 SPs. */
        if (sp->runtime_el == S_EL1) {
                spmc_el1_sp_setup(sp, next_image_ep_info);
        }

#if SPMC_AT_EL3_SEL0_SP
        /*
         * Set up the SPSR in the endpoint info for the common context
         * management routine.
         */
        if (sp->runtime_el == S_EL0) {
                spmc_el0_sp_spsr_setup(next_image_ep_info);
        }
#endif /* SPMC_AT_EL3_SEL0_SP */

        /* Initialize the SP context with the required ep info. */
        spmc_sp_common_ep_commit(sp, next_image_ep_info);

#if SPMC_AT_EL3_SEL0_SP
        /*
         * Perform any initialisation specific to S-EL0 not set by the common
         * context management routine.
         */
        if (sp->runtime_el == S_EL0) {
                spmc_el0_sp_setup(sp, boot_info_reg, sp_manifest);
        }
#endif /* SPMC_AT_EL3_SEL0_SP */
        return 0;
}

/*******************************************************************************
 * This function initialises the EL3 Logical Partitions by validating their
 * descriptors and then invoking each partition's init handler.
 ******************************************************************************/
static int32_t logical_sp_init(void)
{
        int32_t rc = 0;
        struct el3_lp_desc *el3_lp_descs;

        /*
         * Perform initial validation of the Logical Partitions.
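         * Broadly, this validation is expected to check that the descriptors
         * declared by the platform fit within MAX_EL3_LP_DESCS_COUNT and that
         * each one carries a valid, unique partition ID and the mandatory
         * handlers; see el3_sp_desc_validate() for the exact rules enforced.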
         */
        rc = el3_sp_desc_validate();
        if (rc != 0) {
                ERROR("Logical Partition validation failed!\n");
                return rc;
        }

        el3_lp_descs = get_el3_lp_array();

        INFO("Logical Secure Partition init start.\n");
        for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
                rc = el3_lp_descs[i].init();
                if (rc != 0) {
                        ERROR("Logical SP (0x%x) failed to initialize.\n",
                              el3_lp_descs[i].sp_id);
                        return rc;
                }
                VERBOSE("Logical SP (0x%x) initialized.\n",
                        el3_lp_descs[i].sp_id);
        }

        INFO("Logical Secure Partition init completed.\n");

        return rc;
}

/*******************************************************************************
 * This function takes an SP execution context pointer and performs a
 * synchronous entry into the Secure Partition.
 ******************************************************************************/
uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
{
        uint64_t rc;

        assert(ec != NULL);

        /* Assign the context of the SP to this CPU. */
        cm_set_context(&(ec->cpu_ctx), SECURE);

        /* Restore the context assigned above. */
        cm_el1_sysregs_context_restore(SECURE);
        cm_set_next_eret_context(SECURE);

        /* Invalidate TLBs at EL1. */
        tlbivmalle1();
        dsbish();

        /* Enter the Secure Partition. */
        rc = spm_secure_partition_enter(&ec->c_rt_ctx);

        /* Save the secure state. */
        cm_el1_sysregs_context_save(SECURE);

        return rc;
}

/*******************************************************************************
 * SPMC Helper Functions.
 ******************************************************************************/
static int32_t sp_init(void)
{
        uint64_t rc;
        struct secure_partition_desc *sp;
        struct sp_exec_ctx *ec;

        sp = spmc_get_current_sp_ctx();
        ec = spmc_get_sp_ec(sp);
        ec->rt_model = RT_MODEL_INIT;
        ec->rt_state = RT_STATE_RUNNING;

        INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);

        rc = spmc_sp_synchronous_entry(ec);
        if (rc != 0) {
                /* Indicate that SP init was not successful. */
                ERROR("SP (0x%x) failed to initialize (%lu).\n",
                      sp->sp_id, rc);
                return 0;
        }

        ec->rt_state = RT_STATE_WAITING;
        INFO("Secure Partition initialized.\n");

        return 1;
}

static void initialize_sp_descs(void)
{
        struct secure_partition_desc *sp;

        for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
                sp = &sp_desc[i];
                sp->sp_id = INV_SP_ID;
                sp->mailbox.rx_buffer = NULL;
                sp->mailbox.tx_buffer = NULL;
                sp->mailbox.state = MAILBOX_STATE_EMPTY;
                sp->secondary_ep = 0;
        }
}

static void initialize_ns_ep_descs(void)
{
        struct ns_endpoint_desc *ns_ep;

        for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
                ns_ep = &ns_ep_desc[i];
                /*
                 * This clashes with the Hypervisor ID but will not be a
                 * problem in practice.
                 */
                ns_ep->ns_ep_id = 0;
                ns_ep->ffa_version = 0;
                ns_ep->mailbox.rx_buffer = NULL;
                ns_ep->mailbox.tx_buffer = NULL;
                ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
        }
}

/*******************************************************************************
 * Initialize SPMC attributes for the SPMD.
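 *
 * These attributes (FF-A version, execution state and SPMC endpoint ID)
 * describe the EL3-resident SPMC to the SPMD; since there is no separate SPMC
 * manifest to parse in this configuration, the SPMD obtains them through this
 * call instead.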
 ******************************************************************************/
void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
{
        spmc_attrs->major_version = FFA_VERSION_MAJOR;
        spmc_attrs->minor_version = FFA_VERSION_MINOR;
        spmc_attrs->exec_state = MODE_RW_64;
        spmc_attrs->spmc_id = FFA_SPMC_ID;
}

/*******************************************************************************
 * Initialize the contexts of all Secure Partitions.
 ******************************************************************************/
int32_t spmc_setup(void)
{
        int32_t ret;
        uint32_t flags;

        /* Initialize endpoint descriptors. */
        initialize_sp_descs();
        initialize_ns_ep_descs();

        /*
         * Retrieve the information of the datastore for tracking shared memory
         * requests allocated by platform code and zero the region if available.
         */
        ret = plat_spmc_shmem_datastore_get(&spmc_shmem_obj_state.data,
                                            &spmc_shmem_obj_state.data_size);
        if (ret != 0) {
                ERROR("Failed to obtain memory descriptor backing store!\n");
                return ret;
        }
        memset(spmc_shmem_obj_state.data, 0, spmc_shmem_obj_state.data_size);

        /* Set up logical SPs. */
        ret = logical_sp_init();
        if (ret != 0) {
                ERROR("Failed to initialize Logical Partitions.\n");
                return ret;
        }

        /* Perform physical SP setup. */

        /* Disable MMU at EL1 (initialized by BL2). */
        disable_mmu_icache_el1();

        /* Initialize the context of the SP. */
        INFO("Secure Partition context setup start.\n");

        ret = find_and_prepare_sp_context();
        if (ret != 0) {
                ERROR("Failed to find and prepare the SP context.\n");
                return ret;
        }

        /* Register power management hooks with PSCI. */
        psci_register_spd_pm_hook(&spmc_pm);

        /*
         * Register an interrupt handler for S-EL1 interrupts
         * when generated during code executing in the
         * non-secure state.
         */
        flags = 0;
        set_interrupt_rm_flag(flags, NON_SECURE);
        ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
                                              spmc_sp_interrupt_handler,
                                              flags);
        if (ret != 0) {
                ERROR("Failed to register interrupt handler! (%d)\n", ret);
                panic();
        }

        /* Register the init function for deferred init. */
        bl31_register_bl32_init(&sp_init);

        INFO("Secure Partition setup done.\n");

        return 0;
}

/*******************************************************************************
 * Secure Partition Manager SMC handler.
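 *
 * Each case below forwards the call unmodified to the relevant FF-A ABI
 * handler together with the saved register context in 'handle'; unsupported
 * function IDs fall through to the default case and are answered with
 * FFA_ERROR(NOT_SUPPORTED).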
2323 ******************************************************************************/ 2324 uint64_t spmc_smc_handler(uint32_t smc_fid, 2325 bool secure_origin, 2326 uint64_t x1, 2327 uint64_t x2, 2328 uint64_t x3, 2329 uint64_t x4, 2330 void *cookie, 2331 void *handle, 2332 uint64_t flags) 2333 { 2334 switch (smc_fid) { 2335 2336 case FFA_VERSION: 2337 return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3, 2338 x4, cookie, handle, flags); 2339 2340 case FFA_SPM_ID_GET: 2341 return ffa_spm_id_get_handler(smc_fid, secure_origin, x1, x2, 2342 x3, x4, cookie, handle, flags); 2343 2344 case FFA_ID_GET: 2345 return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3, 2346 x4, cookie, handle, flags); 2347 2348 case FFA_FEATURES: 2349 return ffa_features_handler(smc_fid, secure_origin, x1, x2, x3, 2350 x4, cookie, handle, flags); 2351 2352 case FFA_SECONDARY_EP_REGISTER_SMC64: 2353 return ffa_sec_ep_register_handler(smc_fid, secure_origin, x1, 2354 x2, x3, x4, cookie, handle, 2355 flags); 2356 2357 case FFA_MSG_SEND_DIRECT_REQ_SMC32: 2358 case FFA_MSG_SEND_DIRECT_REQ_SMC64: 2359 return direct_req_smc_handler(smc_fid, secure_origin, x1, x2, 2360 x3, x4, cookie, handle, flags); 2361 2362 case FFA_MSG_SEND_DIRECT_RESP_SMC32: 2363 case FFA_MSG_SEND_DIRECT_RESP_SMC64: 2364 return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2, 2365 x3, x4, cookie, handle, flags); 2366 2367 case FFA_RXTX_MAP_SMC32: 2368 case FFA_RXTX_MAP_SMC64: 2369 return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4, 2370 cookie, handle, flags); 2371 2372 case FFA_RXTX_UNMAP: 2373 return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3, 2374 x4, cookie, handle, flags); 2375 2376 case FFA_PARTITION_INFO_GET: 2377 return partition_info_get_handler(smc_fid, secure_origin, x1, 2378 x2, x3, x4, cookie, handle, 2379 flags); 2380 2381 case FFA_RX_RELEASE: 2382 return rx_release_handler(smc_fid, secure_origin, x1, x2, x3, 2383 x4, cookie, handle, flags); 2384 2385 case FFA_MSG_WAIT: 2386 return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4, 2387 cookie, handle, flags); 2388 2389 case FFA_ERROR: 2390 return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4, 2391 cookie, handle, flags); 2392 2393 case FFA_MSG_RUN: 2394 return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4, 2395 cookie, handle, flags); 2396 2397 case FFA_MEM_SHARE_SMC32: 2398 case FFA_MEM_SHARE_SMC64: 2399 case FFA_MEM_LEND_SMC32: 2400 case FFA_MEM_LEND_SMC64: 2401 return spmc_ffa_mem_send(smc_fid, secure_origin, x1, x2, x3, x4, 2402 cookie, handle, flags); 2403 2404 case FFA_MEM_FRAG_TX: 2405 return spmc_ffa_mem_frag_tx(smc_fid, secure_origin, x1, x2, x3, 2406 x4, cookie, handle, flags); 2407 2408 case FFA_MEM_FRAG_RX: 2409 return spmc_ffa_mem_frag_rx(smc_fid, secure_origin, x1, x2, x3, 2410 x4, cookie, handle, flags); 2411 2412 case FFA_MEM_RETRIEVE_REQ_SMC32: 2413 case FFA_MEM_RETRIEVE_REQ_SMC64: 2414 return spmc_ffa_mem_retrieve_req(smc_fid, secure_origin, x1, x2, 2415 x3, x4, cookie, handle, flags); 2416 2417 case FFA_MEM_RELINQUISH: 2418 return spmc_ffa_mem_relinquish(smc_fid, secure_origin, x1, x2, 2419 x3, x4, cookie, handle, flags); 2420 2421 case FFA_MEM_RECLAIM: 2422 return spmc_ffa_mem_reclaim(smc_fid, secure_origin, x1, x2, x3, 2423 x4, cookie, handle, flags); 2424 case FFA_CONSOLE_LOG_SMC32: 2425 case FFA_CONSOLE_LOG_SMC64: 2426 return spmc_ffa_console_log(smc_fid, secure_origin, x1, x2, x3, 2427 x4, cookie, handle, flags); 2428 2429 case FFA_MEM_PERM_GET: 2430 return ffa_mem_perm_get_handler(smc_fid, 
                                                x3, x4, cookie, handle, flags);

        case FFA_MEM_PERM_SET:
                return ffa_mem_perm_set_handler(smc_fid, secure_origin, x1, x2,
                                                x3, x4, cookie, handle, flags);

        default:
                WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
                break;
        }
        return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
}

/*******************************************************************************
 * This function is the handler registered for S-EL1 interrupts by the SPMC. It
 * validates the interrupt and upon success arranges entry into the SP for
 * handling the interrupt.
 ******************************************************************************/
static uint64_t spmc_sp_interrupt_handler(uint32_t id,
                                          uint32_t flags,
                                          void *handle,
                                          void *cookie)
{
        struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
        struct sp_exec_ctx *ec;
        uint32_t linear_id = plat_my_core_pos();

        /* Sanity check to avoid a NULL pointer dereference. */
        assert(sp != NULL);

        /* Check the security state when the exception was generated. */
        assert(get_interrupt_src_ss(flags) == NON_SECURE);

        /* Panic if this is not an S-EL1 Partition. */
        if (sp->runtime_el != S_EL1) {
                ERROR("Interrupt received for a non S-EL1 SP on core%u.\n",
                      linear_id);
                panic();
        }

        /* Obtain a reference to the SP execution context. */
        ec = spmc_get_sp_ec(sp);

        /* Ensure the execution context is in the waiting state, else panic. */
        if (ec->rt_state != RT_STATE_WAITING) {
                ERROR("SP EC on core%u is not waiting (%u), it is (%u).\n",
                      linear_id, RT_STATE_WAITING, ec->rt_state);
                panic();
        }

        /* Update the runtime model and state of the partition. */
        ec->rt_model = RT_MODEL_INTR;
        ec->rt_state = RT_STATE_RUNNING;

        VERBOSE("SP (0x%x) interrupt start on core%u.\n", sp->sp_id, linear_id);

        /*
         * Forward the interrupt to the S-EL1 SP. The interrupt ID is not
         * populated as the SP can determine this by itself.
         * The flags field is forced to 0 so that the SVE hint bit is passed
         * as cleared for consumption by the lower EL.
         */
        return spmd_smc_switch_state(FFA_INTERRUPT, false,
                                     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
                                     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
                                     handle, 0ULL);
}
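
/*******************************************************************************
 * Illustrative sketch, not part of the SPMC: one way a platform might provide
 * the plat_spmc_shmem_datastore_get() hook consumed by spmc_setup() above,
 * assuming the hook's signature matches its use there. The buffer name and
 * PLAT_SPMC_SHMEM_DATASTORE_SIZE are placeholders chosen for this example; a
 * real platform port defines its own storage and size.
 *
 *     static uint8_t plat_spmc_shmem_datastore[PLAT_SPMC_SHMEM_DATASTORE_SIZE];
 *
 *     int plat_spmc_shmem_datastore_get(uint8_t **datastore, size_t *size)
 *     {
 *             *datastore = plat_spmc_shmem_datastore;
 *             *size = sizeof(plat_spmc_shmem_datastore);
 *             return 0;
 *     }
 ******************************************************************************/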