/*
 * Copyright (c) 2022-2025, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <stdio.h>

#include <arch_helpers.h>
#include <bl31/bl31.h>
#include <bl31/ehf.h>
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <common/fdt_wrappers.h>
#include <common/runtime_svc.h>
#include <common/uuid.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/smccc.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <libfdt.h>
#include <plat/common/platform.h>
#include <services/el3_spmc_logical_sp.h>
#include <services/ffa_svc.h>
#include <services/spmc_svc.h>
#include <services/spmd_svc.h>
#include "spmc.h"
#include "spmc_shared_mem.h"
#if TRANSFER_LIST
#include <transfer_list.h>
#endif

#include <platform_def.h>

/* FFA_MEM_PERM_* helpers */
#define FFA_MEM_PERM_MASK		U(7)
#define FFA_MEM_PERM_DATA_MASK		U(3)
#define FFA_MEM_PERM_DATA_SHIFT		U(0)
#define FFA_MEM_PERM_DATA_NA		U(0)
#define FFA_MEM_PERM_DATA_RW		U(1)
#define FFA_MEM_PERM_DATA_RES		U(2)
#define FFA_MEM_PERM_DATA_RO		U(3)
#define FFA_MEM_PERM_INST_EXEC		(U(0) << 2)
#define FFA_MEM_PERM_INST_NON_EXEC	(U(1) << 2)

/* Declare the maximum number of SPs and El3 LPs. */
#define MAX_SP_LP_PARTITIONS (SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT)

/* FF-A version implemented by this SPMC (reported via FFA_VERSION). */
#define FFA_VERSION_SPMC_MAJOR	U(1)
#define FFA_VERSION_SPMC_MINOR	U(2)

/*
 * Allocate a secure partition descriptor to describe each SP in the system that
 * does not reside at EL3.
 */
static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];

/*
 * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
 * the system that interacts with a SP. It is used to track the Hypervisor
 * buffer pair, version and ID for now. It could be extended to track VM
 * properties when the SPMC supports indirect messaging.
 */
static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];

/* Forward declaration of the secure interrupt handler registered below. */
static uint64_t spmc_sp_interrupt_handler(uint32_t id,
					  uint32_t flags,
					  void *handle,
					  void *cookie);

/*
 * Helper function to obtain the array storing the EL3
 * Logical Partition descriptors.
 */
struct el3_lp_desc *get_el3_lp_array(void)
{
	return (struct el3_lp_desc *) EL3_LP_DESCS_START;
}

/*
 * Helper function to obtain the descriptor of the last SP to whom control was
 * handed to on this physical cpu. Currently, we assume there is only one SP.
 * TODO: Expand to track multiple partitions when required.
 */
struct secure_partition_desc *spmc_get_current_sp_ctx(void)
{
	return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
}

/*
 * Helper function to obtain the execution context of an SP on the
 * current physical cpu.
 */
struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
{
	return &(sp->ec[get_ec_index(sp)]);
}

/* Helper function to get pointer to SP context from its ID. */
struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
{
	/* Check for Secure World Partitions. */
	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
		if (sp_desc[i].sp_id == id) {
			return &(sp_desc[i]);
		}
	}
	/* No SP with this ID is known to the SPMC. */
	return NULL;
}

/*
 * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
 * We assume that the first descriptor is reserved for this entity.
 */
struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
{
	return &(ns_ep_desc[0]);
}

/*
 * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
 * or OS kernel in the normal world or the last SP that was run.
 */
struct mailbox *spmc_get_mbox_desc(bool secure_origin)
{
	/* Obtain the RX/TX buffer pair descriptor. */
	if (secure_origin) {
		return &(spmc_get_current_sp_ctx()->mailbox);
	} else {
		return &(spmc_get_hyp_ctx()->mailbox);
	}
}

/******************************************************************************
 * This function returns to the place where spmc_sp_synchronous_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
{
	/*
	 * The SPM must have initiated the original request through a
	 * synchronous entry into the secure partition. Jump back to the
	 * original C runtime context with the value of rc in x0;
	 */
	spm_secure_partition_exit(ec->c_rt_ctx, rc);

	panic();
}

/*******************************************************************************
 * Return FFA_ERROR with specified error code.
 ******************************************************************************/
uint64_t spmc_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/******************************************************************************
 * Helper function to validate a secure partition ID to ensure it does not
 * conflict with any other FF-A component and follows the convention to
 * indicate it resides within the secure world.
 ******************************************************************************/
bool is_ffa_secure_id_valid(uint16_t partition_id)
{
	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();

	/* Ensure the ID is not the invalid partition ID. */
	if (partition_id == INV_SP_ID) {
		return false;
	}

	/* Ensure the ID is not the SPMD ID. */
	if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
		return false;
	}

	/*
	 * Ensure the ID follows the convention to indicate it resides
	 * in the secure world.
	 */
	if (!ffa_is_secure_world_id(partition_id)) {
		return false;
	}

	/* Ensure we don't conflict with the SPMC partition ID. */
	if (partition_id == FFA_SPMC_ID) {
		return false;
	}

	/* Ensure we do not already have an SP context with this ID. */
	if (spmc_get_sp_ctx(partition_id)) {
		return false;
	}

	/* Ensure we don't clash with any Logical SP's. */
	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
		if (el3_lp_descs[i].sp_id == partition_id) {
			return false;
		}
	}

	return true;
}

/*******************************************************************************
 * This function either forwards the request to the other world or returns
 * with an ERET depending on the source of the call.
 * We can assume that the destination is for an entity at a lower exception
 * level as any messages destined for a logical SP resident in EL3 will have
 * already been taken care of by the SPMC before entering this function.
 ******************************************************************************/
static uint64_t spmc_smc_return(uint32_t smc_fid,
				bool secure_origin,
				uint64_t x1,
				uint64_t x2,
				uint64_t x3,
				uint64_t x4,
				void *handle,
				void *cookie,
				uint64_t flags,
				uint16_t dst_id,
				uint32_t sp_ffa_version)
{
	/* If the destination is in the normal world always go via the SPMD. */
	if (ffa_is_normal_world_id(dst_id)) {
		return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
					cookie, handle, flags, sp_ffa_version);
	}
	/*
	 * If the caller is secure and we want to return to the secure world,
	 * ERET directly.
	 */
	else if (secure_origin && ffa_is_secure_world_id(dst_id)) {
		SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
	}
	/* If we originated in the normal world then switch contexts. */
	else if (!secure_origin && ffa_is_secure_world_id(dst_id)) {
		return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
					     x3, x4, handle, flags,
					     sp_ffa_version);
	} else {
		/* Unknown State. */
		panic();
	}

	/* Shouldn't be Reached. */
	return 0;
}

/*******************************************************************************
 * FF-A ABI Handlers.
 ******************************************************************************/

/*******************************************************************************
 * Helper function to validate arg2 as part of a direct message.
 ******************************************************************************/
static inline bool direct_msg_validate_arg2(uint64_t x2)
{
	/* Check message type. */
	if (x2 & FFA_FWK_MSG_BIT) {
		/* We have a framework message, ensure it is a known message. */
		if (x2 & ~(FFA_FWK_MSG_MASK | FFA_FWK_MSG_BIT)) {
			VERBOSE("Invalid message format 0x%lx.\n", x2);
			return false;
		}
	} else {
		/* We have a partition messages, ensure x2 is not set. */
		if (x2 != (uint64_t) 0) {
			VERBOSE("Arg2 MBZ for partition messages. (0x%lx).\n",
				x2);
			return false;
		}
	}
	return true;
}

/*******************************************************************************
 * Helper function to validate the destination ID of a direct response.
 ******************************************************************************/
static bool direct_msg_validate_dst_id(uint16_t dst_id)
{
	struct secure_partition_desc *sp;

	/* Check if we're targeting a normal world partition.
	 */
	if (ffa_is_normal_world_id(dst_id)) {
		return true;
	}

	/* Or directed to the SPMC itself.*/
	if (dst_id == FFA_SPMC_ID) {
		return true;
	}

	/* Otherwise ensure the SP exists. */
	sp = spmc_get_sp_ctx(dst_id);
	if (sp != NULL) {
		return true;
	}

	return false;
}

/*******************************************************************************
 * Helper function to validate the response from a Logical Partition.
 ******************************************************************************/
static bool direct_msg_validate_lp_resp(uint16_t origin_id, uint16_t lp_id,
					void *handle)
{
	/* Retrieve populated Direct Response Arguments. */
	uint64_t smc_fid = SMC_GET_GP(handle, CTX_GPREG_X0);
	uint64_t x1 = SMC_GET_GP(handle, CTX_GPREG_X1);
	uint64_t x2 = SMC_GET_GP(handle, CTX_GPREG_X2);
	uint16_t src_id = ffa_endpoint_source(x1);
	uint16_t dst_id = ffa_endpoint_destination(x1);

	/* The LP must respond with its own ID as the source. */
	if (src_id != lp_id) {
		ERROR("Invalid EL3 LP source ID (0x%x).\n", src_id);
		return false;
	}

	/*
	 * Check the destination ID is valid and ensure the LP is responding to
	 * the original request.
	 */
	if ((!direct_msg_validate_dst_id(dst_id)) || (dst_id != origin_id)) {
		ERROR("Invalid EL3 LP destination ID (0x%x).\n", dst_id);
		return false;
	}

	/* DIRECT_RESP2 (FF-A v1.2) carries no framework message in x2. */
	if ((smc_fid != FFA_MSG_SEND_DIRECT_RESP2_SMC64) &&
	    !direct_msg_validate_arg2(x2)) {
		ERROR("Invalid EL3 LP message encoding.\n");
		return false;
	}
	return true;
}

/*******************************************************************************
 * Helper function to check that partition can receive direct msg or not.
 ******************************************************************************/
static bool direct_msg_receivable(uint32_t properties, uint16_t dir_req_fnum)
{
	/*
	 * The partition manifest advertises, per ABI, whether DIRECT_REQ and
	 * DIRECT_REQ2 messages may be delivered to it.
	 */
	if ((dir_req_fnum == FFA_FNUM_MSG_SEND_DIRECT_REQ &&
	     ((properties & FFA_PARTITION_DIRECT_REQ_RECV) == 0U)) ||
	    (dir_req_fnum == FFA_FNUM_MSG_SEND_DIRECT_REQ2 &&
	     ((properties & FFA_PARTITION_DIRECT_REQ2_RECV) == 0U))) {
		return false;
	}

	return true;
}

/*******************************************************************************
 * Helper function to obtain the FF-A version of the calling partition.
 ******************************************************************************/
uint32_t get_partition_ffa_version(bool secure_origin)
{
	if (secure_origin) {
		return spmc_get_current_sp_ctx()->ffa_version;
	} else {
		return spmc_get_hyp_ctx()->ffa_version;
	}
}

/*******************************************************************************
 * Handle direct request messages and route to the appropriate destination.
 ******************************************************************************/
static uint64_t direct_req_smc_handler(uint32_t smc_fid,
				       bool secure_origin,
				       uint64_t x1,
				       uint64_t x2,
				       uint64_t x3,
				       uint64_t x4,
				       void *cookie,
				       void *handle,
				       uint64_t flags)
{
	uint16_t src_id = ffa_endpoint_source(x1);
	uint16_t dst_id = ffa_endpoint_destination(x1);
	uint16_t dir_req_funcid;
	struct el3_lp_desc *el3_lp_descs;
	struct secure_partition_desc *sp;
	unsigned int idx;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);

	dir_req_funcid = (smc_fid != FFA_MSG_SEND_DIRECT_REQ2_SMC64) ?
			 FFA_FNUM_MSG_SEND_DIRECT_REQ :
			 FFA_FNUM_MSG_SEND_DIRECT_REQ2;

	/* DIRECT_REQ2 requires the caller to implement at least FF-A v1.2. */
	if ((dir_req_funcid == FFA_FNUM_MSG_SEND_DIRECT_REQ2) &&
	    ffa_version < MAKE_FFA_VERSION(U(1), U(2))) {
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/*
	 * Sanity check for DIRECT_REQ:
	 * Check if arg2 has been populated correctly based on message type
	 */
	if ((dir_req_funcid == FFA_FNUM_MSG_SEND_DIRECT_REQ) &&
	    !direct_msg_validate_arg2(x2)) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Validate Sender is either the current SP or from the normal world. */
	if ((secure_origin && src_id != spmc_get_current_sp_ctx()->sp_id) ||
	    (!secure_origin && !ffa_is_normal_world_id(src_id))) {
		ERROR("Invalid direct request source ID (0x%x).\n", src_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	el3_lp_descs = get_el3_lp_array();

	/* Check if the request is destined for a Logical Partition. */
	for (unsigned int i = 0U; i < MAX_EL3_LP_DESCS_COUNT; i++) {
		if (el3_lp_descs[i].sp_id == dst_id) {
			if (!direct_msg_receivable(el3_lp_descs[i].properties,
						   dir_req_funcid)) {
				return spmc_ffa_error_return(handle,
							     FFA_ERROR_DENIED);
			}

			uint64_t ret = el3_lp_descs[i].direct_req(
					smc_fid, secure_origin, x1, x2,
					x3, x4, cookie, handle, flags);
			if (!direct_msg_validate_lp_resp(src_id, dst_id,
							 handle)) {
				panic();
			}

			/* Message checks out. */
			return ret;
		}
	}

	/*
	 * If the request was not targeted to a LSP and from the secure world
	 * then it is invalid since a SP cannot call into the Normal world and
	 * there is no other SP to call into. If there are other SPs in future
	 * then the partition runtime model would need to be validated as well.
	 */
	if (secure_origin) {
		VERBOSE("Direct request not supported to the Normal World.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check if the SP ID is valid. */
	sp = spmc_get_sp_ctx(dst_id);
	if (sp == NULL) {
		VERBOSE("Direct request to unknown partition ID (0x%x).\n",
			dst_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* The SP's manifest must allow receipt of this flavour of request. */
	if (!direct_msg_receivable(sp->properties, dir_req_funcid)) {
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Protect the runtime state of a UP S-EL0 SP with a lock. */
	if (sp->runtime_el == S_EL0) {
		spin_lock(&sp->rt_state_lock);
	}

	/*
	 * Check that the target execution context is in a waiting state before
	 * forwarding the direct request to it.
	 */
	idx = get_ec_index(sp);
	if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
		VERBOSE("SP context on core%u is not waiting (%u).\n",
			idx, sp->ec[idx].rt_model);

		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}

		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
	}

	/*
	 * Everything checks out so forward the request to the SP after updating
	 * its state and runtime model.
	 */
	sp->ec[idx].rt_state = RT_STATE_RUNNING;
	sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
	/* Remember who sent the request so the response can be matched. */
	sp->ec[idx].dir_req_origin_id = src_id;
	sp->ec[idx].dir_req_funcid = dir_req_funcid;

	if (sp->runtime_el == S_EL0) {
		spin_unlock(&sp->rt_state_lock);
	}

	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
			       handle, cookie, flags, dst_id, sp->ffa_version);
}

/*******************************************************************************
 * Handle direct response messages and route to the appropriate destination.
 ******************************************************************************/
static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
					bool secure_origin,
					uint64_t x1,
					uint64_t x2,
					uint64_t x3,
					uint64_t x4,
					void *cookie,
					void *handle,
					uint64_t flags)
{
	uint16_t dst_id = ffa_endpoint_destination(x1);
	uint16_t dir_req_funcid;
	struct secure_partition_desc *sp;
	unsigned int idx;

	/* Map the response back onto the request ABI it must pair with. */
	dir_req_funcid = (smc_fid != FFA_MSG_SEND_DIRECT_RESP2_SMC64) ?
			 FFA_FNUM_MSG_SEND_DIRECT_REQ :
			 FFA_FNUM_MSG_SEND_DIRECT_REQ2;

	/* Check if arg2 has been populated correctly based on message type. */
	if (!direct_msg_validate_arg2(x2)) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check that the response did not originate from the Normal world. */
	if (!secure_origin) {
		VERBOSE("Direct Response not supported from Normal World.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Check that the response is either targeted to the Normal world or the
	 * SPMC e.g. a PM response.
	 */
	if (!direct_msg_validate_dst_id(dst_id)) {
		VERBOSE("Direct response to invalid partition ID (0x%x).\n",
			dst_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Obtain the SP descriptor and update its runtime state. */
	sp = spmc_get_sp_ctx(ffa_endpoint_source(x1));
	if (sp == NULL) {
		/*
		 * NOTE(review): this message prints dst_id although the
		 * failed lookup used the source ID — confirm intent upstream.
		 */
		VERBOSE("Direct response to unknown partition ID (0x%x).\n",
			dst_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Protect the runtime state of a UP S-EL0 SP with a lock. */
	if (sp->runtime_el == S_EL0) {
		spin_lock(&sp->rt_state_lock);
	}

	/* Sanity check state is being tracked correctly in the SPMC. */
	idx = get_ec_index(sp);
	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);

	/* Ensure SP execution context was in the right runtime model. */
	if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
		VERBOSE("SP context on core%u not handling direct req (%u).\n",
			idx, sp->ec[idx].rt_model);
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* The response ABI must match the request ABI that was forwarded. */
	if (dir_req_funcid != sp->ec[idx].dir_req_funcid) {
		WARN("Unmatched direct req/resp func id. req:%x, resp:%x on core%u.\n",
		     sp->ec[idx].dir_req_funcid, (smc_fid & FUNCID_NUM_MASK),
		     idx);
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* The response must target the endpoint that sent the request. */
	if (sp->ec[idx].dir_req_origin_id != dst_id) {
		WARN("Invalid direct resp partition ID 0x%x != 0x%x on core%u.\n",
		     dst_id, sp->ec[idx].dir_req_origin_id, idx);
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Update the state of the SP execution context. */
	sp->ec[idx].rt_state = RT_STATE_WAITING;

	/* Clear the ongoing direct request ID. */
	sp->ec[idx].dir_req_origin_id = INV_SP_ID;

	/* Clear the ongoing direct request message version. */
	sp->ec[idx].dir_req_funcid = 0U;

	if (sp->runtime_el == S_EL0) {
		spin_unlock(&sp->rt_state_lock);
	}

	/*
	 * If the receiver is not the SPMC then forward the response to the
	 * Normal world.
	 */
	if (dst_id == FFA_SPMC_ID) {
		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
		/* Should not get here. */
		panic();
	}

	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
			       handle, cookie, flags, dst_id, sp->ffa_version);
}

/*******************************************************************************
 * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
 * cycles.
620 ******************************************************************************/ 621 static uint64_t msg_wait_handler(uint32_t smc_fid, 622 bool secure_origin, 623 uint64_t x1, 624 uint64_t x2, 625 uint64_t x3, 626 uint64_t x4, 627 void *cookie, 628 void *handle, 629 uint64_t flags) 630 { 631 struct secure_partition_desc *sp; 632 unsigned int idx; 633 634 /* 635 * Check that the response did not originate from the Normal world as 636 * only the secure world can call this ABI. 637 */ 638 if (!secure_origin) { 639 VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n"); 640 return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 641 } 642 643 /* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */ 644 sp = spmc_get_current_sp_ctx(); 645 if (sp == NULL) { 646 return spmc_ffa_error_return(handle, 647 FFA_ERROR_INVALID_PARAMETER); 648 } 649 650 /* 651 * Get the execution context of the SP that invoked FFA_MSG_WAIT. 652 */ 653 idx = get_ec_index(sp); 654 if (sp->runtime_el == S_EL0) { 655 spin_lock(&sp->rt_state_lock); 656 } 657 658 /* Ensure SP execution context was in the right runtime model. */ 659 if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) { 660 if (sp->runtime_el == S_EL0) { 661 spin_unlock(&sp->rt_state_lock); 662 } 663 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED); 664 } 665 666 /* Sanity check the state is being tracked correctly in the SPMC. */ 667 assert(sp->ec[idx].rt_state == RT_STATE_RUNNING); 668 669 /* 670 * Perform a synchronous exit if the partition was initialising. The 671 * state is updated after the exit. 672 */ 673 if (sp->ec[idx].rt_model == RT_MODEL_INIT) { 674 if (sp->runtime_el == S_EL0) { 675 spin_unlock(&sp->rt_state_lock); 676 } 677 spmc_sp_synchronous_exit(&sp->ec[idx], x4); 678 /* Should not get here */ 679 panic(); 680 } 681 682 /* Update the state of the SP execution context. */ 683 sp->ec[idx].rt_state = RT_STATE_WAITING; 684 685 /* Resume normal world if a secure interrupt was handled. 
*/ 686 if (sp->ec[idx].rt_model == RT_MODEL_INTR) { 687 if (sp->runtime_el == S_EL0) { 688 spin_unlock(&sp->rt_state_lock); 689 } 690 691 return spmd_smc_switch_state(FFA_NORMAL_WORLD_RESUME, secure_origin, 692 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 693 FFA_PARAM_MBZ, FFA_PARAM_MBZ, 694 handle, flags, sp->ffa_version); 695 } 696 697 /* Protect the runtime state of a S-EL0 SP with a lock. */ 698 if (sp->runtime_el == S_EL0) { 699 spin_unlock(&sp->rt_state_lock); 700 } 701 702 /* Forward the response to the Normal world. */ 703 return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4, 704 handle, cookie, flags, FFA_NWD_ID, sp->ffa_version); 705 } 706 707 static uint64_t ffa_error_handler(uint32_t smc_fid, 708 bool secure_origin, 709 uint64_t x1, 710 uint64_t x2, 711 uint64_t x3, 712 uint64_t x4, 713 void *cookie, 714 void *handle, 715 uint64_t flags) 716 { 717 struct secure_partition_desc *sp; 718 unsigned int idx; 719 uint16_t dst_id = ffa_endpoint_destination(x1); 720 bool cancel_dir_req = false; 721 722 /* Check that the response did not originate from the Normal world. */ 723 if (!secure_origin) { 724 return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 725 } 726 727 /* Get the descriptor of the SP that invoked FFA_ERROR. */ 728 sp = spmc_get_current_sp_ctx(); 729 if (sp == NULL) { 730 return spmc_ffa_error_return(handle, 731 FFA_ERROR_INVALID_PARAMETER); 732 } 733 734 /* Get the execution context of the SP that invoked FFA_ERROR. */ 735 idx = get_ec_index(sp); 736 737 /* 738 * We only expect FFA_ERROR to be received during SP initialisation 739 * otherwise this is an invalid call. 740 */ 741 if (sp->ec[idx].rt_model == RT_MODEL_INIT) { 742 ERROR("SP 0x%x failed to initialize.\n", sp->sp_id); 743 spmc_sp_synchronous_exit(&sp->ec[idx], x2); 744 /* Should not get here. 
*/ 745 panic(); 746 } 747 748 if (sp->runtime_el == S_EL0) { 749 spin_lock(&sp->rt_state_lock); 750 } 751 752 if (sp->ec[idx].rt_state == RT_STATE_RUNNING && 753 sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) { 754 sp->ec[idx].rt_state = RT_STATE_WAITING; 755 sp->ec[idx].dir_req_origin_id = INV_SP_ID; 756 sp->ec[idx].dir_req_funcid = 0x00; 757 cancel_dir_req = true; 758 } 759 760 if (sp->runtime_el == S_EL0) { 761 spin_unlock(&sp->rt_state_lock); 762 } 763 764 if (cancel_dir_req) { 765 if (dst_id == FFA_SPMC_ID) { 766 spmc_sp_synchronous_exit(&sp->ec[idx], x4); 767 /* Should not get here. */ 768 panic(); 769 } else 770 return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4, 771 handle, cookie, flags, dst_id, sp->ffa_version); 772 } 773 774 return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 775 } 776 777 static uint64_t ffa_version_handler(uint32_t smc_fid, 778 bool secure_origin, 779 uint64_t x1, 780 uint64_t x2, 781 uint64_t x3, 782 uint64_t x4, 783 void *cookie, 784 void *handle, 785 uint64_t flags) 786 { 787 uint32_t requested_version = x1 & FFA_VERSION_MASK; 788 789 if (requested_version & FFA_VERSION_BIT31_MASK) { 790 /* Invalid encoding, return an error. */ 791 SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED); 792 /* Execution stops here. */ 793 } 794 795 /* Determine the caller to store the requested version. */ 796 if (secure_origin) { 797 /* 798 * Ensure that the SP is reporting the same version as 799 * specified in its manifest. If these do not match there is 800 * something wrong with the SP. 801 * TODO: Should we abort the SP? For now assert this is not 802 * case. 803 */ 804 assert(requested_version == 805 spmc_get_current_sp_ctx()->ffa_version); 806 } else { 807 /* 808 * If this is called by the normal world, record this 809 * information in its descriptor. 
		 */
		spmc_get_hyp_ctx()->ffa_version = requested_version;
	}

	SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_SPMC_MAJOR,
					  FFA_VERSION_SPMC_MINOR));
}

/*******************************************************************************
 * Handle FFA_RXTX_MAP: dynamically map the caller's RX/TX buffer pair into
 * the SPMC's translation regime (TX read-only, RX read-write).
 ******************************************************************************/
static uint64_t rxtx_map_handler(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	int ret;
	uint32_t error_code;
	uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
	struct mailbox *mbox;
	uintptr_t tx_address = x1;
	uintptr_t rx_address = x2;
	uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
	uint32_t buf_size = page_count * FFA_PAGE_SIZE;

	/*
	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
	 * ABI on behalf of a VM and reject it if this is the case.
	 */
	if (tx_address == 0 || rx_address == 0) {
		WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Ensure the specified buffers are not the same. */
	if (tx_address == rx_address) {
		WARN("TX Buffer must not be the same as RX Buffer.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Ensure the buffer size is not 0. */
	if (buf_size == 0U) {
		WARN("Buffer size must not be 0\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Ensure the buffer size is a multiple of the translation granule size
	 * in TF-A.
	 */
	if (buf_size % PAGE_SIZE != 0U) {
		WARN("Buffer size must be aligned to translation granule.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Obtain the RX/TX buffer pair descriptor. */
	mbox = spmc_get_mbox_desc(secure_origin);

	spin_lock(&mbox->lock);

	/* Check if buffers have already been mapped. */
	if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
		WARN("RX/TX Buffers already mapped (%p/%p)\n",
		     (void *) mbox->rx_buffer, (void *)mbox->tx_buffer);
		error_code = FFA_ERROR_DENIED;
		goto err;
	}

	/* memmap the TX buffer as read only. */
	ret = mmap_add_dynamic_region(tx_address, /* PA */
				      tx_address, /* VA */
				      buf_size, /* size */
				      mem_atts | MT_RO_DATA); /* attrs */
	if (ret != 0) {
		/* Return the correct error code. */
		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
						FFA_ERROR_INVALID_PARAMETER;
		/*
		 * NOTE(review): this logs the FF-A error code, not the mmap
		 * return value (ret) — confirm which was intended.
		 */
		WARN("Unable to map TX buffer: %d\n", error_code);
		goto err;
	}

	/* memmap the RX buffer as read write. */
	ret = mmap_add_dynamic_region(rx_address, /* PA */
				      rx_address, /* VA */
				      buf_size, /* size */
				      mem_atts | MT_RW_DATA); /* attrs */

	if (ret != 0) {
		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
						FFA_ERROR_INVALID_PARAMETER;
		WARN("Unable to map RX buffer: %d\n", error_code);
		/* Unmap the TX buffer again. */
		mmap_remove_dynamic_region(tx_address, buf_size);
		goto err;
	}

	mbox->tx_buffer = (void *) tx_address;
	mbox->rx_buffer = (void *) rx_address;
	mbox->rxtx_page_count = page_count;
	spin_unlock(&mbox->lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);
	/* Execution stops here. */
err:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, error_code);
}

/*******************************************************************************
 * Handle FFA_RXTX_UNMAP: tear down the caller's RX/TX buffer pair mapping.
 ******************************************************************************/
static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
				   bool secure_origin,
				   uint64_t x1,
				   uint64_t x2,
				   uint64_t x3,
				   uint64_t x4,
				   void *cookie,
				   void *handle,
				   uint64_t flags)
{
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

	/*
	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
	 * ABI on behalf of a VM and reject it if this is the case.
	 */
	if (x1 != 0UL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&mbox->lock);

	/* Check if buffers are currently mapped. */
	if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
		spin_unlock(&mbox->lock);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Unmap RX Buffer */
	if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
				       buf_size) != 0) {
		WARN("Unable to unmap RX buffer!\n");
	}

	mbox->rx_buffer = 0;

	/* Unmap TX Buffer */
	if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
				       buf_size) != 0) {
		WARN("Unable to unmap TX buffer!\n");
	}

	mbox->tx_buffer = 0;
	mbox->rxtx_page_count = 0;

	spin_unlock(&mbox->lock);
	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}

/*
 * Helper function to populate the properties field of a Partition Info Get
 * descriptor.
 */
static uint32_t
partition_info_get_populate_properties(uint32_t sp_properties,
				       enum sp_execution_state sp_ec_state)
{
	uint32_t properties = sp_properties;
	uint32_t ec_state;

	/* Determine the execution state of the SP. */
	ec_state = sp_ec_state == SP_STATE_AARCH64 ?
		   FFA_PARTITION_INFO_GET_AARCH64_STATE :
		   FFA_PARTITION_INFO_GET_AARCH32_STATE;

	properties |= ec_state << FFA_PARTITION_INFO_GET_EXEC_STATE_SHIFT;

	return properties;
}

/*
 * Collate the partition information in a v1.1 partition information
 * descriptor format, this will be converter later if required.
 */
static int partition_info_get_handler_v1_1(uint32_t *uuid,
					   struct ffa_partition_info_v1_1
					   *partitions,
					   uint32_t max_partitions,
					   uint32_t *partition_count)
{
	uint32_t index;
	struct ffa_partition_info_v1_1 *desc;
	bool null_uuid = is_null_uuid(uuid);
	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();

	/* Deal with Logical Partitions. */
	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
		if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
			/* Found a matching UUID, populate appropriately. */
			if (*partition_count >= max_partitions) {
				return FFA_ERROR_NO_MEMORY;
			}

			desc = &partitions[*partition_count];
			desc->ep_id = el3_lp_descs[index].sp_id;
			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
			/* LSPs must be AArch64. */
			desc->properties =
				partition_info_get_populate_properties(
					el3_lp_descs[index].properties,
					SP_STATE_AARCH64);

			if (null_uuid) {
				copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
			}
			(*partition_count)++;
		}
	}

	/* Deal with physical SP's. */
	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
		uint32_t uuid_index;
		uint32_t *sp_uuid;

		/* An SP may advertise several UUIDs; check each one. */
		for (uuid_index = 0; uuid_index < sp_desc[index].num_uuids;
		     uuid_index++) {
			sp_uuid = sp_desc[index].uuid_array[uuid_index].uuid;

			if (null_uuid || uuid_match(uuid, sp_uuid)) {
				/* Found a matching UUID, populate appropriately. */
				if (*partition_count >= max_partitions) {
					return FFA_ERROR_NO_MEMORY;
				}

				desc = &partitions[*partition_count];
				desc->ep_id = sp_desc[index].sp_id;
				/*
				 * Execution context count must match No. cores for
				 * S-EL1 SPs.
				 */
				desc->execution_ctx_count = PLATFORM_CORE_COUNT;
				desc->properties =
					partition_info_get_populate_properties(
						sp_desc[index].properties,
						sp_desc[index].execution_state);

				(*partition_count)++;
				if (null_uuid) {
					copy_uuid(desc->uuid, sp_uuid);
				} else {
					/* Found UUID in this SP, go to next SP */
					break;
				}
			}
		}
	}
	return 0;
}

/*
 * Handle the case where that caller only wants the count of partitions
 * matching a given UUID and does not want the corresponding descriptors
 * populated.
 */
static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
{
	uint32_t index = 0;
	uint32_t partition_count = 0;
	bool null_uuid = is_null_uuid(uuid);
	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();

	/* Deal with Logical Partitions. */
	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
		if (null_uuid ||
		    uuid_match(uuid, el3_lp_descs[index].uuid)) {
			(partition_count)++;
		}
	}

	/* Deal with physical SP's. */
	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
		uint32_t uuid_index;

		for (uuid_index = 0; uuid_index < sp_desc[index].num_uuids;
		     uuid_index++) {
			uint32_t *sp_uuid =
				sp_desc[index].uuid_array[uuid_index].uuid;

			/*
			 * With a NULL UUID every advertised UUID counts; with
			 * a specific UUID each SP is counted at most once.
			 */
			if (null_uuid) {
				(partition_count)++;
			} else if (uuid_match(uuid, sp_uuid)) {
				(partition_count)++;
				/* Found a match, go to next SP */
				break;
			}
		}
	}
	return partition_count;
}

/*
 * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
 * the corresponding descriptor format from the v1.1 descriptor array.
 */
static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
					     *partitions,
					     struct mailbox *mbox,
					     int partition_count)
{
	uint32_t index;
	uint32_t buf_size;
	uint32_t descriptor_size;
	struct ffa_partition_info_v1_0 *v1_0_partitions =
		(struct ffa_partition_info_v1_0 *) mbox->rx_buffer;

	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
	descriptor_size = partition_count *
			  sizeof(struct ffa_partition_info_v1_0);

	/* The converted descriptors must fit in the caller's RX buffer. */
	if (descriptor_size > buf_size) {
		return FFA_ERROR_NO_MEMORY;
	}

	for (index = 0U; index < partition_count; index++) {
		v1_0_partitions[index].ep_id = partitions[index].ep_id;
		v1_0_partitions[index].execution_ctx_count =
			partitions[index].execution_ctx_count;
		/* Only report v1.0 properties. */
		v1_0_partitions[index].properties =
			(partitions[index].properties &
			 FFA_PARTITION_INFO_GET_PROPERTIES_V1_0_MASK);
	}
	return 0;
}

/*
 * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
 * v1.0 implementations.
 */
static uint64_t partition_info_get_handler(uint32_t smc_fid,
					   bool secure_origin,
					   uint64_t x1,
					   uint64_t x2,
					   uint64_t x3,
					   uint64_t x4,
					   void *cookie,
					   void *handle,
					   uint64_t flags)
{
	int ret;
	uint32_t partition_count = 0;
	uint32_t size = 0;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	struct mailbox *mbox;
	uint64_t info_get_flags;
	bool count_only;
	uint32_t uuid[4];

	/* The target UUID arrives in x1-x4, one 32-bit word per register. */
	uuid[0] = x1;
	uuid[1] = x2;
	uuid[2] = x3;
	uuid[3] = x4;

	/* Determine if the Partition descriptors should be populated. */
	info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
	count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);

	/* Handle the case where we don't need to populate the descriptors. */
	if (count_only) {
		partition_count = partition_info_get_handler_count_only(uuid);
		if (partition_count == 0) {
			return spmc_ffa_error_return(handle,
						FFA_ERROR_INVALID_PARAMETER);
		}
	} else {
		struct ffa_partition_info_v1_1
			partitions[MAX_SP_LP_PARTITIONS *
				   SPMC_AT_EL3_PARTITION_MAX_UUIDS];
		/*
		 * Handle the case where the partition descriptors are required,
		 * check we have the buffers available and populate the
		 * appropriate structure version.
		 */

		/* Obtain the v1.1 format of the descriptors. */
		ret = partition_info_get_handler_v1_1(
				uuid, partitions,
				(MAX_SP_LP_PARTITIONS *
				 SPMC_AT_EL3_PARTITION_MAX_UUIDS),
				&partition_count);

		/* Check if an error occurred during discovery. */
		if (ret != 0) {
			goto err;
		}

		/* If we didn't find any matches the UUID is unknown. */
		if (partition_count == 0) {
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err;
		}

		/* Obtain the partition mailbox RX/TX buffer pair descriptor. */
		mbox = spmc_get_mbox_desc(secure_origin);

		/*
		 * If the caller has not bothered registering its RX/TX pair
		 * then return an error code.
		 */
		spin_lock(&mbox->lock);
		if (mbox->rx_buffer == NULL) {
			ret = FFA_ERROR_BUSY;
			goto err_unlock;
		}

		/* Ensure the RX buffer is currently free. */
		if (mbox->state != MAILBOX_STATE_EMPTY) {
			ret = FFA_ERROR_BUSY;
			goto err_unlock;
		}

		/* Zero the RX buffer before populating. */
		(void)memset(mbox->rx_buffer, 0,
			     mbox->rxtx_page_count * FFA_PAGE_SIZE);

		/*
		 * Depending on the FF-A version of the requesting partition
		 * we may need to convert to a v1.0 format otherwise we can copy
		 * directly.
		 */
		if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
			ret = partition_info_populate_v1_0(partitions,
							   mbox,
							   partition_count);
			if (ret != 0) {
				goto err_unlock;
			}
		} else {
			uint32_t buf_size = mbox->rxtx_page_count *
					    FFA_PAGE_SIZE;

			/* Ensure the descriptor will fit in the buffer. */
			size = sizeof(struct ffa_partition_info_v1_1);
			if (partition_count * size > buf_size) {
				ret = FFA_ERROR_NO_MEMORY;
				goto err_unlock;
			}
			memcpy(mbox->rx_buffer, partitions,
			       partition_count * size);
		}

		/* Ownership of the RX buffer transfers to the receiver. */
		mbox->state = MAILBOX_STATE_FULL;
		spin_unlock(&mbox->lock);
	}
	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);

err_unlock:
	spin_unlock(&mbox->lock);
err:
	return spmc_ffa_error_return(handle, ret);
}

/* Helper to return an FFA_SUCCESS with a single w2 payload. */
static uint64_t ffa_feature_success(void *handle, uint32_t arg2)
{
	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, arg2);
}

/*
 * FFA_FEATURES sub-handler for the FFA_MEM_RETRIEVE_REQ ABIs, which carry
 * additional input properties (the NS-bit request) in x2.
 */
static uint64_t ffa_features_retrieve_request(bool secure_origin,
					      uint32_t input_properties,
					      void *handle)
{
	/*
	 * If we're called by the normal world we don't support any
	 * additional features.
	 */
	if (!secure_origin) {
		if ((input_properties & FFA_FEATURES_RET_REQ_NS_BIT) != 0U) {
			return spmc_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

	} else {
		struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
		/*
		 * If v1.1 the NS bit must be set otherwise it is an invalid
		 * call. If v1.0 check and store whether the SP has requested
		 * the use of the NS bit.
		 */
		if (sp->ffa_version == MAKE_FFA_VERSION(1, 1)) {
			if ((input_properties &
			     FFA_FEATURES_RET_REQ_NS_BIT) == 0U) {
				return spmc_ffa_error_return(handle,
						       FFA_ERROR_NOT_SUPPORTED);
			}
			return ffa_feature_success(handle,
						   FFA_FEATURES_RET_REQ_NS_BIT);
		} else {
			sp->ns_bit_requested = (input_properties &
						FFA_FEATURES_RET_REQ_NS_BIT) !=
						0U;
		}
		if (sp->ns_bit_requested) {
			return ffa_feature_success(handle,
						   FFA_FEATURES_RET_REQ_NS_BIT);
		}
	}
	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}

/*
 * Handler for FFA_FEATURES: report whether a given FF-A function ID (or
 * feature ID) is implemented by this SPMC, taking the caller's world into
 * account for world-restricted ABIs.
 */
static uint64_t ffa_features_handler(uint32_t smc_fid,
				     bool secure_origin,
				     uint64_t x1,
				     uint64_t x2,
				     uint64_t x3,
				     uint64_t x4,
				     void *cookie,
				     void *handle,
				     uint64_t flags)
{
	uint32_t function_id = (uint32_t) x1;
	uint32_t input_properties = (uint32_t) x2;

	/* Check if a Feature ID was requested. */
	if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) {
		/* We currently don't support any additional features. */
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/*
	 * Handle the cases where we have separate handlers due to additional
	 * properties.
	 */
	switch (function_id) {
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
		return ffa_features_retrieve_request(secure_origin,
						     input_properties,
						     handle);
	}

	/*
	 * We don't currently support additional input properties for these
	 * other ABIs therefore ensure this value is set to 0.
	 */
	if (input_properties != 0U) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_NOT_SUPPORTED);
	}

	/* Report if any other FF-A ABI is supported. */
	switch (function_id) {
	/* Supported features from both worlds. */
	case FFA_ERROR:
	case FFA_SUCCESS_SMC32:
	case FFA_INTERRUPT:
	case FFA_SPM_ID_GET:
	case FFA_ID_GET:
	case FFA_FEATURES:
	case FFA_VERSION:
	case FFA_RX_RELEASE:
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_REQ2_SMC64:
	case FFA_PARTITION_INFO_GET:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_MEM_FRAG_TX:
	case FFA_MSG_RUN:

		/*
		 * We are relying on the fact that the other registers
		 * will be set to 0 as these values align with the
		 * currently implemented features of the SPMC. If this
		 * changes this function must be extended to handle
		 * reporting the additional functionality.
		 */

		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	/* Supported ABIs only from the secure world. */
	case FFA_MEM_PERM_GET_SMC32:
	case FFA_MEM_PERM_GET_SMC64:
	case FFA_MEM_PERM_SET_SMC32:
	case FFA_MEM_PERM_SET_SMC64:
		/* these ABIs are only supported from S-EL0 SPs */
#if !(SPMC_AT_EL3_SEL0_SP)
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
#endif
		/* fall through */

	case FFA_SECONDARY_EP_REGISTER_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP2_SMC64:
	case FFA_MEM_RELINQUISH:
	case FFA_MSG_WAIT:
	case FFA_CONSOLE_LOG_SMC32:
	case FFA_CONSOLE_LOG_SMC64:
		if (!secure_origin) {
			return spmc_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	/* Supported features only from the normal world. */
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_RECLAIM:
	case FFA_MEM_FRAG_RX:

		if (secure_origin) {
			return spmc_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	default:
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_NOT_SUPPORTED);
	}
}

/*
 * Handler for FFA_ID_GET: report the FF-A ID of the calling endpoint — the
 * current SP's ID for a secure caller, the Hypervisor/OS kernel endpoint ID
 * for a normal-world caller.
 */
static uint64_t ffa_id_get_handler(uint32_t smc_fid,
				   bool secure_origin,
				   uint64_t x1,
				   uint64_t x2,
				   uint64_t x3,
				   uint64_t x4,
				   void *cookie,
				   void *handle,
				   uint64_t flags)
{
	if (secure_origin) {
		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
			 spmc_get_current_sp_ctx()->sp_id);
	} else {
		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
			 spmc_get_hyp_ctx()->ns_ep_id);
	}
}

/*
 * Enable an SP to query the ID assigned to the SPMC.
 */
static uint64_t ffa_spm_id_get_handler(uint32_t smc_fid,
				       bool secure_origin,
				       uint64_t x1,
				       uint64_t x2,
				       uint64_t x3,
				       uint64_t x4,
				       void *cookie,
				       void *handle,
				       uint64_t flags)
{
	/* Per the spec, all argument registers must be zero (MBZ). */
	assert(x1 == 0UL);
	assert(x2 == 0UL);
	assert(x3 == 0UL);
	assert(x4 == 0UL);
	assert(SMC_GET_GP(handle, CTX_GPREG_X5) == 0UL);
	assert(SMC_GET_GP(handle, CTX_GPREG_X6) == 0UL);
	assert(SMC_GET_GP(handle, CTX_GPREG_X7) == 0UL);

	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, FFA_SPMC_ID);
}

/*
 * Handler for FFA_RUN: validate the target endpoint/vCPU encoded in x1 and,
 * if the execution context is eligible, hand control to the SP.
 */
static uint64_t ffa_run_handler(uint32_t smc_fid,
				bool secure_origin,
				uint64_t x1,
				uint64_t x2,
				uint64_t x3,
				uint64_t x4,
				void *cookie,
				void *handle,
				uint64_t flags)
{
	struct secure_partition_desc *sp;
	uint16_t target_id = FFA_RUN_EP_ID(x1);
	uint16_t vcpu_id = FFA_RUN_VCPU_ID(x1);
	unsigned int idx;
	unsigned int *rt_state;
	unsigned int *rt_model;

	/* Can only be called from the normal world. */
	if (secure_origin) {
		ERROR("FFA_RUN can only be called from NWd.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Cannot run a Normal world partition. */
	if (ffa_is_normal_world_id(target_id)) {
		ERROR("Cannot run a NWd partition (0x%x).\n", target_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check that the target SP exists. */
	sp = spmc_get_sp_ctx(target_id);
	if (sp == NULL) {
		ERROR("Unknown partition ID (0x%x).\n", target_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	idx = get_ec_index(sp);

	/* The requested vCPU must be the one bound to this physical cpu. */
	if (idx != vcpu_id) {
		ERROR("Cannot run vcpu %d != %d.\n", idx, vcpu_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}
	/* S-EL0 SP runtime state is shared, so guard it with a lock. */
	if (sp->runtime_el == S_EL0) {
		spin_lock(&sp->rt_state_lock);
	}
	rt_state = &((sp->ec[idx]).rt_state);
	rt_model = &((sp->ec[idx]).rt_model);
	if (*rt_state == RT_STATE_RUNNING) {
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		ERROR("Partition (0x%x) is already running.\n", target_id);
		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
	}

	/*
	 * Sanity check that if the execution context was not waiting then it
	 * was either in the direct request or the run partition runtime model.
	 */
	if (*rt_state == RT_STATE_PREEMPTED || *rt_state == RT_STATE_BLOCKED) {
		assert(*rt_model == RT_MODEL_RUN ||
		       *rt_model == RT_MODEL_DIR_REQ);
	}

	/*
	 * If the context was waiting then update the partition runtime model.
	 */
	if (*rt_state == RT_STATE_WAITING) {
		*rt_model = RT_MODEL_RUN;
	}

	/*
	 * Forward the request to the correct SP vCPU after updating
	 * its state.
	 */
	*rt_state = RT_STATE_RUNNING;

	if (sp->runtime_el == S_EL0) {
		spin_unlock(&sp->rt_state_lock);
	}

	return spmc_smc_return(smc_fid, secure_origin, x1, 0, 0, 0,
			       handle, cookie, flags, target_id,
			       sp->ffa_version);
}

/*
 * Handler for FFA_RX_RELEASE: relinquish ownership of the caller's RX buffer
 * back to the producer by marking the mailbox empty.
 */
static uint64_t rx_release_handler(uint32_t smc_fid,
				   bool secure_origin,
				   uint64_t x1,
				   uint64_t x2,
				   uint64_t x3,
				   uint64_t x4,
				   void *cookie,
				   void *handle,
				   uint64_t flags)
{
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);

	spin_lock(&mbox->lock);

	/* Releasing a buffer that is not full is a protocol violation. */
	if (mbox->state != MAILBOX_STATE_FULL) {
		spin_unlock(&mbox->lock);
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	mbox->state = MAILBOX_STATE_EMPTY;
	spin_unlock(&mbox->lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}

/*
 * Handler for FFA_CONSOLE_LOG: emit up to 48 characters carried in registers
 * x2-x7 to the EL3 console on behalf of a secure caller.
 */
static uint64_t spmc_ffa_console_log(uint32_t smc_fid,
				     bool secure_origin,
				     uint64_t x1,
				     uint64_t x2,
				     uint64_t x3,
				     uint64_t x4,
				     void *cookie,
				     void *handle,
				     uint64_t flags)
{
	/* Maximum number of characters is 48: 6 registers of 8 bytes each. */
	char chars[48] = {0};
	size_t chars_max;
	size_t chars_count = x1;

	/* Does not support request from Nwd. */
	if (!secure_origin) {
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	assert(smc_fid == FFA_CONSOLE_LOG_SMC32 || smc_fid == FFA_CONSOLE_LOG_SMC64);
	/*
	 * Reassemble the payload from the argument registers into the local
	 * byte buffer: 32-bit words for the SMC32 convention, 64-bit for
	 * SMC64.
	 * NOTE(review): the char array is reinterpreted as uint32_t*/uint64_t*;
	 * assumes the stack buffer is suitably aligned — confirm for the
	 * targeted toolchains.
	 */
	if (smc_fid == FFA_CONSOLE_LOG_SMC32) {
		uint32_t *registers = (uint32_t *)chars;
		registers[0] = (uint32_t)x2;
		registers[1] = (uint32_t)x3;
		registers[2] = (uint32_t)x4;
		registers[3] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X5);
		registers[4] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X6);
		registers[5] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X7);
		chars_max = 6 * sizeof(uint32_t);
	} else {
		uint64_t *registers = (uint64_t *)chars;
		registers[0] = x2;
		registers[1] = x3;
		registers[2] = x4;
		registers[3] = SMC_GET_GP(handle, CTX_GPREG_X5);
		registers[4] = SMC_GET_GP(handle, CTX_GPREG_X6);
		registers[5] = SMC_GET_GP(handle, CTX_GPREG_X7);
		chars_max = 6 * sizeof(uint64_t);
	}

	/* The requested length must be non-zero and fit the payload. */
	if ((chars_count == 0) || (chars_count > chars_max)) {
		return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
	}

	/* Output stops early at an embedded NUL terminator. */
	for (size_t i = 0; (i < chars_count) && (chars[i] != '\0'); i++) {
		putchar(chars[i]);
	}

	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}

/*
 * Perform initial validation on the provided secondary entry point.
 * For now ensure it does not lie within the BL31 Image or the SP's
 * RX/TX buffers as these are mapped within EL3.
 * TODO: perform validation for additional invalid memory regions.
1662 */ 1663 static int validate_secondary_ep(uintptr_t ep, struct secure_partition_desc *sp) 1664 { 1665 struct mailbox *mb; 1666 uintptr_t buffer_size; 1667 uintptr_t sp_rx_buffer; 1668 uintptr_t sp_tx_buffer; 1669 uintptr_t sp_rx_buffer_limit; 1670 uintptr_t sp_tx_buffer_limit; 1671 1672 mb = &sp->mailbox; 1673 buffer_size = (uintptr_t) (mb->rxtx_page_count * FFA_PAGE_SIZE); 1674 sp_rx_buffer = (uintptr_t) mb->rx_buffer; 1675 sp_tx_buffer = (uintptr_t) mb->tx_buffer; 1676 sp_rx_buffer_limit = sp_rx_buffer + buffer_size; 1677 sp_tx_buffer_limit = sp_tx_buffer + buffer_size; 1678 1679 /* 1680 * Check if the entry point lies within BL31, or the 1681 * SP's RX or TX buffer. 1682 */ 1683 if ((ep >= BL31_BASE && ep < BL31_LIMIT) || 1684 (ep >= sp_rx_buffer && ep < sp_rx_buffer_limit) || 1685 (ep >= sp_tx_buffer && ep < sp_tx_buffer_limit)) { 1686 return -EINVAL; 1687 } 1688 return 0; 1689 } 1690 1691 /******************************************************************************* 1692 * This function handles the FFA_SECONDARY_EP_REGISTER SMC to allow an SP to 1693 * register an entry point for initialization during a secondary cold boot. 1694 ******************************************************************************/ 1695 static uint64_t ffa_sec_ep_register_handler(uint32_t smc_fid, 1696 bool secure_origin, 1697 uint64_t x1, 1698 uint64_t x2, 1699 uint64_t x3, 1700 uint64_t x4, 1701 void *cookie, 1702 void *handle, 1703 uint64_t flags) 1704 { 1705 struct secure_partition_desc *sp; 1706 struct sp_exec_ctx *sp_ctx; 1707 1708 /* This request cannot originate from the Normal world. */ 1709 if (!secure_origin) { 1710 WARN("%s: Can only be called from SWd.\n", __func__); 1711 return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1712 } 1713 1714 /* Get the context of the current SP. 
*/ 1715 sp = spmc_get_current_sp_ctx(); 1716 if (sp == NULL) { 1717 WARN("%s: Cannot find SP context.\n", __func__); 1718 return spmc_ffa_error_return(handle, 1719 FFA_ERROR_INVALID_PARAMETER); 1720 } 1721 1722 /* Only an S-EL1 SP should be invoking this ABI. */ 1723 if (sp->runtime_el != S_EL1) { 1724 WARN("%s: Can only be called for a S-EL1 SP.\n", __func__); 1725 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED); 1726 } 1727 1728 /* Ensure the SP is in its initialization state. */ 1729 sp_ctx = spmc_get_sp_ec(sp); 1730 if (sp_ctx->rt_model != RT_MODEL_INIT) { 1731 WARN("%s: Can only be called during SP initialization.\n", 1732 __func__); 1733 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED); 1734 } 1735 1736 /* Perform initial validation of the secondary entry point. */ 1737 if (validate_secondary_ep(x1, sp)) { 1738 WARN("%s: Invalid entry point provided (0x%lx).\n", 1739 __func__, x1); 1740 return spmc_ffa_error_return(handle, 1741 FFA_ERROR_INVALID_PARAMETER); 1742 } 1743 1744 /* 1745 * Update the secondary entrypoint in SP context. 1746 * We don't need a lock here as during partition initialization there 1747 * will only be a single core online. 1748 */ 1749 sp->secondary_ep = x1; 1750 VERBOSE("%s: 0x%lx\n", __func__, sp->secondary_ep); 1751 1752 SMC_RET1(handle, FFA_SUCCESS_SMC32); 1753 } 1754 1755 /******************************************************************************* 1756 * Permissions are encoded using a different format in the FFA_MEM_PERM_* ABIs 1757 * than in the Trusted Firmware, where the mmap_attr_t enum type is used. This 1758 * function converts a permission value from the FF-A format to the mmap_attr_t 1759 * format by setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and 1760 * MT_EXECUTE/MT_EXECUTE_NEVER. The other fields are left as 0 because they are 1761 * ignored by the function xlat_change_mem_attributes_ctx(). 
1762 ******************************************************************************/ 1763 static unsigned int ffa_perm_to_mmap_perm(unsigned int perms) 1764 { 1765 unsigned int tf_attr = 0U; 1766 unsigned int access; 1767 1768 /* Deal with data access permissions first. */ 1769 access = (perms & FFA_MEM_PERM_DATA_MASK) >> FFA_MEM_PERM_DATA_SHIFT; 1770 1771 switch (access) { 1772 case FFA_MEM_PERM_DATA_RW: 1773 /* Return 0 if the execute is set with RW. */ 1774 if ((perms & FFA_MEM_PERM_INST_NON_EXEC) != 0) { 1775 tf_attr |= MT_RW | MT_USER | MT_EXECUTE_NEVER; 1776 } 1777 break; 1778 1779 case FFA_MEM_PERM_DATA_RO: 1780 tf_attr |= MT_RO | MT_USER; 1781 /* Deal with the instruction access permissions next. */ 1782 if ((perms & FFA_MEM_PERM_INST_NON_EXEC) == 0) { 1783 tf_attr |= MT_EXECUTE; 1784 } else { 1785 tf_attr |= MT_EXECUTE_NEVER; 1786 } 1787 break; 1788 1789 case FFA_MEM_PERM_DATA_NA: 1790 default: 1791 return tf_attr; 1792 } 1793 1794 return tf_attr; 1795 } 1796 1797 /******************************************************************************* 1798 * Handler to set the permissions of a set of contiguous pages of a S-EL0 SP 1799 ******************************************************************************/ 1800 static uint64_t ffa_mem_perm_set_handler(uint32_t smc_fid, 1801 bool secure_origin, 1802 uint64_t x1, 1803 uint64_t x2, 1804 uint64_t x3, 1805 uint64_t x4, 1806 void *cookie, 1807 void *handle, 1808 uint64_t flags) 1809 { 1810 struct secure_partition_desc *sp; 1811 unsigned int idx; 1812 uintptr_t base_va = (uintptr_t) x1; 1813 size_t size = (size_t)(x2 * PAGE_SIZE); 1814 uint32_t tf_attr; 1815 int ret; 1816 1817 /* This request cannot originate from the Normal world. */ 1818 if (!secure_origin) { 1819 return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED); 1820 } 1821 1822 if (size == 0) { 1823 return spmc_ffa_error_return(handle, 1824 FFA_ERROR_INVALID_PARAMETER); 1825 } 1826 1827 /* Get the context of the current SP. 
*/ 1828 sp = spmc_get_current_sp_ctx(); 1829 if (sp == NULL) { 1830 return spmc_ffa_error_return(handle, 1831 FFA_ERROR_INVALID_PARAMETER); 1832 } 1833 1834 /* A S-EL1 SP has no business invoking this ABI. */ 1835 if (sp->runtime_el == S_EL1) { 1836 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED); 1837 } 1838 1839 if ((x3 & ~((uint64_t)FFA_MEM_PERM_MASK)) != 0) { 1840 return spmc_ffa_error_return(handle, 1841 FFA_ERROR_INVALID_PARAMETER); 1842 } 1843 1844 /* Get the execution context of the calling SP. */ 1845 idx = get_ec_index(sp); 1846 1847 /* 1848 * Ensure that the S-EL0 SP is initialising itself. We do not need to 1849 * synchronise this operation through a spinlock since a S-EL0 SP is UP 1850 * and can only be initialising on this cpu. 1851 */ 1852 if (sp->ec[idx].rt_model != RT_MODEL_INIT) { 1853 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED); 1854 } 1855 1856 VERBOSE("Setting memory permissions:\n"); 1857 VERBOSE(" Start address : 0x%lx\n", base_va); 1858 VERBOSE(" Number of pages: %lu (%zu bytes)\n", x2, size); 1859 VERBOSE(" Attributes : 0x%x\n", (uint32_t)x3); 1860 1861 /* Convert inbound permissions to TF-A permission attributes */ 1862 tf_attr = ffa_perm_to_mmap_perm((unsigned int)x3); 1863 if (tf_attr == 0U) { 1864 return spmc_ffa_error_return(handle, 1865 FFA_ERROR_INVALID_PARAMETER); 1866 } 1867 1868 /* Request the change in permissions */ 1869 ret = xlat_change_mem_attributes_ctx(sp->xlat_ctx_handle, 1870 base_va, size, tf_attr); 1871 if (ret != 0) { 1872 return spmc_ffa_error_return(handle, 1873 FFA_ERROR_INVALID_PARAMETER); 1874 } 1875 1876 SMC_RET1(handle, FFA_SUCCESS_SMC32); 1877 } 1878 1879 /******************************************************************************* 1880 * Permissions are encoded using a different format in the FFA_MEM_PERM_* ABIs 1881 * than in the Trusted Firmware, where the mmap_attr_t enum type is used. 
 * This
 * function converts a permission value from the mmap_attr_t format to the FF-A
 * format.
 ******************************************************************************/
static unsigned int mmap_perm_to_ffa_perm(unsigned int attr)
{
	unsigned int perms = 0U;
	unsigned int data_access;

	if ((attr & MT_USER) == 0) {
		/* No access from EL0. */
		data_access = FFA_MEM_PERM_DATA_NA;
	} else {
		if ((attr & MT_RW) != 0) {
			data_access = FFA_MEM_PERM_DATA_RW;
		} else {
			data_access = FFA_MEM_PERM_DATA_RO;
		}
	}

	perms |= (data_access & FFA_MEM_PERM_DATA_MASK)
		 << FFA_MEM_PERM_DATA_SHIFT;

	/* Instruction access: absence of MT_EXECUTE_NEVER means executable. */
	if ((attr & MT_EXECUTE_NEVER) != 0U) {
		perms |= FFA_MEM_PERM_INST_NON_EXEC;
	}

	return perms;
}

/*******************************************************************************
 * Handler to get the permissions of a set of contiguous pages of a S-EL0 SP
 ******************************************************************************/
static uint64_t ffa_mem_perm_get_handler(uint32_t smc_fid,
					 bool secure_origin,
					 uint64_t x1,
					 uint64_t x2,
					 uint64_t x3,
					 uint64_t x4,
					 void *cookie,
					 void *handle,
					 uint64_t flags)
{
	struct secure_partition_desc *sp;
	unsigned int idx;
	uintptr_t base_va = (uintptr_t)x1;
	/* x2 carries (page count - 1) per the ABI, hence the +1. */
	uint64_t max_page_count = x2 + 1;
	uint64_t page_count = 0;
	uint32_t base_page_attr = 0;
	uint32_t page_attr = 0;
	unsigned int table_level;
	int ret;

	/* This request cannot originate from the Normal world. */
	if (!secure_origin) {
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/* Get the context of the current SP. */
	sp = spmc_get_current_sp_ctx();
	if (sp == NULL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* A S-EL1 SP has no business invoking this ABI. */
	if (sp->runtime_el == S_EL1) {
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Get the execution context of the calling SP. */
	idx = get_ec_index(sp);

	/*
	 * Ensure that the S-EL0 SP is initialising itself. We do not need to
	 * synchronise this operation through a spinlock since a S-EL0 SP is UP
	 * and can only be initialising on this cpu.
	 */
	if (sp->ec[idx].rt_model != RT_MODEL_INIT) {
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Align the query to the start of its page. */
	base_va &= ~(PAGE_SIZE_MASK);

	/* Request the permissions */
	ret = xlat_get_mem_attributes_ctx(sp->xlat_ctx_handle, base_va,
					  &base_page_attr, &table_level);
	if (ret != 0) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Calculate how many pages in this block entry from base_va including
	 * its page.
	 */
	page_count = ((XLAT_BLOCK_SIZE(table_level) -
		      (base_va & XLAT_BLOCK_MASK(table_level))) >> PAGE_SIZE_SHIFT);
	base_va += XLAT_BLOCK_SIZE(table_level);

	/*
	 * Walk forward one translation block at a time, accumulating pages
	 * while the attributes match the first block's. Stop on address-space
	 * wrap-around (base_va == 0).
	 */
	while ((page_count < max_page_count) && (base_va != 0x00)) {
		ret = xlat_get_mem_attributes_ctx(sp->xlat_ctx_handle, base_va,
						  &page_attr, &table_level);
		if (ret != 0) {
			return spmc_ffa_error_return(handle,
						     FFA_ERROR_INVALID_PARAMETER);
		}

		if (page_attr != base_page_attr) {
			break;
		}

		base_va += XLAT_BLOCK_SIZE(table_level);
		page_count += (XLAT_BLOCK_SIZE(table_level) >> PAGE_SIZE_SHIFT);
	}

	/* Clamp to the range the caller actually asked about. */
	if (page_count > max_page_count) {
		page_count = max_page_count;
	}

	/* Convert TF-A permission to FF-A permissions attributes. */
	x2 = mmap_perm_to_ffa_perm(base_page_attr);

	/* x3 should be page count - 1 */
	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, x2, --page_count);
}

/*******************************************************************************
 * This function will parse the Secure Partition Manifest. From manifest, it
 * will fetch details for preparing Secure partition image context and secure
 * partition image boot arguments if any.
 ******************************************************************************/
static int sp_manifest_parse(void *sp_manifest, int offset,
			     struct secure_partition_desc *sp,
			     entry_point_info_t *ep_info,
			     int32_t *boot_info_reg)
{
	int32_t ret, node;
	uint32_t config_32;
	int uuid_size;
	const fdt32_t *prop;

	/*
	 * Look for the mandatory fields that are expected to be present in
	 * the SP manifests.
	 */
	node = fdt_path_offset(sp_manifest, "/");
	if (node < 0) {
		ERROR("Did not find root node.\n");
		return node;
	}

	prop = fdt_getprop(sp_manifest, node, "uuid", &uuid_size);
	if (prop == NULL) {
		ERROR("Couldn't find property uuid in manifest\n");
		return -FDT_ERR_NOTFOUND;
	}

	/* The "uuid" property may hold several UUIDs back-to-back. */
	sp->num_uuids = (uint32_t)uuid_size / sizeof(struct ffa_uuid);
	if (sp->num_uuids > ARRAY_SIZE(sp->uuid_array)) {
		ERROR("Too many UUIDs (%d) in manifest, maximum is %zd\n",
		      sp->num_uuids, ARRAY_SIZE(sp->uuid_array));
		return -FDT_ERR_BADVALUE;
	}

	ret = fdt_read_uint32_array(sp_manifest, node, "uuid",
				    (uuid_size / sizeof(uint32_t)),
				    sp->uuid_array[0].uuid);
	if (ret != 0) {
		ERROR("Missing Secure Partition UUID.\n");
		return ret;
	}

	/* Reject manifests listing the same UUID more than once. */
	for (uint32_t i = 0; i < sp->num_uuids; i++) {
		for (uint32_t j = 0; j < i; j++) {
			if (memcmp(&sp->uuid_array[i], &sp->uuid_array[j],
				   sizeof(struct ffa_uuid)) == 0) {
				ERROR("Duplicate UUIDs in manifest: 0x%x 0x%x 0x%x 0x%x\n",
				      sp->uuid_array[i].uuid[0],
				      sp->uuid_array[i].uuid[1],
				      sp->uuid_array[i].uuid[2],
				      sp->uuid_array[i].uuid[3]);
				return -FDT_ERR_BADVALUE;
			}
		}
	}

	ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
	if (ret != 0) {
		ERROR("Missing SP Exception Level information.\n");
		return ret;
	}

	sp->runtime_el = config_32;

	ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
	if (ret != 0) {
		ERROR("Missing Secure Partition FF-A Version.\n");
		return ret;
	}

	sp->ffa_version = config_32;

	ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
	if (ret != 0) {
		ERROR("Missing Secure Partition Execution State.\n");
		return ret;
	}

	sp->execution_state = config_32;

	ret = fdt_read_uint32(sp_manifest, node,
			      "messaging-method", &config_32);
	if (ret != 0) {
		ERROR("Missing Secure Partition messaging method.\n");
		return ret;
	}

	/* Validate this entry, we currently only support direct messaging. */
	if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
			   FFA_PARTITION_DIRECT_REQ_SEND |
			   FFA_PARTITION_DIRECT_REQ2_RECV |
			   FFA_PARTITION_DIRECT_REQ2_SEND)) != 0U) {
		WARN("Invalid Secure Partition messaging method (0x%x)\n",
		     config_32);
		return -EINVAL;
	}

	sp->properties = config_32;

	ret = fdt_read_uint32(sp_manifest, node,
			      "execution-ctx-count", &config_32);

	if (ret != 0) {
		ERROR("Missing SP Execution Context Count.\n");
		return ret;
	}

	/*
	 * Ensure this field is set correctly in the manifest however
	 * since this is currently a hardcoded value for S-EL1 partitions
	 * we don't need to save it here, just validate.
	 */
	if ((sp->runtime_el == S_EL1) && (config_32 != PLATFORM_CORE_COUNT)) {
		ERROR("SP Execution Context Count (%u) must be %u.\n",
		      config_32, PLATFORM_CORE_COUNT);
		return -EINVAL;
	}

	/*
	 * Look for the optional fields that are expected to be present in
	 * an SP manifest.
	 */
	ret = fdt_read_uint32(sp_manifest, node, "id", &config_32);
	if (ret != 0) {
		WARN("Missing Secure Partition ID.\n");
	} else {
		if (!is_ffa_secure_id_valid(config_32)) {
			ERROR("Invalid Secure Partition ID (0x%x).\n",
			      config_32);
			return -EINVAL;
		}
		sp->sp_id = config_32;
	}

	ret = fdt_read_uint32(sp_manifest, node,
			      "power-management-messages", &config_32);
	if (ret != 0) {
		WARN("Missing Power Management Messages entry.\n");
	} else {
		/* S-EL0 SPs are UP and cannot consume PM messages. */
		if ((sp->runtime_el == S_EL0) && (config_32 != 0)) {
			ERROR("Power messages not supported for S-EL0 SP\n");
			return -EINVAL;
		}

		/*
		 * Ensure only the currently supported power messages have
		 * been requested.
		 */
		if (config_32 & ~(FFA_PM_MSG_SUB_CPU_OFF |
				  FFA_PM_MSG_SUB_CPU_SUSPEND |
				  FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME)) {
			ERROR("Requested unsupported PM messages (%x)\n",
			      config_32);
			return -EINVAL;
		}
		sp->pwr_mgmt_msgs = config_32;
	}

	ret = fdt_read_uint32(sp_manifest, node,
			      "gp-register-num", &config_32);
	if (ret != 0) {
		WARN("Missing boot information register.\n");
	} else {
		/* Check if a register number between 0-3 is specified. */
		if (config_32 < 4) {
			*boot_info_reg = config_32;
		} else {
			WARN("Incorrect boot information register (%u).\n",
			     config_32);
		}
	}

	ret = fdt_read_uint32(sp_manifest, node,
			      "vm-availability-messages", &config_32);
	if (ret != 0) {
		WARN("Missing VM availability messaging.\n");
	} else if ((sp->properties & FFA_PARTITION_DIRECT_REQ_RECV) == 0) {
		/* Availability messages are delivered via direct requests. */
		ERROR("VM availability messaging requested without "
		      "direct message receive support.\n");
		return -EINVAL;
	} else {
		/* Validate this entry. */
		if ((config_32 & ~(FFA_VM_AVAILABILITY_CREATED |
				   FFA_VM_AVAILABILITY_DESTROYED)) != 0U) {
			WARN("Invalid VM availability messaging (0x%x)\n",
			     config_32);
			return -EINVAL;
		}

		if ((config_32 & FFA_VM_AVAILABILITY_CREATED) != 0U) {
			sp->properties |= FFA_PARTITION_VM_CREATED;
		}
		if ((config_32 & FFA_VM_AVAILABILITY_DESTROYED) != 0U) {
			sp->properties |= FFA_PARTITION_VM_DESTROYED;
		}
	}

	return 0;
}

/*******************************************************************************
 * This function gets the Secure Partition Manifest base and maps the manifest
 * region.
 * Currently only one Secure Partition manifest is considered which is used to
 * prepare the context for the single Secure Partition.
2217 ******************************************************************************/ 2218 static int find_and_prepare_sp_context(void) 2219 { 2220 void *sp_manifest; 2221 uintptr_t manifest_base; 2222 uintptr_t manifest_base_align __maybe_unused; 2223 entry_point_info_t *next_image_ep_info; 2224 int32_t ret, boot_info_reg = -1; 2225 struct secure_partition_desc *sp; 2226 struct transfer_list_header *tl __maybe_unused; 2227 struct transfer_list_entry *te __maybe_unused; 2228 2229 next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE); 2230 if (next_image_ep_info == NULL) { 2231 WARN("No Secure Partition image provided by BL2.\n"); 2232 return -ENOENT; 2233 } 2234 2235 2236 #if TRANSFER_LIST && !RESET_TO_BL31 2237 tl = (struct transfer_list_header *)next_image_ep_info->args.arg3; 2238 te = transfer_list_find(tl, TL_TAG_DT_FFA_MANIFEST); 2239 if (te == NULL) { 2240 WARN("Secure Partition manifest absent.\n"); 2241 return -ENOENT; 2242 } 2243 2244 sp_manifest = (void *)transfer_list_entry_data(te); 2245 manifest_base = (uintptr_t)sp_manifest; 2246 #else 2247 sp_manifest = (void *)next_image_ep_info->args.arg0; 2248 if (sp_manifest == NULL) { 2249 WARN("Secure Partition manifest absent.\n"); 2250 return -ENOENT; 2251 } 2252 2253 manifest_base = (uintptr_t)sp_manifest; 2254 manifest_base_align = page_align(manifest_base, DOWN); 2255 2256 /* 2257 * Map the secure partition manifest region in the EL3 translation 2258 * regime. 2259 * Map an area equal to (2 * PAGE_SIZE) for now. During manifest base 2260 * alignment the region of 1 PAGE_SIZE from manifest align base may 2261 * not completely accommodate the secure partition manifest region. 
2262 */ 2263 ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align, 2264 manifest_base_align, 2265 PAGE_SIZE * 2, 2266 MT_RO_DATA); 2267 if (ret != 0) { 2268 ERROR("Error while mapping SP manifest (%d).\n", ret); 2269 return ret; 2270 } 2271 #endif 2272 2273 ret = fdt_node_offset_by_compatible(sp_manifest, -1, 2274 "arm,ffa-manifest-1.0"); 2275 if (ret < 0) { 2276 ERROR("Error happened in SP manifest reading.\n"); 2277 return -EINVAL; 2278 } 2279 2280 /* 2281 * Store the size of the manifest so that it can be used later to pass 2282 * the manifest as boot information later. 2283 */ 2284 next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest); 2285 INFO("Manifest adr = %lx , size = %lu bytes\n", manifest_base, 2286 next_image_ep_info->args.arg1); 2287 2288 /* 2289 * Select an SP descriptor for initialising the partition's execution 2290 * context on the primary CPU. 2291 */ 2292 sp = spmc_get_current_sp_ctx(); 2293 2294 #if SPMC_AT_EL3_SEL0_SP 2295 /* Assign translation tables context. */ 2296 sp_desc->xlat_ctx_handle = spm_get_sp_xlat_context(); 2297 2298 #endif /* SPMC_AT_EL3_SEL0_SP */ 2299 /* Initialize entry point information for the SP */ 2300 SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1, 2301 SECURE | EP_ST_ENABLE); 2302 2303 /* Parse the SP manifest. */ 2304 ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info, 2305 &boot_info_reg); 2306 if (ret != 0) { 2307 ERROR("Error in Secure Partition manifest parsing.\n"); 2308 return ret; 2309 } 2310 2311 /* Perform any common initialisation. */ 2312 spmc_sp_common_setup(sp, next_image_ep_info, boot_info_reg); 2313 2314 /* Perform any initialisation specific to S-EL1 SPs. */ 2315 if (sp->runtime_el == S_EL1) { 2316 spmc_el1_sp_setup(sp, next_image_ep_info); 2317 spmc_sp_common_ep_commit(sp, next_image_ep_info); 2318 } 2319 #if SPMC_AT_EL3_SEL0_SP 2320 /* Perform any initialisation specific to S-EL0 SPs. 
*/ 2321 else if (sp->runtime_el == S_EL0) { 2322 /* Setup spsr in endpoint info for common context management routine. */ 2323 spmc_el0_sp_spsr_setup(next_image_ep_info); 2324 2325 spmc_sp_common_ep_commit(sp, next_image_ep_info); 2326 2327 /* 2328 * Perform any initialisation specific to S-EL0 not set by common 2329 * context management routine. 2330 */ 2331 spmc_el0_sp_setup(sp, boot_info_reg, sp_manifest); 2332 } 2333 #endif /* SPMC_AT_EL3_SEL0_SP */ 2334 else { 2335 ERROR("Unexpected runtime EL: %u\n", sp->runtime_el); 2336 return -EINVAL; 2337 } 2338 2339 return 0; 2340 } 2341 2342 /******************************************************************************* 2343 * This function takes an SP context pointer and performs a synchronous entry 2344 * into it. 2345 ******************************************************************************/ 2346 static int32_t logical_sp_init(void) 2347 { 2348 int32_t rc = 0; 2349 struct el3_lp_desc *el3_lp_descs; 2350 2351 /* Perform initial validation of the Logical Partitions. 
 */
	rc = el3_sp_desc_validate();
	if (rc != 0) {
		ERROR("Logical Partition validation failed!\n");
		return rc;
	}

	el3_lp_descs = get_el3_lp_array();

	/* Run each EL3 LP's init hook; abort on the first failure. */
	INFO("Logical Secure Partition init start.\n");
	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
		rc = el3_lp_descs[i].init();
		if (rc != 0) {
			ERROR("Logical SP (0x%x) Failed to Initialize\n",
			      el3_lp_descs[i].sp_id);
			return rc;
		}
		VERBOSE("Logical SP (0x%x) Initialized\n",
			el3_lp_descs[i].sp_id);
	}

	INFO("Logical Secure Partition init completed.\n");

	return rc;
}

/*******************************************************************************
 * This function takes an SP execution context pointer and performs a
 * synchronous entry into the partition on the current physical CPU.
 * Returns the value passed back by the SP on exit (via spm_secure_partition_
 * exit or an SMC handled by the SPMC).
 ******************************************************************************/
uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
{
	uint64_t rc;

	assert(ec != NULL);

	/* Assign the context of the SP to this CPU */
	cm_set_context(&(ec->cpu_ctx), SECURE);

	/* Restore the context assigned above */
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	/* Invalidate TLBs at EL1. */
	tlbivmalle1();
	dsbish();

	/* Enter Secure Partition */
	rc = spm_secure_partition_enter(&ec->c_rt_ctx);

	/* Save secure state */
	cm_el1_sysregs_context_save(SECURE);

	return rc;
}

/*******************************************************************************
 * SPMC Helper Functions.
 ******************************************************************************/
/*
 * Deferred BL32 init hook: run the primary SP's initialisation entry point.
 * Returns 1 on success, 0 on failure (bl31 deferred-init convention).
 */
static int32_t sp_init(void)
{
	uint64_t rc;
	struct secure_partition_desc *sp;
	struct sp_exec_ctx *ec;

	sp = spmc_get_current_sp_ctx();
	ec = spmc_get_sp_ec(sp);
	/* Mark this execution context as running its init sequence. */
	ec->rt_model = RT_MODEL_INIT;
	ec->rt_state = RT_STATE_RUNNING;

	INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);

	rc = spmc_sp_synchronous_entry(ec);
	if (rc != 0) {
		/* Indicate SP init was not successful.
 */
		ERROR("SP (0x%x) failed to initialize (%lu).\n",
		      sp->sp_id, rc);
		return 0;
	}

	/* SP is now idle, waiting for messages. */
	ec->rt_state = RT_STATE_WAITING;
	INFO("Secure Partition initialized.\n");

	return 1;
}

/*
 * Reset every SP descriptor to a known-invalid, empty state before any
 * manifest parsing assigns real values.
 * (NOTE(review): "initalize" is a typo for "initialize"; renaming would
 * require touching the caller in spmc_setup.)
 */
static void initalize_sp_descs(void)
{
	struct secure_partition_desc *sp;

	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
		sp = &sp_desc[i];
		sp->sp_id = INV_SP_ID;
		sp->mailbox.rx_buffer = NULL;
		sp->mailbox.tx_buffer = NULL;
		sp->mailbox.state = MAILBOX_STATE_EMPTY;
		sp->secondary_ep = 0;
	}
}

/* Reset every NS endpoint descriptor to an empty state. */
static void initalize_ns_ep_descs(void)
{
	struct ns_endpoint_desc *ns_ep;

	for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
		ns_ep = &ns_ep_desc[i];
		/*
		 * Clashes with the Hypervisor ID but will not be a
		 * problem in practice.
		 */
		ns_ep->ns_ep_id = 0;
		ns_ep->ffa_version = 0;
		ns_ep->mailbox.rx_buffer = NULL;
		ns_ep->mailbox.tx_buffer = NULL;
		ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
	}
}

/*******************************************************************************
 * Initialize SPMC attributes for the SPMD.
 * Reports this SPMC's FF-A version, execution state and ID to the dispatcher.
 ******************************************************************************/
void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
{
	spmc_attrs->major_version = FFA_VERSION_SPMC_MAJOR;
	spmc_attrs->minor_version = FFA_VERSION_SPMC_MINOR;
	spmc_attrs->exec_state = MODE_RW_64;
	spmc_attrs->spmc_id = FFA_SPMC_ID;
}

/*******************************************************************************
 * Initialize contexts of all Secure Partitions.
 ******************************************************************************/
int32_t spmc_setup(void)
{
	int32_t ret;
	uint32_t flags;

	/* Initialize endpoint descriptors */
	initalize_sp_descs();
	initalize_ns_ep_descs();

	/*
	 * Retrieve the information of the datastore for tracking shared memory
	 * requests allocated by platform code and zero the region if available.
	 */
	ret = plat_spmc_shmem_datastore_get(&spmc_shmem_obj_state.data,
					    &spmc_shmem_obj_state.data_size);
	if (ret != 0) {
		ERROR("Failed to obtain memory descriptor backing store!\n");
		return ret;
	}
	memset(spmc_shmem_obj_state.data, 0, spmc_shmem_obj_state.data_size);

	/* Setup logical SPs. */
	ret = logical_sp_init();
	if (ret != 0) {
		ERROR("Failed to initialize Logical Partitions.\n");
		return ret;
	}

	/* Perform physical SP setup. */

	/* Disable MMU at EL1 (initialized by BL2) */
	disable_mmu_icache_el1();

	/* Initialize context of the SP */
	INFO("Secure Partition context setup start.\n");

	ret = find_and_prepare_sp_context();
	if (ret != 0) {
		ERROR("Error in SP finding and context preparation.\n");
		return ret;
	}

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmc_pm);

	/*
	 * Register an interrupt handler for S-EL1 interrupts
	 * when generated during code executing in the
	 * non-secure state.
	 */
	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					      spmc_sp_interrupt_handler,
					      flags);
	if (ret != 0) {
		/* Unrecoverable: the SPMC cannot operate without this hook. */
		ERROR("Failed to register interrupt handler! (%d)\n", ret);
		panic();
	}

	/* Register init function for deferred init.
 */
	bl31_register_bl32_init(&sp_init);

	INFO("Secure Partition setup done.\n");

	return 0;
}

/*******************************************************************************
 * Secure Partition Manager SMC handler.
 * Dispatches every FF-A ABI supported by this SPMC to its dedicated handler;
 * unknown function IDs get FFA_ERROR(NOT_SUPPORTED).
 ******************************************************************************/
uint64_t spmc_smc_handler(uint32_t smc_fid,
			  bool secure_origin,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	switch (smc_fid) {

	/* Setup and discovery ABIs. */
	case FFA_VERSION:
		return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
					   x4, cookie, handle, flags);

	case FFA_SPM_ID_GET:
		return ffa_spm_id_get_handler(smc_fid, secure_origin, x1, x2,
					      x3, x4, cookie, handle, flags);

	case FFA_ID_GET:
		return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3,
					  x4, cookie, handle, flags);

	case FFA_FEATURES:
		return ffa_features_handler(smc_fid, secure_origin, x1, x2, x3,
					    x4, cookie, handle, flags);

	case FFA_SECONDARY_EP_REGISTER_SMC64:
		return ffa_sec_ep_register_handler(smc_fid, secure_origin, x1,
						   x2, x3, x4, cookie, handle,
						   flags);

	/* Direct messaging ABIs. */
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_REQ2_SMC64:
		return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
					      x3, x4, cookie, handle, flags);

	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP2_SMC64:
		return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
					       x3, x4, cookie, handle, flags);

	/* RX/TX buffer management ABIs. */
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
		return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					cookie, handle, flags);

	case FFA_RXTX_UNMAP:
		return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3,
					  x4, cookie, handle, flags);

	case FFA_PARTITION_INFO_GET:
		return partition_info_get_handler(smc_fid, secure_origin, x1,
						  x2, x3, x4, cookie, handle,
						  flags);

	case FFA_RX_RELEASE:
		return rx_release_handler(smc_fid, secure_origin, x1, x2, x3,
					  x4, cookie, handle, flags);

	/* Scheduling / runtime ABIs. */
	case FFA_MSG_WAIT:
		return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					cookie, handle, flags);

	case FFA_ERROR:
		return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					 cookie, handle, flags);

	case FFA_MSG_RUN:
		return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4,
				       cookie, handle, flags);

	/* Memory management ABIs. */
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
		return spmc_ffa_mem_send(smc_fid, secure_origin, x1, x2, x3, x4,
					 cookie, handle, flags);

	case FFA_MEM_FRAG_TX:
		return spmc_ffa_mem_frag_tx(smc_fid, secure_origin, x1, x2, x3,
					    x4, cookie, handle, flags);

	case FFA_MEM_FRAG_RX:
		return spmc_ffa_mem_frag_rx(smc_fid, secure_origin, x1, x2, x3,
					    x4, cookie, handle, flags);

	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
		return spmc_ffa_mem_retrieve_req(smc_fid, secure_origin, x1, x2,
						 x3, x4, cookie, handle, flags);

	case FFA_MEM_RELINQUISH:
		return spmc_ffa_mem_relinquish(smc_fid, secure_origin, x1, x2,
					       x3, x4, cookie, handle, flags);

	case FFA_MEM_RECLAIM:
		return spmc_ffa_mem_reclaim(smc_fid, secure_origin, x1, x2, x3,
					    x4, cookie, handle, flags);
	case FFA_CONSOLE_LOG_SMC32:
	case FFA_CONSOLE_LOG_SMC64:
		return spmc_ffa_console_log(smc_fid, secure_origin, x1, x2, x3,
					    x4, cookie, handle, flags);

	case FFA_MEM_PERM_GET_SMC32:
	case FFA_MEM_PERM_GET_SMC64:
		return ffa_mem_perm_get_handler(smc_fid, secure_origin, x1, x2,
						x3, x4, cookie, handle, flags);

	case FFA_MEM_PERM_SET_SMC32:
	case FFA_MEM_PERM_SET_SMC64:
		return ffa_mem_perm_set_handler(smc_fid, secure_origin, x1, x2,
						x3, x4, cookie, handle, flags);

	default:
		WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
		break;
	}
	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
}

/*******************************************************************************
 * This function is the handler registered for S-EL1 interrupts by the SPMC. It
 * validates the interrupt and upon success arranges entry into the SP for
 * handling the interrupt.
 ******************************************************************************/
static uint64_t spmc_sp_interrupt_handler(uint32_t id,
					  uint32_t flags,
					  void *handle,
					  void *cookie)
{
	struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
	struct sp_exec_ctx *ec;
	uint32_t linear_id = plat_my_core_pos();

	/* Sanity check for a NULL pointer dereference. */
	assert(sp != NULL);

	/* Check the security state when the exception was generated. */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Panic if not an S-EL1 Partition. */
	if (sp->runtime_el != S_EL1) {
		ERROR("Interrupt received for a non S-EL1 SP on core%u.\n",
		      linear_id);
		panic();
	}

	/* Obtain a reference to the SP execution context. */
	ec = spmc_get_sp_ec(sp);

	/* Ensure that the execution context is in waiting state else panic. */
	if (ec->rt_state != RT_STATE_WAITING) {
		ERROR("SP EC on core%u is not waiting (%u), it is (%u).\n",
		      linear_id, RT_STATE_WAITING, ec->rt_state);
		panic();
	}

	/* Update the runtime model and state of the partition.
 */
	ec->rt_model = RT_MODEL_INTR;
	ec->rt_state = RT_STATE_RUNNING;

	VERBOSE("SP (0x%x) interrupt start on core%u.\n", sp->sp_id, linear_id);

	/*
	 * Forward the interrupt to the S-EL1 SP. The interrupt ID is not
	 * populated as the SP can determine this by itself.
	 * The flags field is forced to 0 mainly to pass the SVE hint bit
	 * cleared for consumption by the lower EL.
	 */
	return spmd_smc_switch_state(FFA_INTERRUPT, false,
				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				     handle, 0ULL, sp->ffa_version);
}