// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2020-2025, Linaro Limited.
 * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
 */

#include <assert.h>
#include <ffa.h>
#include <initcall.h>
#include <io.h>
#include <kernel/dt.h>
#include <kernel/interrupt.h>
#include <kernel/notif.h>
#include <kernel/panic.h>
#include <kernel/secure_partition.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/thread_spmc.h>
#include <kernel/virtualization.h>
#include <libfdt.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <optee_ffa.h>
#include <optee_msg.h>
#include <optee_rpc_cmd.h>
#include <sm/optee_smc.h>
#include <string.h>
#include <sys/queue.h>
#include <tee/entry_std.h>
#include <tee/uuid.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

#if defined(CFG_CORE_SEL1_SPMC)
struct mem_op_state {
	bool mem_share;
	struct mobj_ffa *mf;
	unsigned int page_count;
	unsigned int region_count;
	unsigned int current_page_idx;
};

struct mem_frag_state {
	struct mem_op_state op;
	tee_mm_entry_t *mm;
	unsigned int frag_offset;
	SLIST_ENTRY(mem_frag_state) link;
};
#endif

struct notif_vm_bitmap {
	bool initialized;
	int do_bottom_half_value;
	uint64_t pending;
	uint64_t bound;
};

STAILQ_HEAD(spmc_lsp_desc_head, spmc_lsp_desc);

static struct spmc_lsp_desc_head lsp_head __nex_data =
	STAILQ_HEAD_INITIALIZER(lsp_head);

static unsigned int spmc_notif_lock __nex_data = SPINLOCK_UNLOCK;
static bool spmc_notif_is_ready __nex_bss;
static int notif_intid __nex_data __maybe_unused = -1;

/* Id used to look up the guest specific struct notif_vm_bitmap */
static unsigned int notif_vm_bitmap_id __nex_bss;
/* Notification state when ns-virtualization isn't enabled */
static struct notif_vm_bitmap default_notif_vm_bitmap;

/* Initialized in spmc_init() below */
static struct spmc_lsp_desc optee_core_lsp;
#ifdef CFG_CORE_SEL1_SPMC
/*
 * Representation of the internal SPMC when OP-TEE is the S-EL1 SPMC.
 * Initialized in spmc_init() below.
 */
static struct spmc_lsp_desc optee_spmc_lsp;
/* FF-A ID of the SPMD. This is only valid when OP-TEE is the S-EL1 SPMC. */
static uint16_t spmd_id __nex_bss;

/*
 * If struct ffa_rxtx::size is 0 the RX/TX buffers are not mapped or
 * initialized.
 *
 * struct ffa_rxtx::spinlock protects the variables below from concurrent
 * access; this includes use of the content of struct ffa_rxtx::rx and
 * @frag_state_head.
 *
 * struct ffa_rxtx::tx_is_mine is true when we may write to struct
 * ffa_rxtx::tx and false when it is owned by normal world.
 *
 * Note that we can't prevent normal world from updating the content of
 * these buffers, so we must always be careful when reading, even while
 * holding the lock.
 */
static struct ffa_rxtx my_rxtx __nex_bss;

static bool is_nw_buf(struct ffa_rxtx *rxtx)
{
	return rxtx == &my_rxtx;
}

static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
	SLIST_HEAD_INITIALIZER(&frag_state_head);

#else
/* FF-A ID of the external SPMC */
static uint16_t spmc_id __nex_bss;
static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE) __nex_bss;
static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE) __nex_bss;
static struct ffa_rxtx my_rxtx __nex_data = {
	.rx = __rx_buf,
	.tx = __tx_buf,
	.size = sizeof(__rx_buf),
};
#endif

bool spmc_is_reserved_id(uint16_t id)
{
#ifdef CFG_CORE_SEL1_SPMC
	return id == spmd_id;
#else
	return id == spmc_id;
#endif
}

struct spmc_lsp_desc *spmc_find_lsp_by_sp_id(uint16_t sp_id)
{
	struct spmc_lsp_desc *desc = NULL;

	STAILQ_FOREACH(desc, &lsp_head, link)
		if (desc->sp_id == sp_id)
			return desc;

	return NULL;
}

static uint32_t swap_src_dst(uint32_t src_dst)
{
	return (src_dst >> 16) | (src_dst << 16);
}

static uint16_t get_sender_id(uint32_t src_dst)
{
	return src_dst >> 16;
}

void spmc_set_args(struct thread_smc_1_2_regs *args, uint32_t fid,
		   uint32_t src_dst, uint32_t w2, uint32_t w3, uint32_t w4,
		   uint32_t w5)
{
	*args = (struct thread_smc_1_2_regs){
		.a0 = fid,
		.a1 = src_dst,
		.a2 = w2,
		.a3 = w3,
		.a4 = w4,
		.a5 = w5,
	};
}

static void set_simple_ret_val(struct thread_smc_1_2_regs *args, int ffa_ret)
{
	if (ffa_ret)
		spmc_set_args(args, FFA_ERROR, 0, ffa_ret, 0, 0, 0);
	else
		spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
}

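/*
 * Negotiate the FF-A version to use with an endpoint. @vers is the
 * version proposed by the caller. The negotiated version is saved in
 * @rxtx->ffa_vers and our own version is returned, as recommended by the
 * specification.
 */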
uint32_t spmc_exchange_version(uint32_t vers, struct ffa_rxtx *rxtx)
{
	uint32_t major_vers = FFA_GET_MAJOR_VERSION(vers);
	uint32_t minor_vers = FFA_GET_MINOR_VERSION(vers);
	uint32_t my_vers = FFA_VERSION_1_2;
	uint32_t my_major_vers = 0;
	uint32_t my_minor_vers = 0;

	my_major_vers = FFA_GET_MAJOR_VERSION(my_vers);
	my_minor_vers = FFA_GET_MINOR_VERSION(my_vers);

	/*
	 * No locking; if the caller makes concurrent calls to this it's
	 * only making a mess for itself. We must be able to renegotiate
	 * the FF-A version in order to support differing versions between
	 * the loader and the driver.
	 *
	 * Callers should use the version requested if we return a matching
	 * major version and a matching or larger minor version. The caller
	 * should downgrade to our minor version if our minor version is
	 * smaller. Regardless, always return our version as recommended by
	 * the specification.
	 */
	if (major_vers == my_major_vers) {
		if (minor_vers > my_minor_vers)
			rxtx->ffa_vers = my_vers;
		else
			rxtx->ffa_vers = vers;
	}

	return my_vers;
}

static bool is_ffa_success(uint32_t fid)
{
#ifdef ARM64
	if (fid == FFA_SUCCESS_64)
		return true;
#endif
	return fid == FFA_SUCCESS_32;
}

static int32_t get_ffa_ret_code(const struct thread_smc_args *args)
{
	if (is_ffa_success(args->a0))
		return FFA_OK;
	if (args->a0 == FFA_ERROR && args->a2)
		return args->a2;
	return FFA_NOT_SUPPORTED;
}

static int ffa_simple_call(uint32_t fid, unsigned long a1, unsigned long a2,
			   unsigned long a3, unsigned long a4)
{
	struct thread_smc_args args = {
		.a0 = fid,
		.a1 = a1,
		.a2 = a2,
		.a3 = a3,
		.a4 = a4,
	};

	thread_smccc(&args);

	return get_ffa_ret_code(&args);
}

static int __maybe_unused ffa_features(uint32_t id)
{
	return ffa_simple_call(FFA_FEATURES, id, 0, 0, 0);
}

static int __maybe_unused ffa_set_notification(uint16_t dst, uint16_t src,
					       uint32_t flags, uint64_t bitmap)
{
	return ffa_simple_call(FFA_NOTIFICATION_SET,
			       SHIFT_U32(src, 16) | dst, flags,
			       low32_from_64(bitmap), high32_from_64(bitmap));
}

#if defined(CFG_CORE_SEL1_SPMC)
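/*
 * Handle FFA_FEATURES: report whether a given FF-A function or feature
 * ID is supported and, where applicable, its properties.
 */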
static void handle_features(struct thread_smc_1_2_regs *args)
{
	uint32_t ret_fid = FFA_ERROR;
	uint32_t ret_w2 = FFA_NOT_SUPPORTED;

	switch (args->a1) {
	case FFA_FEATURE_SCHEDULE_RECV_INTR:
		if (spmc_notif_is_ready) {
			ret_fid = FFA_SUCCESS_32;
			ret_w2 = notif_intid;
		}
		break;

#ifdef ARM64
	case FFA_RXTX_MAP_64:
#endif
	case FFA_RXTX_MAP_32:
		ret_fid = FFA_SUCCESS_32;
		ret_w2 = 0; /* 4kB minimum buffer size and alignment boundary */
		break;
#ifdef ARM64
	case FFA_MEM_SHARE_64:
#endif
	case FFA_MEM_SHARE_32:
		ret_fid = FFA_SUCCESS_32;
		/*
		 * Partition manager supports transmission of a memory
		 * transaction descriptor in a buffer dynamically allocated
		 * by the endpoint.
		 */
		ret_w2 = BIT(0);
		break;

	case FFA_ERROR:
	case FFA_VERSION:
	case FFA_SUCCESS_32:
#ifdef ARM64
	case FFA_SUCCESS_64:
#endif
	case FFA_FEATURES:
	case FFA_SPM_ID_GET:
	case FFA_MEM_FRAG_TX:
	case FFA_MEM_RECLAIM:
	case FFA_MSG_SEND_DIRECT_REQ_64:
	case FFA_MSG_SEND_DIRECT_REQ_32:
	case FFA_INTERRUPT:
	case FFA_PARTITION_INFO_GET:
	case FFA_RXTX_UNMAP:
	case FFA_RX_RELEASE:
	case FFA_FEATURE_MANAGED_EXIT_INTR:
	case FFA_NOTIFICATION_BITMAP_CREATE:
	case FFA_NOTIFICATION_BITMAP_DESTROY:
	case FFA_NOTIFICATION_BIND:
	case FFA_NOTIFICATION_UNBIND:
	case FFA_NOTIFICATION_SET:
	case FFA_NOTIFICATION_GET:
	case FFA_NOTIFICATION_INFO_GET_32:
#ifdef ARM64
	case FFA_NOTIFICATION_INFO_GET_64:
#endif
		ret_fid = FFA_SUCCESS_32;
		ret_w2 = FFA_PARAM_MBZ;
		break;
	default:
		break;
	}

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
{
	tee_mm_entry_t *mm = NULL;

	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
		return FFA_INVALID_PARAMETERS;

	mm = tee_mm_alloc(&core_virt_shm_pool, sz);
	if (!mm)
		return FFA_NO_MEMORY;

	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
					  sz / SMALL_PAGE_SIZE,
					  MEM_AREA_NSEC_SHM)) {
		tee_mm_free(mm);
		return FFA_INVALID_PARAMETERS;
	}

	*va_ret = (void *)tee_mm_get_smem(mm);
	return 0;
}

void spmc_handle_spm_id_get(struct thread_smc_1_2_regs *args)
{
	spmc_set_args(args, FFA_SUCCESS_32, FFA_PARAM_MBZ, optee_spmc_lsp.sp_id,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static void unmap_buf(void *va, size_t sz)
{
	tee_mm_entry_t *mm = tee_mm_find(&core_virt_shm_pool, (vaddr_t)va);

	assert(mm);
	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
	tee_mm_free(mm);
}

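/*
 * Handle FFA_RXTX_MAP: map the RX/TX buffer pair of an endpoint. For
 * buffers owned by normal world the addresses are physical and are
 * mapped here; for an SP the addresses are virtual and already mapped.
 */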
void spmc_handle_rxtx_map(struct thread_smc_1_2_regs *args,
			  struct ffa_rxtx *rxtx)
{
	int rc = 0;
	unsigned int sz = 0;
	paddr_t rx_pa = 0;
	paddr_t tx_pa = 0;
	void *rx = NULL;
	void *tx = NULL;

	cpu_spin_lock(&rxtx->spinlock);

	if (args->a3 & GENMASK_64(63, 6)) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	sz = args->a3 * SMALL_PAGE_SIZE;
	if (!sz) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}
	/* TX/RX are swapped compared to the caller */
	tx_pa = args->a2;
	rx_pa = args->a1;

	if (rxtx->size) {
		rc = FFA_DENIED;
		goto out;
	}

	/*
	 * If the buffer comes from an SP the address is virtual and already
	 * mapped.
	 */
	if (is_nw_buf(rxtx)) {
		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
			enum teecore_memtypes mt = MEM_AREA_NEX_NSEC_SHM;
			bool tx_alloced = false;

			/*
			 * With virtualization we establish this mapping in
			 * the nexus mapping which then is replicated to
			 * each partition.
			 *
			 * This means that this mapping must be done before
			 * any partition is created and then must not be
			 * changed.
			 */

			/*
			 * core_mmu_add_mapping() may reuse previous
			 * mappings. First check if there are any mappings
			 * to reuse so we know how to clean up in case of
			 * failure.
			 */
			tx = phys_to_virt(tx_pa, mt, sz);
			rx = phys_to_virt(rx_pa, mt, sz);
			if (!tx) {
				tx = core_mmu_add_mapping(mt, tx_pa, sz);
				if (!tx) {
					rc = FFA_NO_MEMORY;
					goto out;
				}
				tx_alloced = true;
			}
			if (!rx)
				rx = core_mmu_add_mapping(mt, rx_pa, sz);

			if (!rx) {
				if (tx_alloced && tx)
					core_mmu_remove_mapping(mt, tx, sz);
				rc = FFA_NO_MEMORY;
				goto out;
			}
		} else {
			rc = map_buf(tx_pa, sz, &tx);
			if (rc)
				goto out;
			rc = map_buf(rx_pa, sz, &rx);
			if (rc) {
				unmap_buf(tx, sz);
				goto out;
			}
		}
		rxtx->tx = tx;
		rxtx->rx = rx;
	} else {
		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}

		if (!virt_to_phys((void *)tx_pa) ||
		    !virt_to_phys((void *)rx_pa)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}

		rxtx->tx = (void *)tx_pa;
		rxtx->rx = (void *)rx_pa;
	}

	rxtx->size = sz;
	rxtx->tx_is_mine = true;
	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
out:
	cpu_spin_unlock(&rxtx->spinlock);
	set_simple_ret_val(args, rc);
}

void spmc_handle_rxtx_unmap(struct thread_smc_1_2_regs *args,
			    struct ffa_rxtx *rxtx)
{
	int rc = FFA_INVALID_PARAMETERS;

	cpu_spin_lock(&rxtx->spinlock);

	if (!rxtx->size)
		goto out;

	/*
	 * We don't unmap the SP memory as the SP might still use it.
	 * We also avoid making changes to nexus mappings at this stage
	 * since there currently isn't a way to replicate those changes to
	 * all partitions.
	 */
	if (is_nw_buf(rxtx) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		unmap_buf(rxtx->rx, rxtx->size);
		unmap_buf(rxtx->tx, rxtx->size);
	}
	rxtx->size = 0;
	rxtx->rx = NULL;
	rxtx->tx = NULL;
	rc = 0;
out:
	cpu_spin_unlock(&rxtx->spinlock);
	set_simple_ret_val(args, rc);
}

void spmc_handle_rx_release(struct thread_smc_1_2_regs *args,
			    struct ffa_rxtx *rxtx)
{
	int rc = 0;

	cpu_spin_lock(&rxtx->spinlock);
	/* The sender's RX is our TX */
	if (!rxtx->size || rxtx->tx_is_mine) {
		rc = FFA_DENIED;
	} else {
		rc = 0;
		rxtx->tx_is_mine = true;
	}
	cpu_spin_unlock(&rxtx->spinlock);

	set_simple_ret_val(args, rc);
}

static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
{
	return !w0 && !w1 && !w2 && !w3;
}

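/*
 * Fill in one partition info descriptor at index @idx in @buf in the
 * format used by FF-A version @ffa_vers. From FF-A 1.1 the descriptor
 * also carries the partition UUID.
 */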
TEE_Result spmc_fill_partition_entry(uint32_t ffa_vers, void *buf, size_t blen,
				     size_t idx, uint16_t endpoint_id,
				     uint16_t execution_context,
				     uint32_t part_props,
				     const uint32_t uuid_words[4])
{
	struct ffa_partition_info_x *fpi = NULL;
	size_t fpi_size = sizeof(*fpi);

	if (ffa_vers >= FFA_VERSION_1_1)
		fpi_size += FFA_UUID_SIZE;

	if ((idx + 1) * fpi_size > blen)
		return TEE_ERROR_OUT_OF_MEMORY;

	fpi = (void *)((vaddr_t)buf + idx * fpi_size);
	fpi->id = endpoint_id;
	/* Number of execution contexts implemented by this partition */
	fpi->execution_context = execution_context;

	fpi->partition_properties = part_props;

	/* In FF-A 1.0 only bits [2:0] are defined, let's mask the others */
	if (ffa_vers < FFA_VERSION_1_1)
		fpi->partition_properties &= FFA_PART_PROP_DIRECT_REQ_RECV |
					     FFA_PART_PROP_DIRECT_REQ_SEND |
					     FFA_PART_PROP_INDIRECT_MSGS;

	if (ffa_vers >= FFA_VERSION_1_1) {
		if (uuid_words)
			memcpy(fpi->uuid, uuid_words, FFA_UUID_SIZE);
		else
			memset(fpi->uuid, 0, FFA_UUID_SIZE);
	}

	return TEE_SUCCESS;
}

static TEE_Result lsp_partition_info_get(uint32_t ffa_vers, void *buf,
					 size_t buf_size, size_t *elem_count,
					 const uint32_t uuid_words[4],
					 bool count_only)
{
	struct spmc_lsp_desc *desc = NULL;
	TEE_Result res = TEE_SUCCESS;
	size_t c = *elem_count;

	STAILQ_FOREACH(desc, &lsp_head, link) {
		/*
		 * LSPs (OP-TEE SPMC) without an assigned UUID are not
		 * proper LSPs and shouldn't be reported here.
		 */
		if (is_nil_uuid(desc->uuid_words[0], desc->uuid_words[1],
				desc->uuid_words[2], desc->uuid_words[3]))
			continue;

		if (uuid_words && memcmp(uuid_words, desc->uuid_words,
					 sizeof(desc->uuid_words)))
			continue;

		if (!count_only && !res)
			res = spmc_fill_partition_entry(ffa_vers, buf, buf_size,
							c, desc->sp_id,
							CFG_TEE_CORE_NB_CORE,
							desc->properties,
							desc->uuid_words);
		c++;
	}

	*elem_count = c;

	return res;
}

void spmc_handle_partition_info_get(struct thread_smc_1_2_regs *args,
				    struct ffa_rxtx *rxtx)
{
	TEE_Result res = TEE_SUCCESS;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t fpi_size = 0;
	uint32_t rc = 0;
	bool count_only = args->a5 & FFA_PARTITION_INFO_GET_COUNT_FLAG;
	uint32_t uuid_words[4] = { args->a1, args->a2, args->a3, args->a4, };
	uint32_t *uuid = uuid_words;
	size_t count = 0;

	if (!count_only) {
		cpu_spin_lock(&rxtx->spinlock);

		if (!rxtx->size || !rxtx->tx_is_mine) {
			rc = FFA_BUSY;
			goto out;
		}
	}

	if (is_nil_uuid(uuid[0], uuid[1], uuid[2], uuid[3]))
		uuid = NULL;

	if (lsp_partition_info_get(rxtx->ffa_vers, rxtx->tx, rxtx->size,
				   &count, uuid, count_only)) {
		ret_fid = FFA_ERROR;
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}
	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
		res = sp_partition_info_get(rxtx->ffa_vers, rxtx->tx,
					    rxtx->size, uuid, &count,
					    count_only);
		if (res != TEE_SUCCESS) {
			ret_fid = FFA_ERROR;
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
	}

	rc = count;
	ret_fid = FFA_SUCCESS_32;
out:
	if (ret_fid == FFA_SUCCESS_32 && !count_only &&
	    rxtx->ffa_vers >= FFA_VERSION_1_1)
		fpi_size = sizeof(struct ffa_partition_info_x) + FFA_UUID_SIZE;

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, fpi_size,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
	if (!count_only) {
		rxtx->tx_is_mine = false;
		cpu_spin_unlock(&rxtx->spinlock);
	}
}

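/* Handle FFA_RUN: resume a preempted S-EL0 SP on the given thread */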
static void spmc_handle_run(struct thread_smc_1_2_regs *args)
{
	uint16_t endpoint = FFA_TARGET_INFO_GET_SP_ID(args->a1);
	uint16_t thread_id = FFA_TARGET_INFO_GET_VCPU_ID(args->a1);
	uint32_t rc = FFA_INVALID_PARAMETERS;

	/*
	 * OP-TEE core threads are only preempted using a controlled exit,
	 * so FFA_RUN mustn't be used to resume such threads.
	 *
	 * The OP-TEE SPMC is not preempted at all; it's an error to try to
	 * resume that ID.
	 */
	if (spmc_find_lsp_by_sp_id(endpoint))
		goto out;

	/*
	 * The endpoint should be an S-EL0 SP; try to resume the SP from
	 * preempted into busy state.
	 */
	rc = spmc_sp_resume_from_preempted(endpoint);
	if (rc)
		goto out;
	thread_resume_from_rpc(thread_id, 0, 0, 0, 0);
	/*
	 * thread_resume_from_rpc() only returns if the thread_id
	 * is invalid.
	 */
	rc = FFA_INVALID_PARAMETERS;

out:
	set_simple_ret_val(args, rc);
}
#endif /*CFG_CORE_SEL1_SPMC*/

static struct notif_vm_bitmap *get_notif_vm_bitmap(struct guest_partition *prtn,
						   uint16_t vm_id)
{
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		if (!prtn)
			return NULL;
		assert(vm_id == virt_get_guest_id(prtn));
		return virt_get_guest_spec_data(prtn, notif_vm_bitmap_id);
	}
	if (vm_id)
		return NULL;
	return &default_notif_vm_bitmap;
}

static uint32_t spmc_enable_async_notif(uint32_t bottom_half_value,
					uint16_t vm_id)
{
	struct guest_partition *prtn = NULL;
	struct notif_vm_bitmap *nvb = NULL;
	uint32_t old_itr_status = 0;
	uint32_t res = 0;

	if (!spmc_notif_is_ready) {
		/*
		 * This should never happen, not if normal world respects the
		 * exchanged capabilities.
		 */
		EMSG("Asynchronous notifications are not ready");
		return TEE_ERROR_NOT_IMPLEMENTED;
	}

	if (bottom_half_value >= OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE) {
		EMSG("Invalid bottom half value %"PRIu32, bottom_half_value);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	prtn = virt_get_guest(vm_id);
	nvb = get_notif_vm_bitmap(prtn, vm_id);
	if (!nvb) {
		res = TEE_ERROR_BAD_PARAMETERS;
		goto out;
	}

	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
	nvb->do_bottom_half_value = bottom_half_value;
	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);

	notif_deliver_atomic_event(NOTIF_EVENT_STARTED, vm_id);
	res = TEE_SUCCESS;
out:
	virt_put_guest(prtn);
	return res;
}

static uint32_t get_direct_resp_fid(uint32_t fid)
{
	assert(fid == FFA_MSG_SEND_DIRECT_REQ_64 ||
	       fid == FFA_MSG_SEND_DIRECT_REQ_32);

	if (OPTEE_SMC_IS_64(fid))
		return FFA_MSG_SEND_DIRECT_RESP_64;
	return FFA_MSG_SEND_DIRECT_RESP_32;
}

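/*
 * Start or resume a yielding call on an OP-TEE thread. Only returns
 * directly here if a thread couldn't be allocated or resumed.
 */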
static void handle_yielding_call(struct thread_smc_1_2_regs *args)
{
	uint32_t direct_resp_fid = get_direct_resp_fid(args->a0);
	TEE_Result res = TEE_SUCCESS;

	thread_check_canaries();

#ifdef ARM64
	/* Saving this for an eventual RPC */
	thread_get_core_local()->direct_resp_fid = direct_resp_fid;
#endif

	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
		/* Note connection to struct thread_rpc_arg::ret */
		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
				       0);
		res = TEE_ERROR_BAD_PARAMETERS;
	} else {
		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
				     args->a6, args->a7);
		res = TEE_ERROR_BUSY;
	}
	spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1),
		      0, res, 0, 0);
}

static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
{
	uint64_t cookie = reg_pair_to_64(a5, a4);
	uint32_t res = 0;

	res = mobj_ffa_unregister_by_cookie(cookie);
	switch (res) {
	case TEE_SUCCESS:
	case TEE_ERROR_ITEM_NOT_FOUND:
		return 0;
	case TEE_ERROR_BUSY:
		EMSG("res %#"PRIx32, res);
		return FFA_BUSY;
	default:
		EMSG("res %#"PRIx32, res);
		return FFA_INVALID_PARAMETERS;
	}
}

static void handle_blocking_call(struct thread_smc_1_2_regs *args)
{
	uint32_t direct_resp_fid = get_direct_resp_fid(args->a0);
	uint32_t sec_caps = 0;

	switch (args->a3) {
	case OPTEE_FFA_GET_API_VERSION:
		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
			      0);
		break;
	case OPTEE_FFA_GET_OS_VERSION:
		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
			      CFG_OPTEE_REVISION_MAJOR,
			      CFG_OPTEE_REVISION_MINOR,
			      TEE_IMPL_GIT_SHA1 >> 32);
		break;
	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
		sec_caps = OPTEE_FFA_SEC_CAP_ARG_OFFSET;
		if (spmc_notif_is_ready)
			sec_caps |= OPTEE_FFA_SEC_CAP_ASYNC_NOTIF;
		if (IS_ENABLED(CFG_RPMB_ANNOUNCE_PROBE_CAP))
			sec_caps |= OPTEE_FFA_SEC_CAP_RPMB_PROBE;
		if (IS_ENABLED(CFG_CORE_DYN_PROTMEM))
			sec_caps |= OPTEE_FFA_SEC_CAP_PROTMEM;
		spmc_set_args(args, direct_resp_fid,
			      swap_src_dst(args->a1), 0, 0,
			      THREAD_RPC_MAX_NUM_PARAMS, sec_caps);
		break;
	case OPTEE_FFA_UNREGISTER_SHM:
		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
			      handle_unregister_shm(args->a4, args->a5), 0, 0);
		break;
	case OPTEE_FFA_ENABLE_ASYNC_NOTIF:
		spmc_set_args(args, direct_resp_fid,
			      swap_src_dst(args->a1), 0,
			      spmc_enable_async_notif(args->a4,
						      FFA_SRC(args->a1)),
			      0, 0);
		break;
#ifdef CFG_CORE_DYN_PROTMEM
	case OPTEE_FFA_RELEASE_PROTMEM:
		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
			      handle_unregister_shm(args->a4, args->a5), 0, 0);
		break;
#endif
	default:
		EMSG("Unhandled blocking service ID %#"PRIx32,
		     (uint32_t)args->a3);
		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
			      TEE_ERROR_BAD_PARAMETERS, 0, 0);
	}
}

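/*
 * Handle framework messages carried in direct requests: VM created and
 * VM destroyed messages from the hypervisor and FF-A version negotiation
 * requests.
 */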
static void handle_framework_direct_request(struct thread_smc_1_2_regs *args)
{
	uint32_t direct_resp_fid = get_direct_resp_fid(args->a0);
	uint32_t w0 = FFA_ERROR;
	uint32_t w1 = FFA_PARAM_MBZ;
	uint32_t w2 = FFA_NOT_SUPPORTED;
	uint32_t w3 = FFA_PARAM_MBZ;

	switch (args->a2 & FFA_MSG_TYPE_MASK) {
	case FFA_MSG_SEND_VM_CREATED:
		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
			uint16_t guest_id = args->a5;
			TEE_Result res = virt_guest_created(guest_id);

			w0 = direct_resp_fid;
			w1 = swap_src_dst(args->a1);
			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_CREATED;
			if (res == TEE_SUCCESS)
				w3 = FFA_OK;
			else if (res == TEE_ERROR_OUT_OF_MEMORY)
				w3 = FFA_DENIED;
			else
				w3 = FFA_INVALID_PARAMETERS;
		}
		break;
	case FFA_MSG_SEND_VM_DESTROYED:
		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
			uint16_t guest_id = args->a5;
			TEE_Result res = virt_guest_destroyed(guest_id);

			w0 = direct_resp_fid;
			w1 = swap_src_dst(args->a1);
			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_DESTROYED;
			if (res == TEE_SUCCESS)
				w3 = FFA_OK;
			else
				w3 = FFA_INVALID_PARAMETERS;
		}
		break;
	case FFA_MSG_VERSION_REQ:
		w0 = direct_resp_fid;
		w1 = swap_src_dst(args->a1);
		w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_VERSION_RESP;
		w3 = spmc_exchange_version(args->a3, &my_rxtx);
		break;
	default:
		break;
	}
	spmc_set_args(args, w0, w1, w2, w3, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static void optee_lsp_handle_direct_request(struct thread_smc_1_2_regs *args)
{
	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
		handle_framework_direct_request(args);
		return;
	}

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
	    virt_set_guest(get_sender_id(args->a1))) {
		spmc_set_args(args, get_direct_resp_fid(args->a0),
			      swap_src_dst(args->a1), 0,
			      TEE_ERROR_ITEM_NOT_FOUND, 0, 0);
		return;
	}

	if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
		handle_yielding_call(args);
	else
		handle_blocking_call(args);

	/*
	 * Note that handle_yielding_call() typically only returns if a
	 * thread cannot be allocated or found. virt_unset_guest() is also
	 * called from thread_state_suspend() and thread_state_free().
	 */
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();
}

static void __maybe_unused
optee_spmc_lsp_handle_direct_request(struct thread_smc_1_2_regs *args)
{
	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK)
		handle_framework_direct_request(args);
	else
		set_simple_ret_val(args, FFA_INVALID_PARAMETERS);
}

static void handle_direct_request(struct thread_smc_1_2_regs *args)
{
	struct spmc_lsp_desc *lsp = spmc_find_lsp_by_sp_id(FFA_DST(args->a1));

	if (lsp) {
		lsp->direct_req(args);
	} else {
		int rc = spmc_sp_start_thread(args);

		/*
		 * spmc_sp_start_thread() returns here if the SPs aren't
		 * supported or if all threads are busy.
		 */
		set_simple_ret_val(args, rc);
	}
}

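/*
 * Parse and validate the memory transaction descriptor in @buf, laid out
 * as defined by FF-A version @ffa_vers, into the version independent
 * representation @trans.
 */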
int spmc_read_mem_transaction(uint32_t ffa_vers, void *buf, size_t blen,
			      struct ffa_mem_transaction_x *trans)
{
	uint16_t mem_reg_attr = 0;
	uint32_t flags = 0;
	uint32_t count = 0;
	uint32_t offs = 0;
	uint32_t size = 0;
	size_t n = 0;

	if (!IS_ALIGNED_WITH_TYPE(buf, uint64_t))
		return FFA_INVALID_PARAMETERS;

	if (ffa_vers >= FFA_VERSION_1_1) {
		struct ffa_mem_transaction_1_1 *descr = NULL;

		if (blen < sizeof(*descr))
			return FFA_INVALID_PARAMETERS;

		descr = buf;
		trans->sender_id = READ_ONCE(descr->sender_id);
		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
		flags = READ_ONCE(descr->flags);
		trans->global_handle = READ_ONCE(descr->global_handle);
		trans->tag = READ_ONCE(descr->tag);

		count = READ_ONCE(descr->mem_access_count);
		size = READ_ONCE(descr->mem_access_size);
		offs = READ_ONCE(descr->mem_access_offs);
	} else {
		struct ffa_mem_transaction_1_0 *descr = NULL;

		if (blen < sizeof(*descr))
			return FFA_INVALID_PARAMETERS;

		descr = buf;
		trans->sender_id = READ_ONCE(descr->sender_id);
		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
		flags = READ_ONCE(descr->flags);
		trans->global_handle = READ_ONCE(descr->global_handle);
		trans->tag = READ_ONCE(descr->tag);

		count = READ_ONCE(descr->mem_access_count);
		size = sizeof(struct ffa_mem_access);
		offs = offsetof(struct ffa_mem_transaction_1_0,
				mem_access_array);
	}

	if (mem_reg_attr > UINT8_MAX || flags > UINT8_MAX ||
	    size > UINT8_MAX || count > UINT8_MAX || offs > UINT16_MAX)
		return FFA_INVALID_PARAMETERS;

	/* Check that the endpoint memory access descriptor array fits */
	if (MUL_OVERFLOW(size, count, &n) || ADD_OVERFLOW(offs, n, &n) ||
	    n > blen)
		return FFA_INVALID_PARAMETERS;

	trans->mem_reg_attr = mem_reg_attr;
	trans->flags = flags;
	trans->mem_access_size = size;
	trans->mem_access_count = count;
	trans->mem_access_offs = offs;
	return 0;
}

#if defined(CFG_CORE_SEL1_SPMC)
static int get_acc_perms(vaddr_t mem_acc_base, unsigned int mem_access_size,
			 unsigned int mem_access_count, uint8_t *acc_perms,
			 unsigned int *region_offs)
{
	struct ffa_mem_access_perm *descr = NULL;
	struct ffa_mem_access *mem_acc = NULL;
	unsigned int n = 0;

	for (n = 0; n < mem_access_count; n++) {
		mem_acc = (void *)(mem_acc_base + mem_access_size * n);
		descr = &mem_acc->access_perm;
		if (READ_ONCE(descr->endpoint_id) == optee_core_lsp.sp_id) {
			*acc_perms = READ_ONCE(descr->perm);
			*region_offs = READ_ONCE(mem_acc[n].region_offs);
			return 0;
		}
	}

	return FFA_INVALID_PARAMETERS;
}

static int mem_op_init(bool mem_share, struct ffa_mem_transaction_x *mem_trans,
		       void *buf, size_t blen, unsigned int *page_count,
		       unsigned int *region_count, size_t *addr_range_offs)
{
	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
	struct ffa_mem_region *region_descr = NULL;
	unsigned int region_descr_offs = 0;
	uint16_t exp_mem_reg_attr = 0;
	uint8_t mem_acc_perm = 0;
	size_t n = 0;

	if (mem_share)
		exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
	if (mem_trans->mem_reg_attr != exp_mem_reg_attr)
		return FFA_INVALID_PARAMETERS;

	/* Check that the access permissions match what's expected */
	if (get_acc_perms((vaddr_t)buf + mem_trans->mem_access_offs,
			  mem_trans->mem_access_size,
			  mem_trans->mem_access_count,
			  &mem_acc_perm, &region_descr_offs) ||
	    mem_acc_perm != exp_mem_acc_perm)
		return FFA_INVALID_PARAMETERS;

	/* Check that the composite memory region descriptor fits */
	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
	    n > blen)
		return FFA_INVALID_PARAMETERS;

	if (!IS_ALIGNED_WITH_TYPE((vaddr_t)buf + region_descr_offs,
				  struct ffa_mem_region))
		return FFA_INVALID_PARAMETERS;

	region_descr = (struct ffa_mem_region *)((vaddr_t)buf +
						 region_descr_offs);
	*page_count = READ_ONCE(region_descr->total_page_count);
	*region_count = READ_ONCE(region_descr->address_range_count);
	*addr_range_offs = n;
	return 0;
}

static int add_mem_op_helper(struct mem_op_state *s, void *buf, size_t flen)
{
	unsigned int region_count = flen / sizeof(struct ffa_address_range);
	struct ffa_address_range *arange = NULL;
	unsigned int n = 0;

	if (region_count > s->region_count)
		region_count = s->region_count;

	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
		return FFA_INVALID_PARAMETERS;
	arange = buf;

	for (n = 0; n < region_count; n++) {
		unsigned int page_count = READ_ONCE(arange[n].page_count);
		uint64_t addr = READ_ONCE(arange[n].address);

		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
					  addr, page_count))
			return FFA_INVALID_PARAMETERS;
	}

	s->region_count -= region_count;
	if (s->region_count)
		return region_count * sizeof(*arange);

	if (s->current_page_idx != s->page_count)
		return FFA_INVALID_PARAMETERS;

	return 0;
}

static int add_mem_op_frag(struct mem_frag_state *s, void *buf, size_t flen)
{
	int rc = 0;

	rc = add_mem_op_helper(&s->op, buf, flen);
	if (rc >= 0) {
		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
			/* We're not at the end of the descriptor yet */
			if (s->op.region_count)
				return s->frag_offset;

			/* We're done */
			rc = 0;
		} else {
			rc = FFA_INVALID_PARAMETERS;
		}
	}

	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
	if (rc < 0) {
		mobj_ffa_sel1_spmc_delete(s->op.mf);
	} else {
		if (mobj_ffa_push_to_inactive(s->op.mf)) {
			rc = FFA_INVALID_PARAMETERS;
			mobj_ffa_sel1_spmc_delete(s->op.mf);
		}
	}
	free(s);

	return rc;
}

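/*
 * Tell whether the memory transaction targets an SP rather than OP-TEE
 * itself, in which case it's handled by the SP layer.
 */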
static bool is_sp_op(struct ffa_mem_transaction_x *mem_trans, void *buf)
{
	struct ffa_mem_access_perm *perm = NULL;
	struct ffa_mem_access *mem_acc = NULL;

	if (!IS_ENABLED(CFG_SECURE_PARTITION))
		return false;

	if (mem_trans->mem_access_count < 1)
		return false;

	mem_acc = (void *)((vaddr_t)buf + mem_trans->mem_access_offs);
	perm = &mem_acc->access_perm;

	/*
	 * perm->endpoint_id is read here only to check if the endpoint is
	 * OP-TEE. We read it again later, but there are some additional
	 * checks there to make sure that the data is correct.
	 */
	return READ_ONCE(perm->endpoint_id) != optee_core_lsp.sp_id;
}

static int add_mem_op(bool mem_share, struct ffa_mem_transaction_x *mem_trans,
		      tee_mm_entry_t *mm, void *buf, size_t blen, size_t flen,
		      uint64_t *global_handle)
{
	int rc = 0;
	struct mem_op_state op = { .mem_share = mem_share, };
	size_t addr_range_offs = 0;
	uint64_t cookie = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
	enum mobj_use_case use_case = MOBJ_USE_CASE_NS_SHM;
	size_t n = 0;

	rc = mem_op_init(mem_share, mem_trans, buf, flen, &op.page_count,
			 &op.region_count, &addr_range_offs);
	if (rc)
		return rc;

	if (!op.page_count || !op.region_count)
		return FFA_INVALID_PARAMETERS;

	if (MUL_OVERFLOW(op.region_count,
			 sizeof(struct ffa_address_range), &n) ||
	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
		return FFA_INVALID_PARAMETERS;

	if (mem_trans->global_handle)
		cookie = mem_trans->global_handle;
	if (!mem_share)
		use_case = mem_trans->tag;
	op.mf = mobj_ffa_sel1_spmc_new(cookie, op.page_count, use_case);
	if (!op.mf)
		return FFA_NO_MEMORY;

	if (flen != blen) {
		struct mem_frag_state *s = calloc(1, sizeof(*s));

		if (!s) {
			rc = FFA_NO_MEMORY;
			goto err;
		}
		s->op = op;
		s->mm = mm;
		s->frag_offset = addr_range_offs;

		SLIST_INSERT_HEAD(&frag_state_head, s, link);
		rc = add_mem_op_frag(s, (char *)buf + addr_range_offs,
				     flen - addr_range_offs);

		if (rc >= 0)
			*global_handle = mobj_ffa_get_cookie(op.mf);

		return rc;
	}

	rc = add_mem_op_helper(&op, (char *)buf + addr_range_offs,
			       flen - addr_range_offs);
	if (rc) {
		/*
		 * Number of consumed bytes may be returned instead of 0 for
		 * done.
		 */
		rc = FFA_INVALID_PARAMETERS;
		goto err;
	}

	if (mobj_ffa_push_to_inactive(op.mf)) {
		rc = FFA_INVALID_PARAMETERS;
		goto err;
	}
	*global_handle = mobj_ffa_get_cookie(op.mf);

	return 0;
err:
	mobj_ffa_sel1_spmc_delete(op.mf);
	return rc;
}

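/*
 * Handle a memory share/lend operation where the memory transaction
 * descriptor is passed in a buffer allocated by the endpoint instead of
 * in the registered RX buffer.
 */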
static int handle_mem_op_tmem(bool share_mem, paddr_t pbuf, size_t blen,
			      size_t flen, unsigned int page_count,
			      uint64_t *global_handle, struct ffa_rxtx *rxtx)
{
	struct ffa_mem_transaction_x mem_trans = { };
	int rc = 0;
	size_t len = 0;
	void *buf = NULL;
	tee_mm_entry_t *mm = NULL;
	vaddr_t offs = pbuf & SMALL_PAGE_MASK;

	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
		return FFA_INVALID_PARAMETERS;
	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
		return FFA_INVALID_PARAMETERS;

	/*
	 * Check that the length reported in flen is covered by len even
	 * if the offset is taken into account.
	 */
	if (len < flen || len - offs < flen)
		return FFA_INVALID_PARAMETERS;

	mm = tee_mm_alloc(&core_virt_shm_pool, len);
	if (!mm)
		return FFA_NO_MEMORY;

	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
					  page_count, MEM_AREA_NSEC_SHM)) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}
	buf = (void *)(tee_mm_get_smem(mm) + offs);

	cpu_spin_lock(&rxtx->spinlock);
	rc = spmc_read_mem_transaction(rxtx->ffa_vers, buf, flen, &mem_trans);
	if (rc)
		goto unlock;

	if (is_sp_op(&mem_trans, buf)) {
		if (!share_mem) {
			rc = FFA_DENIED;
			goto unlock;
		}
		rc = spmc_sp_add_share(&mem_trans, buf, blen, flen,
				       global_handle, NULL);
		goto unlock;
	}

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
	    virt_set_guest(mem_trans.sender_id)) {
		rc = FFA_DENIED;
		goto unlock;
	}

	rc = add_mem_op(share_mem, &mem_trans, mm, buf, blen, flen,
			global_handle);

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

unlock:
	cpu_spin_unlock(&rxtx->spinlock);
	if (rc > 0)
		return rc;

	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
out:
	tee_mm_free(mm);
	return rc;
}

static int handle_mem_op_rxbuf(bool share_mem, size_t blen, size_t flen,
			       uint64_t *global_handle, struct ffa_rxtx *rxtx)
{
	struct ffa_mem_transaction_x mem_trans = { };
	int rc = FFA_DENIED;

	cpu_spin_lock(&rxtx->spinlock);

	if (!rxtx->rx || flen > rxtx->size)
		goto out;

	rc = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, flen,
				       &mem_trans);
	if (rc)
		goto out;
	if (is_sp_op(&mem_trans, rxtx->rx)) {
		if (!share_mem) {
			rc = FFA_DENIED;
			goto out;
		}
		rc = spmc_sp_add_share(&mem_trans, rxtx, blen, flen,
				       global_handle, NULL);
		goto out;
	}

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
	    virt_set_guest(mem_trans.sender_id))
		goto out;

	rc = add_mem_op(share_mem, &mem_trans, NULL, rxtx->rx, blen, flen,
			global_handle);

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

out:
	cpu_spin_unlock(&rxtx->spinlock);

	return rc;
}

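/* Handle FFA_MEM_SHARE_32/64 and FFA_MEM_LEND_32/64 requests */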
static void handle_mem_op(struct thread_smc_1_2_regs *args,
			  struct ffa_rxtx *rxtx)
{
	uint32_t tot_len = args->a1;
	uint32_t frag_len = args->a2;
	uint64_t addr = args->a3;
	uint32_t page_count = args->a4;
	uint32_t ret_w1 = 0;
	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
	uint32_t ret_w3 = 0;
	uint32_t ret_fid = FFA_ERROR;
	uint64_t global_handle = 0;
	bool share_mem = false;
	int rc = 0;

	/* Check that the MBZ arguments are indeed 0 */
	if (args->a5 || args->a6 || args->a7)
		goto out;

	/* Check that the fragment length doesn't exceed the total length */
	if (frag_len > tot_len)
		goto out;

	/* Check for 32-bit calling convention */
	if (!OPTEE_SMC_IS_64(args->a0))
		addr &= UINT32_MAX;

	if (args->a0 == FFA_MEM_SHARE_32 || args->a0 == FFA_MEM_SHARE_64)
		share_mem = true;
	else
		share_mem = false;

	if (!addr) {
		/*
		 * The memory transaction descriptor is passed via our rx
		 * buffer.
		 */
		if (page_count)
			goto out;
		rc = handle_mem_op_rxbuf(share_mem, tot_len, frag_len,
					 &global_handle, rxtx);
	} else {
		rc = handle_mem_op_tmem(share_mem, addr, tot_len, frag_len,
					page_count, &global_handle, rxtx);
	}
	if (rc < 0) {
		ret_w2 = rc;
	} else if (rc > 0) {
		ret_fid = FFA_MEM_FRAG_RX;
		ret_w3 = rc;
		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
	} else {
		ret_fid = FFA_SUCCESS_32;
		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
	}
out:
	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
}

static struct mem_frag_state *get_frag_state(uint64_t global_handle)
{
	struct mem_frag_state *s = NULL;

	SLIST_FOREACH(s, &frag_state_head, link)
		if (mobj_ffa_get_cookie(s->op.mf) == global_handle)
			return s;

	return NULL;
}

static void handle_mem_frag_tx(struct thread_smc_1_2_regs *args,
			       struct ffa_rxtx *rxtx)
{
	uint64_t global_handle = reg_pair_to_64(args->a2, args->a1);
	size_t flen = args->a3;
	uint32_t endpoint_id = args->a4;
	struct mem_frag_state *s = NULL;
	tee_mm_entry_t *mm = NULL;
	unsigned int page_count = 0;
	void *buf = NULL;
	uint32_t ret_w1 = 0;
	uint32_t ret_w2 = 0;
	uint32_t ret_w3 = 0;
	uint32_t ret_fid = 0;
	int rc = 0;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		uint16_t guest_id = endpoint_id >> 16;

		if (!guest_id || virt_set_guest(guest_id)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out_set_rc;
		}
	}

	/*
	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
	 * requests.
	 */

	cpu_spin_lock(&rxtx->spinlock);

	s = get_frag_state(global_handle);
	if (!s) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	mm = s->mm;
	if (mm) {
		if (flen > tee_mm_get_bytes(mm)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		page_count = s->op.page_count;
		buf = (void *)tee_mm_get_smem(mm);
	} else {
		if (flen > rxtx->size) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		buf = rxtx->rx;
	}

	rc = add_mem_op_frag(s, buf, flen);
out:
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

	cpu_spin_unlock(&rxtx->spinlock);

	if (rc <= 0 && mm) {
		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
		tee_mm_free(mm);
	}

out_set_rc:
	if (rc < 0) {
		ret_fid = FFA_ERROR;
		ret_w2 = rc;
	} else if (rc > 0) {
		ret_fid = FFA_MEM_FRAG_RX;
		ret_w3 = rc;
		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
	} else {
		ret_fid = FFA_SUCCESS_32;
		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
	}

	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
}

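/* Handle FFA_MEM_RECLAIM: let the owner reclaim previously shared memory */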
static void handle_mem_reclaim(struct thread_smc_1_2_regs *args)
{
	int rc = FFA_INVALID_PARAMETERS;
	uint64_t cookie = 0;

	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
		goto out;

	cookie = reg_pair_to_64(args->a2, args->a1);
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		uint16_t guest_id = 0;

		if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
			guest_id = virt_find_guest_by_cookie(cookie);
		} else {
			guest_id = (cookie >> FFA_MEMORY_HANDLE_PRTN_SHIFT) &
				   FFA_MEMORY_HANDLE_PRTN_MASK;
		}
		if (!guest_id)
			goto out;
		if (virt_set_guest(guest_id)) {
			if (!virt_reclaim_cookie_from_destroyed_guest(guest_id,
								      cookie))
				rc = FFA_OK;
			goto out;
		}
	}

	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
	case TEE_SUCCESS:
		rc = FFA_OK;
		break;
	case TEE_ERROR_ITEM_NOT_FOUND:
		DMSG("cookie %#"PRIx64" not found", cookie);
		rc = FFA_INVALID_PARAMETERS;
		break;
	default:
		DMSG("cookie %#"PRIx64" busy", cookie);
		rc = FFA_DENIED;
		break;
	}

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

out:
	set_simple_ret_val(args, rc);
}

static void handle_notification_bitmap_create(struct thread_smc_1_2_regs *args)
{
	uint32_t ret_val = FFA_INVALID_PARAMETERS;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t old_itr_status = 0;

	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
	    !args->a5 && !args->a6 && !args->a7) {
		struct guest_partition *prtn = NULL;
		struct notif_vm_bitmap *nvb = NULL;
		uint16_t vm_id = args->a1;

		prtn = virt_get_guest(vm_id);
		nvb = get_notif_vm_bitmap(prtn, vm_id);
		if (!nvb) {
			ret_val = FFA_INVALID_PARAMETERS;
			goto out_virt_put;
		}

		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);

		if (nvb->initialized) {
			ret_val = FFA_DENIED;
			goto out_unlock;
		}

		nvb->initialized = true;
		nvb->do_bottom_half_value = -1;
		ret_val = FFA_OK;
		ret_fid = FFA_SUCCESS_32;
out_unlock:
		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
out_virt_put:
		virt_put_guest(prtn);
	}

	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
}

static void handle_notification_bitmap_destroy(struct thread_smc_1_2_regs *args)
{
	uint32_t ret_val = FFA_INVALID_PARAMETERS;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t old_itr_status = 0;

	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
	    !args->a5 && !args->a6 && !args->a7) {
		struct guest_partition *prtn = NULL;
		struct notif_vm_bitmap *nvb = NULL;
		uint16_t vm_id = args->a1;

		prtn = virt_get_guest(vm_id);
		nvb = get_notif_vm_bitmap(prtn, vm_id);
		if (!nvb) {
			ret_val = FFA_INVALID_PARAMETERS;
			goto out_virt_put;
		}

		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);

		if (nvb->pending || nvb->bound) {
			ret_val = FFA_DENIED;
			goto out_unlock;
		}

		memset(nvb, 0, sizeof(*nvb));
		ret_val = FFA_OK;
		ret_fid = FFA_SUCCESS_32;
out_unlock:
		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
out_virt_put:
		virt_put_guest(prtn);
	}

	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
}

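/*
 * Handle FFA_NOTIFICATION_BIND: bind a set of notifications in the
 * receiver's bitmap to a sender. Only global notifications are
 * supported here.
 */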
static void handle_notification_bind(struct thread_smc_1_2_regs *args)
{
	uint32_t ret_val = FFA_INVALID_PARAMETERS;
	struct guest_partition *prtn = NULL;
	struct notif_vm_bitmap *nvb = NULL;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t old_itr_status = 0;
	uint64_t bitmap = 0;
	uint16_t vm_id = 0;

	if (args->a5 || args->a6 || args->a7)
		goto out;
	if (args->a2) {
		/* We only deal with global notifications */
		ret_val = FFA_DENIED;
		goto out;
	}

	/* The destination of the eventual notification */
	vm_id = FFA_DST(args->a1);
	bitmap = reg_pair_to_64(args->a4, args->a3);

	prtn = virt_get_guest(vm_id);
	nvb = get_notif_vm_bitmap(prtn, vm_id);
	if (!nvb) {
		ret_val = FFA_INVALID_PARAMETERS;
		goto out_virt_put;
	}

	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);

	if (bitmap & nvb->bound) {
		ret_val = FFA_DENIED;
	} else {
		nvb->bound |= bitmap;
		ret_val = FFA_OK;
		ret_fid = FFA_SUCCESS_32;
	}

	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
out_virt_put:
	virt_put_guest(prtn);
out:
	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
}

static void handle_notification_unbind(struct thread_smc_1_2_regs *args)
{
	uint32_t ret_val = FFA_INVALID_PARAMETERS;
	struct guest_partition *prtn = NULL;
	struct notif_vm_bitmap *nvb = NULL;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t old_itr_status = 0;
	uint64_t bitmap = 0;
	uint16_t vm_id = 0;

	if (args->a2 || args->a5 || args->a6 || args->a7)
		goto out;

	/* The destination of the eventual notification */
	vm_id = FFA_DST(args->a1);
	bitmap = reg_pair_to_64(args->a4, args->a3);

	prtn = virt_get_guest(vm_id);
	nvb = get_notif_vm_bitmap(prtn, vm_id);
	if (!nvb) {
		ret_val = FFA_INVALID_PARAMETERS;
		goto out_virt_put;
	}

	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);

	if (bitmap & nvb->pending) {
		ret_val = FFA_DENIED;
	} else {
		nvb->bound &= ~bitmap;
		ret_val = FFA_OK;
		ret_fid = FFA_SUCCESS_32;
	}

	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
out_virt_put:
	virt_put_guest(prtn);
out:
	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
}

static void handle_notification_get(struct thread_smc_1_2_regs *args)
{
	uint32_t w2 = FFA_INVALID_PARAMETERS;
	struct guest_partition *prtn = NULL;
	struct notif_vm_bitmap *nvb = NULL;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t old_itr_status = 0;
	uint16_t vm_id = 0;
	uint32_t w3 = 0;

	if (args->a5 || args->a6 || args->a7)
		goto out;
	if (!(args->a2 & 0x1)) {
		ret_fid = FFA_SUCCESS_32;
		w2 = 0;
		goto out;
	}
	vm_id = FFA_DST(args->a1);

	prtn = virt_get_guest(vm_id);
	nvb = get_notif_vm_bitmap(prtn, vm_id);
	if (!nvb)
		goto out_virt_put;

	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);

	reg_pair_from_64(nvb->pending, &w3, &w2);
	nvb->pending = 0;
	ret_fid = FFA_SUCCESS_32;

	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
out_virt_put:
	virt_put_guest(prtn);
out:
	spmc_set_args(args, ret_fid, 0, w2, w3, 0, 0);
}

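/*
 * State for assembling an FFA_NOTIFICATION_INFO_GET response. Endpoint
 * IDs are packed 16 bits each into the return registers from w3/x3
 * onwards, while w2 carries the number of lists and the number of IDs
 * in each list.
 */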
struct notif_info_get_state {
	struct thread_smc_1_2_regs *args;
	unsigned int ids_per_reg;
	unsigned int ids_count;
	unsigned int id_pos;
	unsigned int count;
	unsigned int max_list_count;
	unsigned int list_count;
};

static bool add_id_in_regs(struct notif_info_get_state *state,
			   uint16_t id)
{
	unsigned int reg_idx = state->id_pos / state->ids_per_reg + 3;
	unsigned int reg_shift = (state->id_pos % state->ids_per_reg) * 16;

	if (reg_idx > 7)
		return false;

	state->args->a[reg_idx] &= ~SHIFT_U64(0xffff, reg_shift);
	state->args->a[reg_idx] |= (unsigned long)id << reg_shift;

	state->id_pos++;
	state->count++;
	return true;
}

static bool add_id_count(struct notif_info_get_state *state)
{
	assert(state->list_count < state->max_list_count &&
	       state->count >= 1 && state->count <= 4);

	state->ids_count |= (state->count - 1) << (state->list_count * 2 + 12);
	state->list_count++;
	state->count = 0;

	return state->list_count < state->max_list_count;
}

static bool add_nvb_to_state(struct notif_info_get_state *state,
			     uint16_t guest_id, struct notif_vm_bitmap *nvb)
{
	if (!nvb->pending)
		return true;
	/*
	 * Add only the guest_id, meaning a global notification for this
	 * guest.
	 *
	 * If there were notifications for one or more specific vCPUs we'd
	 * add those before calling add_id_count(), but that's not
	 * supported.
	 */
	return add_id_in_regs(state, guest_id) && add_id_count(state);
}

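/*
 * Handle FFA_NOTIFICATION_INFO_GET: report which endpoints have pending
 * notifications. Only global notifications, identified by their guest
 * ID, are reported.
 */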
static void handle_notification_info_get(struct thread_smc_1_2_regs *args)
{
	struct notif_info_get_state state = { .args = args };
	uint32_t ffa_res = FFA_INVALID_PARAMETERS;
	struct guest_partition *prtn = NULL;
	struct notif_vm_bitmap *nvb = NULL;
	uint32_t more_pending_flag = 0;
	uint32_t itr_state = 0;
	uint16_t guest_id = 0;

	if (args->a1 || args->a2 || args->a3 || args->a4 || args->a5 ||
	    args->a6 || args->a7)
		goto err;

	if (OPTEE_SMC_IS_64(args->a0)) {
		spmc_set_args(args, FFA_SUCCESS_64, 0, 0, 0, 0, 0);
		state.ids_per_reg = 4;
		state.max_list_count = 31;
	} else {
		spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
		state.ids_per_reg = 2;
		state.max_list_count = 15;
	}

	while (true) {
		/*
		 * With NS-Virtualization we need to go through all
		 * partitions to collect the notification bitmaps; without
		 * it we just check the only notification bitmap we have.
		 */
		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
			prtn = virt_next_guest(prtn);
			if (!prtn)
				break;
			guest_id = virt_get_guest_id(prtn);
		}
		nvb = get_notif_vm_bitmap(prtn, guest_id);

		itr_state = cpu_spin_lock_xsave(&spmc_notif_lock);
		if (!add_nvb_to_state(&state, guest_id, nvb))
			more_pending_flag = BIT(0);
		cpu_spin_unlock_xrestore(&spmc_notif_lock, itr_state);

		if (!IS_ENABLED(CFG_NS_VIRTUALIZATION) || more_pending_flag)
			break;
	}
	virt_put_guest(prtn);

	if (!state.id_pos) {
		ffa_res = FFA_NO_DATA;
		goto err;
	}
	args->a2 = (state.list_count << FFA_NOTIF_INFO_GET_ID_COUNT_SHIFT) |
		   (state.ids_count << FFA_NOTIF_INFO_GET_ID_LIST_SHIFT) |
		   more_pending_flag;
	return;
err:
	spmc_set_args(args, FFA_ERROR, 0, ffa_res, 0, 0, 0);
}

void thread_spmc_set_async_notif_intid(int intid)
{
	assert(interrupt_can_raise_sgi(interrupt_get_main_chip()));
	notif_intid = intid;
	spmc_notif_is_ready = true;
	DMSG("Asynchronous notifications are ready");
}

void notif_send_async(uint32_t value, uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	struct notif_vm_bitmap *nvb = NULL;
	uint32_t old_itr_status = 0;

	prtn = virt_get_guest(guest_id);
	nvb = get_notif_vm_bitmap(prtn, guest_id);

	if (nvb) {
		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
		assert(value == NOTIF_VALUE_DO_BOTTOM_HALF &&
		       spmc_notif_is_ready && nvb->do_bottom_half_value >= 0 &&
		       notif_intid >= 0);
		nvb->pending |= BIT64(nvb->do_bottom_half_value);
		interrupt_raise_sgi(interrupt_get_main_chip(), notif_intid,
				    ITR_CPU_MASK_TO_THIS_CPU);
		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
	}

	virt_put_guest(prtn);
}
#else
void notif_send_async(uint32_t value, uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	struct notif_vm_bitmap *nvb = NULL;
	/* Global notification, delay notification interrupt */
	uint32_t flags = BIT32(1);
	int res = 0;

	prtn = virt_get_guest(guest_id);
	nvb = get_notif_vm_bitmap(prtn, guest_id);

	if (nvb) {
		assert(value == NOTIF_VALUE_DO_BOTTOM_HALF &&
		       spmc_notif_is_ready && nvb->do_bottom_half_value >= 0);
		res = ffa_set_notification(guest_id, optee_core_lsp.sp_id,
					   flags,
					   BIT64(nvb->do_bottom_half_value));
		if (res) {
			EMSG("notification set failed with error %d", res);
			panic();
		}
	}

	virt_put_guest(prtn);
}
#endif

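/*
 * Main FF-A ABI dispatcher, entered with all maskable exceptions masked.
 */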
/* Only called from assembly */
void thread_spmc_msg_recv(struct thread_smc_1_2_regs *args);
void thread_spmc_msg_recv(struct thread_smc_1_2_regs *args)
{
	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
	switch (args->a0) {
#if defined(CFG_CORE_SEL1_SPMC)
	case FFA_FEATURES:
		handle_features(args);
		break;
	case FFA_SPM_ID_GET:
		spmc_handle_spm_id_get(args);
		break;
#ifdef ARM64
	case FFA_RXTX_MAP_64:
#endif
	case FFA_RXTX_MAP_32:
		spmc_handle_rxtx_map(args, &my_rxtx);
		break;
	case FFA_RXTX_UNMAP:
		spmc_handle_rxtx_unmap(args, &my_rxtx);
		break;
	case FFA_RX_RELEASE:
		spmc_handle_rx_release(args, &my_rxtx);
		break;
	case FFA_PARTITION_INFO_GET:
		spmc_handle_partition_info_get(args, &my_rxtx);
		break;
	case FFA_RUN:
		spmc_handle_run(args);
		break;
#endif /*CFG_CORE_SEL1_SPMC*/
	case FFA_INTERRUPT:
		if (IS_ENABLED(CFG_CORE_SEL1_SPMC))
			spmc_set_args(args, FFA_NORMAL_WORLD_RESUME, 0, 0, 0,
				      0, 0);
		else
			spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0);
		break;
#ifdef ARM64
	case FFA_MSG_SEND_DIRECT_REQ_64:
#endif
	case FFA_MSG_SEND_DIRECT_REQ_32:
		handle_direct_request(args);
		break;
#if defined(CFG_CORE_SEL1_SPMC)
#ifdef ARM64
	case FFA_MEM_SHARE_64:
#endif
	case FFA_MEM_SHARE_32:
#ifdef ARM64
	case FFA_MEM_LEND_64:
#endif
	case FFA_MEM_LEND_32:
		handle_mem_op(args, &my_rxtx);
		break;
	case FFA_MEM_RECLAIM:
		if (!IS_ENABLED(CFG_SECURE_PARTITION) ||
		    !ffa_mem_reclaim(args, NULL))
			handle_mem_reclaim(args);
		break;
	case FFA_MEM_FRAG_TX:
		handle_mem_frag_tx(args, &my_rxtx);
		break;
	case FFA_NOTIFICATION_BITMAP_CREATE:
		handle_notification_bitmap_create(args);
		break;
	case FFA_NOTIFICATION_BITMAP_DESTROY:
		handle_notification_bitmap_destroy(args);
		break;
	case FFA_NOTIFICATION_BIND:
		handle_notification_bind(args);
		break;
	case FFA_NOTIFICATION_UNBIND:
		handle_notification_unbind(args);
		break;
	case FFA_NOTIFICATION_GET:
		handle_notification_get(args);
		break;
#ifdef ARM64
	case FFA_NOTIFICATION_INFO_GET_64:
#endif
	case FFA_NOTIFICATION_INFO_GET_32:
		handle_notification_info_get(args);
		break;
#endif /*CFG_CORE_SEL1_SPMC*/
	case FFA_ERROR:
		EMSG("Cannot handle FFA_ERROR(%d)", (int)args->a2);
		if (!IS_ENABLED(CFG_CORE_SEL1_SPMC)) {
			/*
			 * The SPMC will return an FFA_ERROR back so better
			 * panic() now than flooding the log.
			 */
			panic("FFA_ERROR from SPMC is fatal");
		}
		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break;
	default:
		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
		set_simple_ret_val(args, FFA_NOT_SUPPORTED);
	}
}

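/*
 * Execute a yielding call with the command argument struct located at
 * @offset in the shared memory identified by @cookie.
 */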
/*
 * Helper routine for the assembly function thread_std_smc_entry()
 *
 * Note: this function is weak just to make link_dummies_paged.c happy.
 */
uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
				       uint32_t a2, uint32_t a3,
				       uint32_t a4, uint32_t a5 __unused)
{
	/*
	 * Arguments are supplied from handle_yielding_call() as:
	 * a0 <- w1
	 * a1 <- w3
	 * a2 <- w4
	 * a3 <- w5
	 * a4 <- w6
	 * a5 <- w7
	 */
	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
	if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
		return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
	return FFA_DENIED;
}

static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
{
	uint64_t offs = tpm->u.memref.offs;

	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;

	param->u.fmem.offs_low = offs;
	param->u.fmem.offs_high = offs >> 32;
	if (param->u.fmem.offs_high != offs >> 32)
		return false;

	param->u.fmem.size = tpm->u.memref.size;
	if (tpm->u.memref.mobj) {
		uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);

		/* If a mobj is passed it better be one with a valid cookie. */
		if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
			return false;
		param->u.fmem.global_id = cookie;
	} else {
		param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
	}

	return true;
}

static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
			    struct thread_param *params,
			    struct optee_msg_arg **arg_ret)
{
	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
	struct thread_ctx *thr = threads + thread_get_id();
	struct optee_msg_arg *arg = thr->rpc_arg;

	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!arg) {
		EMSG("rpc_arg not set");
		return TEE_ERROR_GENERIC;
	}

	memset(arg, 0, sz);
	arg->cmd = cmd;
	arg->num_params = num_params;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */

	for (size_t n = 0; n < num_params; n++) {
		switch (params[n].attr) {
		case THREAD_PARAM_ATTR_NONE:
			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
			break;
		case THREAD_PARAM_ATTR_VALUE_IN:
		case THREAD_PARAM_ATTR_VALUE_OUT:
		case THREAD_PARAM_ATTR_VALUE_INOUT:
			arg->params[n].attr = params[n].attr -
					      THREAD_PARAM_ATTR_VALUE_IN +
					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
			arg->params[n].u.value.a = params[n].u.value.a;
			arg->params[n].u.value.b = params[n].u.value.b;
			arg->params[n].u.value.c = params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_IN:
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			if (!set_fmem(arg->params + n, params + n))
				return TEE_ERROR_BAD_PARAMETERS;
			break;
		default:
			return TEE_ERROR_BAD_PARAMETERS;
		}
	}

	if (arg_ret)
		*arg_ret = arg;

	return TEE_SUCCESS;
}

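/*
 * A minimal usage sketch (hypothetical command and values, not part of
 * the build) of the thread_param helpers serialized by get_rpc_arg():
 *
 *	struct thread_param params[2] = {
 *		THREAD_PARAM_VALUE(IN, 0x1234, 0, 0),
 *		THREAD_PARAM_MEMREF(OUT, mobj, 0, size),
 *	};
 *
 *	res = thread_rpc_cmd(OPTEE_RPC_CMD_XXX, 2, params);
 *
 * thread_rpc_cmd() below uses get_rpc_arg() to serialize the parameters
 * into the RPC buffer set up by yielding_call_with_arg() and
 * get_rpc_arg_res() to copy back any output values once normal world
 * has serviced the request.
 */
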
static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
				struct thread_param *params)
{
	for (size_t n = 0; n < num_params; n++) {
		switch (params[n].attr) {
		case THREAD_PARAM_ATTR_VALUE_OUT:
		case THREAD_PARAM_ATTR_VALUE_INOUT:
			params[n].u.value.a = arg->params[n].u.value.a;
			params[n].u.value.b = arg->params[n].u.value.b;
			params[n].u.value.c = arg->params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			params[n].u.memref.size = arg->params[n].u.fmem.size;
			break;
		default:
			break;
		}
	}

	return arg->ret;
}

uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct thread_param *params)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct optee_msg_arg *arg = NULL;
	uint32_t ret = 0;

	ret = get_rpc_arg(cmd, num_params, params, &arg);
	if (ret)
		return ret;

	thread_rpc(&rpc_arg);

	return get_rpc_arg_res(arg, num_params, params);
}

static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
	uint32_t res2 = 0;
	uint32_t res = 0;

	DMSG("freeing cookie %#"PRIx64, cookie);

	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);

	mobj_put(mobj);
	res2 = mobj_ffa_unregister_by_cookie(cookie);
	if (res2)
		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
		     cookie, res2);
	if (!res)
		thread_rpc(&rpc_arg);
}

static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
	struct optee_msg_arg *arg = NULL;
	unsigned int internal_offset = 0;
	struct mobj *mobj = NULL;
	uint64_t cookie = 0;

	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
		return NULL;

	thread_rpc(&rpc_arg);

	if (arg->num_params != 1 ||
	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
		return NULL;

	internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
	cookie = READ_ONCE(arg->params->u.fmem.global_id);
	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
	if (!mobj) {
		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
		     cookie, internal_offset);
		return NULL;
	}

	assert(mobj_is_nonsec(mobj));

	if (mobj->size < size) {
		DMSG("Mobj %#"PRIx64": wrong size", cookie);
		mobj_put(mobj);
		return NULL;
	}

	if (mobj_inc_map(mobj)) {
		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
		mobj_put(mobj);
		return NULL;
	}

	return mobj;
}

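/*
 * Sketch of the expected pairing for the wrappers below (illustrative
 * only): a payload buffer is allocated via an RPC to normal world and
 * must be released with the matching free helper once unused:
 *
 *	struct mobj *mobj = thread_rpc_alloc_payload(size);
 *
 *	if (mobj) {
 *		... use mobj_get_va(mobj, 0, size) ...
 *		thread_rpc_free_payload(mobj);
 *	}
 *
 * thread_rpc_alloc() leaves the mobj mapped (mobj_inc_map()) and with a
 * reference held, both of which are undone by thread_rpc_free().
 */
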
struct mobj *thread_rpc_alloc_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
}

struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
}

void thread_rpc_free_kernel_payload(struct mobj *mobj)
{
	if (mobj)
		thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL,
				mobj_get_cookie(mobj), mobj);
}

void thread_rpc_free_payload(struct mobj *mobj)
{
	if (mobj)
		thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
				mobj);
}

struct mobj *thread_rpc_alloc_global_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
}

void thread_rpc_free_global_payload(struct mobj *mobj)
{
	if (mobj)
		thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL,
				mobj_get_cookie(mobj), mobj);
}

void thread_spmc_register_secondary_ep(vaddr_t ep)
{
	unsigned long ret = 0;

	/* Let the SPM know the entry point for secondary CPUs */
	ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0);

	if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64)
		EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret);
}

static uint16_t ffa_id_get(void)
{
	/*
	 * Ask the SPM component running at a higher EL to return our FF-A ID.
	 * This can either be the SPMC ID (if the SPMC is enabled in OP-TEE) or
	 * the partition ID (if not).
	 */
	struct thread_smc_args args = {
		.a0 = FFA_ID_GET,
	};

	thread_smccc(&args);
	if (!is_ffa_success(args.a0)) {
		if (args.a0 == FFA_ERROR)
			EMSG("Get id failed with error %ld", args.a2);
		else
			EMSG("Get id failed");
		panic();
	}

	return args.a2;
}

static uint16_t ffa_spm_id_get(void)
{
	/*
	 * Ask the SPM component running at a higher EL to return its ID.
	 * If OP-TEE implements the S-EL1 SPMC, this will get the SPMD ID.
	 * If not, the ID of the SPMC will be returned.
	 */
	struct thread_smc_args args = {
		.a0 = FFA_SPM_ID_GET,
	};

	thread_smccc(&args);
	if (!is_ffa_success(args.a0)) {
		if (args.a0 == FFA_ERROR)
			EMSG("Get spm id failed with error %ld", args.a2);
		else
			EMSG("Get spm id failed");
		panic();
	}

	return args.a2;
}

#ifdef CFG_CORE_DYN_PROTMEM
TEE_Result thread_spmc_get_protmem_config(enum mobj_use_case use_case,
					  void *buf, size_t *buf_sz,
					  size_t *min_mem_sz,
					  size_t *min_mem_align)
{
	TEE_Result res = TEE_SUCCESS;
	struct ffa_mem_access_perm mem_acc_list[] = {
		{
			.endpoint_id = optee_core_lsp.sp_id,
			.perm = FFA_MEM_ACC_RW,
		},
	};

	res = plat_get_protmem_config(use_case, min_mem_sz, min_mem_align);
	if (res)
		return res;

	if (!buf || *buf_sz < sizeof(mem_acc_list)) {
		*buf_sz = sizeof(mem_acc_list);
		return TEE_ERROR_SHORT_BUFFER;
	}

	memcpy(buf, mem_acc_list, sizeof(mem_acc_list));
	*buf_sz = sizeof(mem_acc_list);

	return TEE_SUCCESS;
}
#endif /*CFG_CORE_DYN_PROTMEM*/

static TEE_Result check_desc(struct spmc_lsp_desc *d)
{
	uint32_t accept_props = FFA_PART_PROP_DIRECT_REQ_RECV |
				FFA_PART_PROP_DIRECT_REQ_SEND |
				FFA_PART_PROP_NOTIF_CREATED |
				FFA_PART_PROP_NOTIF_DESTROYED |
				FFA_PART_PROP_AARCH64_STATE;
	uint32_t id = d->sp_id;

	if (id && (spmc_is_reserved_id(id) || spmc_find_lsp_by_sp_id(id) ||
		   id < FFA_SWD_ID_MIN || id > FFA_SWD_ID_MAX)) {
		EMSG("Conflicting SP id for SP \"%s\" id %#"PRIx32,
		     d->name, id);
		if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
			panic();
		return TEE_ERROR_BAD_FORMAT;
	}

	if (d->properties & ~accept_props) {
		EMSG("Unexpected properties in %#"PRIx32" for LSP \"%s\" %#"PRIx16,
		     d->properties, d->name, d->sp_id);
		if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
			panic();
		d->properties &= accept_props;
	}

	if (!d->direct_req) {
		EMSG("Missing direct request callback for LSP \"%s\" %#"PRIx16,
		     d->name, d->sp_id);
		if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
			panic();
		return TEE_ERROR_BAD_FORMAT;
	}

	if (!d->uuid_words[0] && !d->uuid_words[1] &&
	    !d->uuid_words[2] && !d->uuid_words[3]) {
		EMSG("Found NULL UUID for LSP \"%s\" %#"PRIx16,
		     d->name, d->sp_id);
		if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
			panic();
		return TEE_ERROR_BAD_FORMAT;
	}

	return TEE_SUCCESS;
}

static uint16_t find_unused_sp_id(void)
{
	uint32_t id = FFA_SWD_ID_MIN;

	while (spmc_is_reserved_id(id) || spmc_find_lsp_by_sp_id(id)) {
		id++;
		assert(id <= FFA_SWD_ID_MAX);
	}

	return id;
}

TEE_Result spmc_register_lsp(struct spmc_lsp_desc *desc)
{
	TEE_Result res = TEE_SUCCESS;

	res = check_desc(desc);
	if (res)
		return res;

	if (STAILQ_EMPTY(&lsp_head)) {
		DMSG("Cannot add Logical SP \"%s\": LSP framework not initialized yet",
		     desc->name);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (!desc->sp_id)
		desc->sp_id = find_unused_sp_id();

	DMSG("Adding Logical SP \"%s\" with id %#"PRIx16,
	     desc->name, desc->sp_id);

	STAILQ_INSERT_TAIL(&lsp_head, desc, link);

	return TEE_SUCCESS;
}

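/*
 * A hypothetical registration example (not a real LSP in this file):
 * a logical SP only needs a name, a direct request handler and a
 * non-zero UUID to pass check_desc(). Leaving sp_id as 0 makes
 * spmc_register_lsp() pick a free ID in the
 * [FFA_SWD_ID_MIN, FFA_SWD_ID_MAX] range:
 *
 *	static struct spmc_lsp_desc example_lsp = {
 *		.name = "example",
 *		.direct_req = example_handle_direct_request,
 *		.uuid_words = { 0x01234567, 0x89abcdef,
 *				0x01234567, 0x89abcdef, },
 *	};
 *
 *	res = spmc_register_lsp(&example_lsp);
 *
 * example_handle_direct_request and the UUID words above are made-up
 * placeholders; check_desc() rejects an all-zero UUID and a missing
 * direct_req callback.
 */
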
static struct spmc_lsp_desc optee_core_lsp __nex_data = {
	.name = "OP-TEE",
	.direct_req = optee_lsp_handle_direct_request,
	.properties = FFA_PART_PROP_DIRECT_REQ_RECV |
		      FFA_PART_PROP_DIRECT_REQ_SEND |
#ifdef CFG_NS_VIRTUALIZATION
		      FFA_PART_PROP_NOTIF_CREATED |
		      FFA_PART_PROP_NOTIF_DESTROYED |
#endif
		      FFA_PART_PROP_AARCH64_STATE |
		      FFA_PART_PROP_IS_PE_ID,
	/*
	 * - If the SPMC is in S-EL2 this UUID describes OP-TEE as an
	 *   S-EL1 SP, or
	 * - if the SPMC is in S-EL1 then this UUID is for OP-TEE as a
	 *   logical partition, residing at the same exception level as
	 *   the SPMC.
	 *
	 * UUID 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
	 */
	.uuid_words = { 0xe0786148, 0xe311f8e7, 0x02005ebc, 0x1bc5d5a5, },
};

#if defined(CFG_CORE_SEL1_SPMC)
static struct spmc_lsp_desc optee_spmc_lsp __nex_data = {
	.name = "OP-TEE SPMC",
	.direct_req = optee_spmc_lsp_handle_direct_request,
};

static TEE_Result spmc_init(void)
{
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
	    virt_add_guest_spec_data(&notif_vm_bitmap_id,
				     sizeof(struct notif_vm_bitmap), NULL))
		panic("virt_add_guest_spec_data");
	spmd_id = ffa_spm_id_get();
	DMSG("SPMD ID %#"PRIx16, spmd_id);

	optee_spmc_lsp.sp_id = ffa_id_get();
	DMSG("SPMC ID %#"PRIx16, optee_spmc_lsp.sp_id);
	STAILQ_INSERT_HEAD(&lsp_head, &optee_spmc_lsp, link);

	optee_core_lsp.sp_id = find_unused_sp_id();
	DMSG("OP-TEE endpoint ID %#"PRIx16, optee_core_lsp.sp_id);
	STAILQ_INSERT_HEAD(&lsp_head, &optee_core_lsp, link);

	/*
	 * If the SPMD thinks we are version 1.0 it will report version
	 * 1.0 to normal world regardless of which version we query the
	 * SPM with. However, if the SPMD thinks we are version 1.1 it
	 * will forward version queries from normal world and let us
	 * negotiate the version. So by setting version 1.0 here we
	 * should be compatible.
	 *
	 * Note that disagreement on the negotiated version means that
	 * we'll have communication problems with normal world.
	 */
	my_rxtx.ffa_vers = FFA_VERSION_1_0;

	return TEE_SUCCESS;
}
#else /* !defined(CFG_CORE_SEL1_SPMC) */
static void spmc_rxtx_map(struct ffa_rxtx *rxtx)
{
	struct thread_smc_args args = {
#ifdef ARM64
		.a0 = FFA_RXTX_MAP_64,
#else
		.a0 = FFA_RXTX_MAP_32,
#endif
		.a1 = virt_to_phys(rxtx->tx),
		.a2 = virt_to_phys(rxtx->rx),
		.a3 = 1,
	};

	thread_smccc(&args);
	if (!is_ffa_success(args.a0)) {
		if (args.a0 == FFA_ERROR)
			EMSG("rxtx map failed with error %ld", args.a2);
		else
			EMSG("rxtx map failed");
		panic();
	}
}

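/*
 * For reference: an FF-A version word packs the major version in bits
 * [30:16] and the minor version in bits [15:0], with bit [31] MBZ. For
 * instance, version 1.2 is encoded as:
 *
 *	FFA_VERSION_1_2 == (1 << 16) | 2 == 0x10002
 *
 * get_ffa_version() below treats a set bit [31] in the response as an
 * error, since that indicates NOT_SUPPORTED rather than a version word.
 */
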
2571 */ 2572 my_rxtx.ffa_vers = FFA_VERSION_1_0; 2573 2574 return TEE_SUCCESS; 2575 } 2576 #else /* !defined(CFG_CORE_SEL1_SPMC) */ 2577 static void spmc_rxtx_map(struct ffa_rxtx *rxtx) 2578 { 2579 struct thread_smc_args args = { 2580 #ifdef ARM64 2581 .a0 = FFA_RXTX_MAP_64, 2582 #else 2583 .a0 = FFA_RXTX_MAP_32, 2584 #endif 2585 .a1 = virt_to_phys(rxtx->tx), 2586 .a2 = virt_to_phys(rxtx->rx), 2587 .a3 = 1, 2588 }; 2589 2590 thread_smccc(&args); 2591 if (!is_ffa_success(args.a0)) { 2592 if (args.a0 == FFA_ERROR) 2593 EMSG("rxtx map failed with error %ld", args.a2); 2594 else 2595 EMSG("rxtx map failed"); 2596 panic(); 2597 } 2598 } 2599 2600 static uint32_t get_ffa_version(uint32_t my_version) 2601 { 2602 struct thread_smc_args args = { 2603 .a0 = FFA_VERSION, 2604 .a1 = my_version, 2605 }; 2606 2607 thread_smccc(&args); 2608 if (args.a0 & BIT(31)) { 2609 EMSG("FF-A version failed with error %ld", args.a0); 2610 panic(); 2611 } 2612 2613 return args.a0; 2614 } 2615 2616 static void *spmc_retrieve_req(struct ffa_mem_transaction_x *trans) 2617 { 2618 uint64_t cookie __maybe_unused = trans->global_handle; 2619 struct ffa_mem_access *acc_descr_array = NULL; 2620 struct ffa_mem_access_perm *perm_descr = NULL; 2621 struct thread_smc_args args = { 2622 .a0 = FFA_MEM_RETRIEVE_REQ_32, 2623 .a3 = 0, /* Address, Using TX -> MBZ */ 2624 .a4 = 0, /* Using TX -> MBZ */ 2625 }; 2626 size_t size = 0; 2627 int rc = 0; 2628 2629 if (my_rxtx.ffa_vers == FFA_VERSION_1_0) { 2630 struct ffa_mem_transaction_1_0 *trans_descr = my_rxtx.tx; 2631 2632 size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access); 2633 memset(trans_descr, 0, size); 2634 trans_descr->sender_id = trans->sender_id; 2635 trans_descr->mem_reg_attr = trans->mem_reg_attr; 2636 trans_descr->global_handle = trans->global_handle; 2637 trans_descr->tag = trans->tag; 2638 trans_descr->flags = trans->flags; 2639 trans_descr->mem_access_count = 1; 2640 acc_descr_array = trans_descr->mem_access_array; 2641 } else { 2642 struct ffa_mem_transaction_1_1 *trans_descr = my_rxtx.tx; 2643 2644 size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access); 2645 memset(trans_descr, 0, size); 2646 trans_descr->sender_id = trans->sender_id; 2647 trans_descr->mem_reg_attr = trans->mem_reg_attr; 2648 trans_descr->global_handle = trans->global_handle; 2649 trans_descr->tag = trans->tag; 2650 trans_descr->flags = trans->flags; 2651 trans_descr->mem_access_count = 1; 2652 trans_descr->mem_access_offs = sizeof(*trans_descr); 2653 trans_descr->mem_access_size = sizeof(struct ffa_mem_access); 2654 acc_descr_array = (void *)((vaddr_t)my_rxtx.tx + 2655 sizeof(*trans_descr)); 2656 } 2657 acc_descr_array->region_offs = 0; 2658 acc_descr_array->reserved = 0; 2659 perm_descr = &acc_descr_array->access_perm; 2660 perm_descr->endpoint_id = optee_core_lsp.sp_id; 2661 perm_descr->perm = FFA_MEM_ACC_RW; 2662 perm_descr->flags = 0; 2663 2664 args.a1 = size; /* Total Length */ 2665 args.a2 = size; /* Frag Length == Total length */ 2666 thread_smccc(&args); 2667 if (args.a0 != FFA_MEM_RETRIEVE_RESP) { 2668 if (args.a0 == FFA_ERROR) 2669 EMSG("Failed to fetch cookie %#"PRIx64" error code %d", 2670 cookie, (int)args.a2); 2671 else 2672 EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64, 2673 cookie, args.a0); 2674 return NULL; 2675 } 2676 rc = spmc_read_mem_transaction(my_rxtx.ffa_vers, my_rxtx.rx, 2677 my_rxtx.size, trans); 2678 if (rc) { 2679 EMSG("Memory transaction failure for cookie %#"PRIx64" rc %d", 2680 cookie, rc); 2681 return NULL; 2682 } 2683 2684 return my_rxtx.rx; 
void thread_spmc_relinquish(uint64_t cookie)
{
	struct ffa_mem_relinquish *relinquish_desc = my_rxtx.tx;
	struct thread_smc_args args = {
		.a0 = FFA_MEM_RELINQUISH,
	};

	memset(relinquish_desc, 0, sizeof(*relinquish_desc));
	relinquish_desc->handle = cookie;
	relinquish_desc->flags = 0;
	relinquish_desc->endpoint_count = 1;
	relinquish_desc->endpoint_id_array[0] = optee_core_lsp.sp_id;
	thread_smccc(&args);
	if (!is_ffa_success(args.a0))
		EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
}

static int set_pages(struct ffa_address_range *regions,
		     unsigned int num_regions, unsigned int num_pages,
		     struct mobj_ffa *mf)
{
	unsigned int n = 0;
	unsigned int idx = 0;

	for (n = 0; n < num_regions; n++) {
		unsigned int page_count = READ_ONCE(regions[n].page_count);
		uint64_t addr = READ_ONCE(regions[n].address);

		if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count))
			return FFA_INVALID_PARAMETERS;
	}

	if (idx != num_pages)
		return FFA_INVALID_PARAMETERS;

	return 0;
}

struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie,
						   enum mobj_use_case use_case)
{
	struct mobj_ffa *ret = NULL;
	struct ffa_mem_transaction_x retrieve_desc = { .tag = use_case };
	struct ffa_mem_access *descr_array = NULL;
	struct ffa_mem_region *descr = NULL;
	struct mobj_ffa *mf = NULL;
	unsigned int num_pages = 0;
	unsigned int offs = 0;
	void *buf = NULL;
	struct thread_smc_args ffa_rx_release_args = {
		.a0 = FFA_RX_RELEASE
	};

	if (use_case == MOBJ_USE_CASE_NS_SHM)
		retrieve_desc.flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
	else
		retrieve_desc.flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
	retrieve_desc.flags |= FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
	retrieve_desc.global_handle = cookie;
	retrieve_desc.sender_id = thread_get_tsd()->rpc_target_info;
	retrieve_desc.mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;

	/*
	 * OP-TEE only supports a single mem_region while the
	 * specification allows for more than one.
	 */
	buf = spmc_retrieve_req(&retrieve_desc);
	if (!buf) {
		EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
		     cookie);
		return NULL;
	}

	descr_array = (void *)((vaddr_t)buf + retrieve_desc.mem_access_offs);
	offs = READ_ONCE(descr_array->region_offs);
	descr = (struct ffa_mem_region *)((vaddr_t)buf + offs);

	num_pages = READ_ONCE(descr->total_page_count);
	mf = mobj_ffa_spmc_new(cookie, num_pages, use_case);
	if (!mf)
		goto out;

	if (set_pages(descr->address_range_array,
		      READ_ONCE(descr->address_range_count), num_pages, mf)) {
		mobj_ffa_spmc_delete(mf);
		goto out;
	}

	ret = mf;

out:
	/* Release the RX buffer after the memory retrieve request */
	thread_smccc(&ffa_rx_release_args);

	return ret;
}

static uint32_t get_ffa_version_from_manifest(void *fdt)
{
	int ret = 0;
	uint32_t vers = 0;

	ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
	if (ret < 0) {
		EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
		panic();
	}

	ret = fdt_read_uint32(fdt, 0, "ffa-version", &vers);
	if (ret < 0) {
		EMSG("Can't read \"ffa-version\" from FF-A manifest at %p: error %d",
		     fdt, ret);
		panic();
	}

	return vers;
}

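/*
 * Sketch of the FF-A manifest fragment that
 * get_ffa_version_from_manifest() above parses (property values are
 * examples only); the version cell uses the same major/minor packing
 * as the FFA_VERSION interface:
 *
 *	/ {
 *		compatible = "arm,ffa-manifest-1.0";
 *		ffa-version = <0x00010002>; // FF-A v1.2
 *		...
 *	};
 */
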
static TEE_Result spmc_init(void)
{
	uint32_t my_vers = 0;
	uint32_t vers = 0;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
	    virt_add_guest_spec_data(&notif_vm_bitmap_id,
				     sizeof(struct notif_vm_bitmap), NULL))
		panic("virt_add_guest_spec_data");

	my_vers = get_ffa_version_from_manifest(get_manifest_dt());
	if (my_vers < FFA_VERSION_1_0 || my_vers > FFA_VERSION_1_2) {
		EMSG("Unsupported version %"PRIu32".%"PRIu32" from manifest",
		     FFA_GET_MAJOR_VERSION(my_vers),
		     FFA_GET_MINOR_VERSION(my_vers));
		panic();
	}
	vers = get_ffa_version(my_vers);
	DMSG("SPMC reported version %"PRIu32".%"PRIu32,
	     FFA_GET_MAJOR_VERSION(vers), FFA_GET_MINOR_VERSION(vers));
	if (FFA_GET_MAJOR_VERSION(vers) != FFA_GET_MAJOR_VERSION(my_vers)) {
		EMSG("Incompatible major version %"PRIu32", expected %"PRIu32,
		     FFA_GET_MAJOR_VERSION(vers),
		     FFA_GET_MAJOR_VERSION(my_vers));
		panic();
	}
	if (vers < my_vers)
		my_vers = vers;
	DMSG("Using version %"PRIu32".%"PRIu32,
	     FFA_GET_MAJOR_VERSION(my_vers), FFA_GET_MINOR_VERSION(my_vers));
	my_rxtx.ffa_vers = my_vers;

	spmc_rxtx_map(&my_rxtx);

	spmc_id = ffa_spm_id_get();
	DMSG("SPMC ID %#"PRIx16, spmc_id);

	optee_core_lsp.sp_id = ffa_id_get();
	DMSG("OP-TEE endpoint ID %#"PRIx16, optee_core_lsp.sp_id);
	STAILQ_INSERT_HEAD(&lsp_head, &optee_core_lsp, link);

	if (!ffa_features(FFA_NOTIFICATION_SET)) {
		spmc_notif_is_ready = true;
		DMSG("Asynchronous notifications are ready");
	}

	return TEE_SUCCESS;
}
#endif /* !defined(CFG_CORE_SEL1_SPMC) */

nex_service_init(spmc_init);