1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2020-2025, Linaro Limited. 4 * Copyright (c) 2019-2024, Arm Limited. All rights reserved. 5 */ 6 7 #include <assert.h> 8 #include <ffa.h> 9 #include <initcall.h> 10 #include <io.h> 11 #include <kernel/dt.h> 12 #include <kernel/interrupt.h> 13 #include <kernel/notif.h> 14 #include <kernel/panic.h> 15 #include <kernel/secure_partition.h> 16 #include <kernel/spinlock.h> 17 #include <kernel/spmc_sp_handler.h> 18 #include <kernel/tee_misc.h> 19 #include <kernel/thread.h> 20 #include <kernel/thread_private.h> 21 #include <kernel/thread_spmc.h> 22 #include <kernel/virtualization.h> 23 #include <libfdt.h> 24 #include <mm/core_mmu.h> 25 #include <mm/mobj.h> 26 #include <optee_ffa.h> 27 #include <optee_msg.h> 28 #include <optee_rpc_cmd.h> 29 #include <sm/optee_smc.h> 30 #include <string.h> 31 #include <sys/queue.h> 32 #include <tee/entry_std.h> 33 #include <tee/uuid.h> 34 #include <tee_api_types.h> 35 #include <types_ext.h> 36 #include <util.h> 37 38 #if defined(CFG_CORE_SEL1_SPMC) 39 struct mem_op_state { 40 bool mem_share; 41 struct mobj_ffa *mf; 42 unsigned int page_count; 43 unsigned int region_count; 44 unsigned int current_page_idx; 45 }; 46 47 struct mem_frag_state { 48 struct mem_op_state op; 49 tee_mm_entry_t *mm; 50 unsigned int frag_offset; 51 SLIST_ENTRY(mem_frag_state) link; 52 }; 53 #endif 54 55 struct notif_vm_bitmap { 56 bool initialized; 57 int do_bottom_half_value; 58 uint64_t pending; 59 uint64_t bound; 60 }; 61 62 STAILQ_HEAD(spmc_lsp_desc_head, spmc_lsp_desc); 63 64 static struct spmc_lsp_desc_head lsp_head __nex_data = 65 STAILQ_HEAD_INITIALIZER(lsp_head); 66 67 static unsigned int spmc_notif_lock __nex_data = SPINLOCK_UNLOCK; 68 static bool spmc_notif_is_ready __nex_bss; 69 static int notif_intid __nex_data __maybe_unused = -1; 70 71 /* Id used to look up the guest specific struct notif_vm_bitmap */ 72 static unsigned int notif_vm_bitmap_id __nex_bss; 73 /* Notification state when ns-virtualization isn't enabled */ 74 static struct notif_vm_bitmap default_notif_vm_bitmap; 75 76 /* Initialized in spmc_init() below */ 77 static struct spmc_lsp_desc optee_core_lsp; 78 #ifdef CFG_CORE_SEL1_SPMC 79 /* 80 * Representation of the internal SPMC when OP-TEE is the S-EL1 SPMC. 81 * Initialized in spmc_init() below. 82 */ 83 static struct spmc_lsp_desc optee_spmc_lsp; 84 /* FF-A ID of the SPMD. This is only valid when OP-TEE is the S-EL1 SPMC. */ 85 static uint16_t spmd_id __nex_bss; 86 87 /* 88 * If struct ffa_rxtx::size is 0 RX/TX buffers are not mapped or initialized. 89 * 90 * struct ffa_rxtx::spinlock protects the variables below from concurrent 91 * access; this includes the use of the content of struct ffa_rxtx::rx and 92 * @frag_state_head. 93 * 94 * struct ffa_rxtx::tx_is_mine is true when we may write to struct 95 * ffa_rxtx::tx and false when it is owned by normal world. 96 * 97 * Note that we can't prevent normal world from updating the content of 98 * these buffers so we must always be careful when reading, even while we 99 * hold the lock.
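 *
 * The hand-off of TX ownership can be seen below: for instance,
 * spmc_handle_partition_info_get() fills struct ffa_rxtx::tx and clears
 * tx_is_mine, and ownership only returns to us once the receiver calls
 * FFA_RX_RELEASE (handled in spmc_handle_rx_release()).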
100 */ 101 102 static struct ffa_rxtx my_rxtx __nex_bss; 103 104 static bool is_nw_buf(struct ffa_rxtx *rxtx) 105 { 106 return rxtx == &my_rxtx; 107 } 108 109 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head = 110 SLIST_HEAD_INITIALIZER(&frag_state_head); 111 112 #else 113 /* FF-A ID of the external SPMC */ 114 static uint16_t spmc_id __nex_bss; 115 static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE) __nex_bss; 116 static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE) __nex_bss; 117 static struct ffa_rxtx my_rxtx __nex_data = { 118 .rx = __rx_buf, 119 .tx = __tx_buf, 120 .size = sizeof(__rx_buf), 121 }; 122 #endif 123 124 bool spmc_is_reserved_id(uint16_t id) 125 { 126 #ifdef CFG_CORE_SEL1_SPMC 127 return id == spmd_id; 128 #else 129 return id == spmc_id; 130 #endif 131 } 132 133 struct spmc_lsp_desc *spmc_find_lsp_by_sp_id(uint16_t sp_id) 134 { 135 struct spmc_lsp_desc *desc = NULL; 136 137 STAILQ_FOREACH(desc, &lsp_head, link) 138 if (desc->sp_id == sp_id) 139 return desc; 140 141 return NULL; 142 } 143 144 static uint32_t swap_src_dst(uint32_t src_dst) 145 { 146 return (src_dst >> 16) | (src_dst << 16); 147 } 148 149 static uint16_t get_sender_id(uint32_t src_dst) 150 { 151 return src_dst >> 16; 152 } 153 154 void spmc_set_args(struct thread_smc_1_2_regs *args, uint32_t fid, 155 uint32_t src_dst, uint32_t w2, uint32_t w3, uint32_t w4, 156 uint32_t w5) 157 { 158 *args = (struct thread_smc_1_2_regs){ 159 .a0 = fid, 160 .a1 = src_dst, 161 .a2 = w2, 162 .a3 = w3, 163 .a4 = w4, 164 .a5 = w5, 165 }; 166 } 167 168 static void set_simple_ret_val(struct thread_smc_1_2_regs *args, int ffa_ret) 169 { 170 if (ffa_ret) 171 spmc_set_args(args, FFA_ERROR, 0, ffa_ret, 0, 0, 0); 172 else 173 spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0); 174 } 175 176 uint32_t spmc_exchange_version(uint32_t vers, struct ffa_rxtx *rxtx) 177 { 178 uint32_t major_vers = FFA_GET_MAJOR_VERSION(vers); 179 uint32_t minor_vers = FFA_GET_MINOR_VERSION(vers); 180 uint32_t my_vers = FFA_VERSION_1_2; 181 uint32_t my_major_vers = 0; 182 uint32_t my_minor_vers = 0; 183 184 my_major_vers = FFA_GET_MAJOR_VERSION(my_vers); 185 my_minor_vers = FFA_GET_MINOR_VERSION(my_vers); 186 187 /* 188 * No locking, if the caller does concurrent calls to this it's 189 * only making a mess for itself. We must be able to renegotiate 190 * the FF-A version in order to support differing versions between 191 * the loader and the driver. 192 * 193 * Callers should use the version requested if we return a matching 194 * major version and a matching or larger minor version. The caller 195 * should downgrade to our minor version if our minor version is 196 * smaller. Regardless, always return our version as recommended by 197 * the specification. 
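 *
 * A couple of examples with our version being 1.2: a caller requesting
 * 1.0 or 1.1 gets 1.2 back and we record the caller's version, while a
 * caller requesting 1.3 also gets 1.2 back and we record 1.2 since that
 * is the best we can offer.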
198 */ 199 if (major_vers == my_major_vers) { 200 if (minor_vers > my_minor_vers) 201 rxtx->ffa_vers = my_vers; 202 else 203 rxtx->ffa_vers = vers; 204 } 205 206 return my_vers; 207 } 208 209 static bool is_ffa_success(uint32_t fid) 210 { 211 #ifdef ARM64 212 if (fid == FFA_SUCCESS_64) 213 return true; 214 #endif 215 return fid == FFA_SUCCESS_32; 216 } 217 218 static int32_t get_ffa_ret_code(const struct thread_smc_args *args) 219 { 220 if (is_ffa_success(args->a0)) 221 return FFA_OK; 222 if (args->a0 == FFA_ERROR && args->a2) 223 return args->a2; 224 return FFA_NOT_SUPPORTED; 225 } 226 227 static int ffa_simple_call(uint32_t fid, unsigned long a1, unsigned long a2, 228 unsigned long a3, unsigned long a4) 229 { 230 struct thread_smc_args args = { 231 .a0 = fid, 232 .a1 = a1, 233 .a2 = a2, 234 .a3 = a3, 235 .a4 = a4, 236 }; 237 238 thread_smccc(&args); 239 240 return get_ffa_ret_code(&args); 241 } 242 243 static int __maybe_unused ffa_features(uint32_t id) 244 { 245 return ffa_simple_call(FFA_FEATURES, id, 0, 0, 0); 246 } 247 248 static int __maybe_unused ffa_set_notification(uint16_t dst, uint16_t src, 249 uint32_t flags, uint64_t bitmap) 250 { 251 return ffa_simple_call(FFA_NOTIFICATION_SET, 252 SHIFT_U32(src, 16) | dst, flags, 253 low32_from_64(bitmap), high32_from_64(bitmap)); 254 } 255 256 #if defined(CFG_CORE_SEL1_SPMC) 257 static void handle_features(struct thread_smc_1_2_regs *args) 258 { 259 uint32_t ret_fid = FFA_ERROR; 260 uint32_t ret_w2 = FFA_NOT_SUPPORTED; 261 262 switch (args->a1) { 263 case FFA_FEATURE_SCHEDULE_RECV_INTR: 264 if (spmc_notif_is_ready) { 265 ret_fid = FFA_SUCCESS_32; 266 ret_w2 = notif_intid; 267 } 268 break; 269 270 #ifdef ARM64 271 case FFA_RXTX_MAP_64: 272 #endif 273 case FFA_RXTX_MAP_32: 274 ret_fid = FFA_SUCCESS_32; 275 ret_w2 = 0; /* 4kB Minimum buffer size and alignment boundary */ 276 break; 277 #ifdef ARM64 278 case FFA_MEM_SHARE_64: 279 #endif 280 case FFA_MEM_SHARE_32: 281 ret_fid = FFA_SUCCESS_32; 282 /* 283 * Partition manager supports transmission of a memory 284 * transaction descriptor in a buffer dynamically allocated 285 * by the endpoint. 
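 * This corresponds to handle_mem_op_tmem() below, where the transaction
 * descriptor is supplied in a separately allocated normal world buffer
 * instead of in the RX buffer.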
286 */ 287 ret_w2 = BIT(0); 288 break; 289 290 case FFA_ERROR: 291 case FFA_VERSION: 292 case FFA_SUCCESS_32: 293 #ifdef ARM64 294 case FFA_SUCCESS_64: 295 #endif 296 case FFA_FEATURES: 297 case FFA_SPM_ID_GET: 298 case FFA_MEM_FRAG_TX: 299 case FFA_MEM_RECLAIM: 300 case FFA_MSG_SEND_DIRECT_REQ_64: 301 case FFA_MSG_SEND_DIRECT_REQ_32: 302 case FFA_INTERRUPT: 303 case FFA_PARTITION_INFO_GET: 304 case FFA_RXTX_UNMAP: 305 case FFA_RX_RELEASE: 306 case FFA_FEATURE_MANAGED_EXIT_INTR: 307 case FFA_NOTIFICATION_BITMAP_CREATE: 308 case FFA_NOTIFICATION_BITMAP_DESTROY: 309 case FFA_NOTIFICATION_BIND: 310 case FFA_NOTIFICATION_UNBIND: 311 case FFA_NOTIFICATION_SET: 312 case FFA_NOTIFICATION_GET: 313 case FFA_NOTIFICATION_INFO_GET_32: 314 #ifdef ARM64 315 case FFA_NOTIFICATION_INFO_GET_64: 316 #endif 317 ret_fid = FFA_SUCCESS_32; 318 ret_w2 = FFA_PARAM_MBZ; 319 break; 320 default: 321 break; 322 } 323 324 spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ, 325 FFA_PARAM_MBZ, FFA_PARAM_MBZ); 326 } 327 328 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret) 329 { 330 tee_mm_entry_t *mm = NULL; 331 332 if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz)) 333 return FFA_INVALID_PARAMETERS; 334 335 mm = tee_mm_alloc(&core_virt_shm_pool, sz); 336 if (!mm) 337 return FFA_NO_MEMORY; 338 339 if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa, 340 sz / SMALL_PAGE_SIZE, 341 MEM_AREA_NSEC_SHM)) { 342 tee_mm_free(mm); 343 return FFA_INVALID_PARAMETERS; 344 } 345 346 *va_ret = (void *)tee_mm_get_smem(mm); 347 return 0; 348 } 349 350 void spmc_handle_spm_id_get(struct thread_smc_1_2_regs *args) 351 { 352 spmc_set_args(args, FFA_SUCCESS_32, FFA_PARAM_MBZ, optee_spmc_lsp.sp_id, 353 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ); 354 } 355 356 static void unmap_buf(void *va, size_t sz) 357 { 358 tee_mm_entry_t *mm = tee_mm_find(&core_virt_shm_pool, (vaddr_t)va); 359 360 assert(mm); 361 core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE); 362 tee_mm_free(mm); 363 } 364 365 void spmc_handle_rxtx_map(struct thread_smc_1_2_regs *args, 366 struct ffa_rxtx *rxtx) 367 { 368 int rc = 0; 369 unsigned int sz = 0; 370 paddr_t rx_pa = 0; 371 paddr_t tx_pa = 0; 372 void *rx = NULL; 373 void *tx = NULL; 374 375 cpu_spin_lock(&rxtx->spinlock); 376 377 if (args->a3 & GENMASK_64(63, 6)) { 378 rc = FFA_INVALID_PARAMETERS; 379 goto out; 380 } 381 382 sz = args->a3 * SMALL_PAGE_SIZE; 383 if (!sz) { 384 rc = FFA_INVALID_PARAMETERS; 385 goto out; 386 } 387 /* TX/RX are swapped compared to the caller */ 388 tx_pa = args->a2; 389 rx_pa = args->a1; 390 391 if (rxtx->size) { 392 rc = FFA_DENIED; 393 goto out; 394 } 395 396 /* 397 * If the buffer comes from a SP the address is virtual and already 398 * mapped. 399 */ 400 if (is_nw_buf(rxtx)) { 401 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 402 enum teecore_memtypes mt = MEM_AREA_NEX_NSEC_SHM; 403 bool tx_alloced = false; 404 405 /* 406 * With virtualization we establish this mapping in 407 * the nexus mapping which then is replicated to 408 * each partition. 409 * 410 * This means that this mapping must be done before 411 * any partition is created and then must not be 412 * changed. 413 */ 414 415 /* 416 * core_mmu_add_mapping() may reuse previous 417 * mappings. First check if there's any mappings to 418 * reuse so we know how to clean up in case of 419 * failure. 
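 * If phys_to_virt() already returns a virtual address the mapping was
 * established earlier and must be left in place on failure, so the
 * tx_alloced flag below only covers a mapping added by this call.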
420 */ 421 tx = phys_to_virt(tx_pa, mt, sz); 422 rx = phys_to_virt(rx_pa, mt, sz); 423 if (!tx) { 424 tx = core_mmu_add_mapping(mt, tx_pa, sz); 425 if (!tx) { 426 rc = FFA_NO_MEMORY; 427 goto out; 428 } 429 tx_alloced = true; 430 } 431 if (!rx) 432 rx = core_mmu_add_mapping(mt, rx_pa, sz); 433 434 if (!rx) { 435 if (tx_alloced && tx) 436 core_mmu_remove_mapping(mt, tx, sz); 437 rc = FFA_NO_MEMORY; 438 goto out; 439 } 440 } else { 441 rc = map_buf(tx_pa, sz, &tx); 442 if (rc) 443 goto out; 444 rc = map_buf(rx_pa, sz, &rx); 445 if (rc) { 446 unmap_buf(tx, sz); 447 goto out; 448 } 449 } 450 rxtx->tx = tx; 451 rxtx->rx = rx; 452 } else { 453 if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) { 454 rc = FFA_INVALID_PARAMETERS; 455 goto out; 456 } 457 458 if (!virt_to_phys((void *)tx_pa) || 459 !virt_to_phys((void *)rx_pa)) { 460 rc = FFA_INVALID_PARAMETERS; 461 goto out; 462 } 463 464 rxtx->tx = (void *)tx_pa; 465 rxtx->rx = (void *)rx_pa; 466 } 467 468 rxtx->size = sz; 469 rxtx->tx_is_mine = true; 470 DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx); 471 DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx); 472 out: 473 cpu_spin_unlock(&rxtx->spinlock); 474 set_simple_ret_val(args, rc); 475 } 476 477 void spmc_handle_rxtx_unmap(struct thread_smc_1_2_regs *args, 478 struct ffa_rxtx *rxtx) 479 { 480 int rc = FFA_INVALID_PARAMETERS; 481 482 cpu_spin_lock(&rxtx->spinlock); 483 484 if (!rxtx->size) 485 goto out; 486 487 /* 488 * We don't unmap the SP memory as the SP might still use it. 489 * We avoid making changes to nexus mappings at this stage since 490 * there currently isn't a way to replicate those changes to all 491 * partitions. 492 */ 493 if (is_nw_buf(rxtx) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 494 unmap_buf(rxtx->rx, rxtx->size); 495 unmap_buf(rxtx->tx, rxtx->size); 496 } 497 rxtx->size = 0; 498 rxtx->rx = NULL; 499 rxtx->tx = NULL; 500 rc = 0; 501 out: 502 cpu_spin_unlock(&rxtx->spinlock); 503 set_simple_ret_val(args, rc); 504 } 505 506 void spmc_handle_rx_release(struct thread_smc_1_2_regs *args, 507 struct ffa_rxtx *rxtx) 508 { 509 int rc = 0; 510 511 cpu_spin_lock(&rxtx->spinlock); 512 /* The sender's RX is our TX */ 513 if (!rxtx->size || rxtx->tx_is_mine) { 514 rc = FFA_DENIED; 515 } else { 516 rc = 0; 517 rxtx->tx_is_mine = true; 518 } 519 cpu_spin_unlock(&rxtx->spinlock); 520 521 set_simple_ret_val(args, rc); 522 } 523 524 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3) 525 { 526 return !w0 && !w1 && !w2 && !w3; 527 } 528 529 TEE_Result spmc_fill_partition_entry(uint32_t ffa_vers, void *buf, size_t blen, 530 size_t idx, uint16_t endpoint_id, 531 uint16_t execution_context, 532 uint32_t part_props, 533 const uint32_t uuid_words[4]) 534 { 535 struct ffa_partition_info_x *fpi = NULL; 536 size_t fpi_size = sizeof(*fpi); 537 538 if (ffa_vers >= FFA_VERSION_1_1) 539 fpi_size += FFA_UUID_SIZE; 540 541 if ((idx + 1) * fpi_size > blen) 542 return TEE_ERROR_OUT_OF_MEMORY; 543 544 fpi = (void *)((vaddr_t)buf + idx * fpi_size); 545 fpi->id = endpoint_id; 546 /* Number of execution contexts implemented by this partition */ 547 fpi->execution_context = execution_context; 548 549 fpi->partition_properties = part_props; 550 551 /* In FF-A 1.0 only bits [2:0] are defined, let's mask others */ 552 if (ffa_vers < FFA_VERSION_1_1) 553 fpi->partition_properties &= FFA_PART_PROP_DIRECT_REQ_RECV | 554 FFA_PART_PROP_DIRECT_REQ_SEND | 555 FFA_PART_PROP_INDIRECT_MSGS; 556 557 if (ffa_vers >= FFA_VERSION_1_1) { 558 if (uuid_words) 559 memcpy(fpi->uuid,
uuid_words, FFA_UUID_SIZE); 560 else 561 memset(fpi->uuid, 0, FFA_UUID_SIZE); 562 } 563 564 return TEE_SUCCESS; 565 } 566 567 static TEE_Result lsp_partition_info_get(uint32_t ffa_vers, void *buf, 568 size_t buf_size, size_t *elem_count, 569 const uint32_t uuid_words[4], 570 bool count_only) 571 { 572 struct spmc_lsp_desc *desc = NULL; 573 TEE_Result res = TEE_SUCCESS; 574 size_t c = *elem_count; 575 576 STAILQ_FOREACH(desc, &lsp_head, link) { 577 /* 578 * LSPs (OP-TEE SPMC) without an assigned UUID are not 579 * proper LSPs and shouldn't be reported here. 580 */ 581 if (is_nil_uuid(desc->uuid_words[0], desc->uuid_words[1], 582 desc->uuid_words[2], desc->uuid_words[3])) 583 continue; 584 585 if (uuid_words && memcmp(uuid_words, desc->uuid_words, 586 sizeof(desc->uuid_words))) 587 continue; 588 589 if (!count_only && !res) 590 res = spmc_fill_partition_entry(ffa_vers, buf, buf_size, 591 c, desc->sp_id, 592 CFG_TEE_CORE_NB_CORE, 593 desc->properties, 594 desc->uuid_words); 595 c++; 596 } 597 598 *elem_count = c; 599 600 return res; 601 } 602 603 void spmc_handle_partition_info_get(struct thread_smc_1_2_regs *args, 604 struct ffa_rxtx *rxtx) 605 { 606 TEE_Result res = TEE_SUCCESS; 607 uint32_t ret_fid = FFA_ERROR; 608 uint32_t fpi_size = 0; 609 uint32_t rc = 0; 610 bool count_only = args->a5 & FFA_PARTITION_INFO_GET_COUNT_FLAG; 611 uint32_t uuid_words[4] = { args->a1, args->a2, args->a3, args->a4, }; 612 uint32_t *uuid = uuid_words; 613 size_t count = 0; 614 615 if (!count_only) { 616 cpu_spin_lock(&rxtx->spinlock); 617 618 if (!rxtx->size || !rxtx->tx_is_mine) { 619 rc = FFA_BUSY; 620 goto out; 621 } 622 } 623 624 if (is_nil_uuid(uuid[0], uuid[1], uuid[2], uuid[3])) 625 uuid = NULL; 626 627 if (lsp_partition_info_get(rxtx->ffa_vers, rxtx->tx, rxtx->size, 628 &count, uuid, count_only)) { 629 ret_fid = FFA_ERROR; 630 rc = FFA_INVALID_PARAMETERS; 631 goto out; 632 } 633 if (IS_ENABLED(CFG_SECURE_PARTITION)) { 634 res = sp_partition_info_get(rxtx->ffa_vers, rxtx->tx, 635 rxtx->size, uuid, &count, 636 count_only); 637 if (res != TEE_SUCCESS) { 638 ret_fid = FFA_ERROR; 639 rc = FFA_INVALID_PARAMETERS; 640 goto out; 641 } 642 } 643 644 rc = count; 645 ret_fid = FFA_SUCCESS_32; 646 out: 647 if (ret_fid == FFA_SUCCESS_32 && !count_only && 648 rxtx->ffa_vers >= FFA_VERSION_1_1) 649 fpi_size = sizeof(struct ffa_partition_info_x) + FFA_UUID_SIZE; 650 651 spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, fpi_size, 652 FFA_PARAM_MBZ, FFA_PARAM_MBZ); 653 if (!count_only) { 654 rxtx->tx_is_mine = false; 655 cpu_spin_unlock(&rxtx->spinlock); 656 } 657 } 658 659 static void spmc_handle_run(struct thread_smc_1_2_regs *args) 660 { 661 uint16_t endpoint = FFA_TARGET_INFO_GET_SP_ID(args->a1); 662 uint16_t thread_id = FFA_TARGET_INFO_GET_VCPU_ID(args->a1); 663 uint32_t rc = FFA_INVALID_PARAMETERS; 664 665 /* 666 * OP-TEE core threads are only preempted using controlled exit so 667 * FFA_RUN mustn't be used to resume such threads. 668 * 669 * The OP-TEE SPMC is not preempted at all; it's an error to try to 670 * resume that ID. 671 */ 672 if (spmc_find_lsp_by_sp_id(endpoint)) 673 goto out; 674 675 /* 676 * The endpoint should be an S-EL0 SP, try to resume the SP from 677 * preempted into busy state. 678 */ 679 rc = spmc_sp_resume_from_preempted(endpoint); 680 if (rc) 681 goto out; 682 thread_resume_from_rpc(thread_id, 0, 0, 0, 0); 683 /* 684 * thread_resume_from_rpc() only returns if the thread_id 685 * is invalid.
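 * On success the preempted SP thread is resumed on this CPU and the
 * eventual FF-A result is delivered from that context instead.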
686 */ 687 rc = FFA_INVALID_PARAMETERS; 688 689 out: 690 set_simple_ret_val(args, rc); 691 } 692 #endif /*CFG_CORE_SEL1_SPMC*/ 693 694 static struct notif_vm_bitmap *get_notif_vm_bitmap(struct guest_partition *prtn, 695 uint16_t vm_id) 696 { 697 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 698 if (!prtn) 699 return NULL; 700 assert(vm_id == virt_get_guest_id(prtn)); 701 return virt_get_guest_spec_data(prtn, notif_vm_bitmap_id); 702 } 703 if (vm_id) 704 return NULL; 705 return &default_notif_vm_bitmap; 706 } 707 708 static uint32_t spmc_enable_async_notif(uint32_t bottom_half_value, 709 uint16_t vm_id) 710 { 711 struct guest_partition *prtn = NULL; 712 struct notif_vm_bitmap *nvb = NULL; 713 uint32_t old_itr_status = 0; 714 uint32_t res = 0; 715 716 if (!spmc_notif_is_ready) { 717 /* 718 * This should never happen, not if normal world respects the 719 * exchanged capabilities. 720 */ 721 EMSG("Asynchronous notifications are not ready"); 722 return TEE_ERROR_NOT_IMPLEMENTED; 723 } 724 725 if (bottom_half_value >= OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE) { 726 EMSG("Invalid bottom half value %"PRIu32, bottom_half_value); 727 return TEE_ERROR_BAD_PARAMETERS; 728 } 729 730 prtn = virt_get_guest(vm_id); 731 nvb = get_notif_vm_bitmap(prtn, vm_id); 732 if (!nvb) { 733 res = TEE_ERROR_BAD_PARAMETERS; 734 goto out; 735 } 736 737 old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock); 738 nvb->do_bottom_half_value = bottom_half_value; 739 cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status); 740 741 notif_deliver_atomic_event(NOTIF_EVENT_STARTED, vm_id); 742 res = TEE_SUCCESS; 743 out: 744 virt_put_guest(prtn); 745 return res; 746 } 747 748 static uint32_t get_direct_resp_fid(uint32_t fid) 749 { 750 assert(fid == FFA_MSG_SEND_DIRECT_REQ_64 || 751 fid == FFA_MSG_SEND_DIRECT_REQ_32); 752 753 if (OPTEE_SMC_IS_64(fid)) 754 return FFA_MSG_SEND_DIRECT_RESP_64; 755 return FFA_MSG_SEND_DIRECT_RESP_32; 756 } 757 758 static void handle_yielding_call(struct thread_smc_1_2_regs *args) 759 { 760 uint32_t direct_resp_fid = get_direct_resp_fid(args->a0); 761 TEE_Result res = TEE_SUCCESS; 762 763 thread_check_canaries(); 764 765 #ifdef ARM64 766 /* Saving this for an eventual RPC */ 767 thread_get_core_local()->direct_resp_fid = direct_resp_fid; 768 #endif 769 770 if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) { 771 /* Note connection to struct thread_rpc_arg::ret */ 772 thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6, 773 0); 774 res = TEE_ERROR_BAD_PARAMETERS; 775 } else { 776 thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5, 777 args->a6, args->a7); 778 res = TEE_ERROR_BUSY; 779 } 780 spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 781 0, res, 0, 0); 782 } 783 784 static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5) 785 { 786 uint64_t cookie = reg_pair_to_64(a5, a4); 787 uint32_t res = 0; 788 789 res = mobj_ffa_unregister_by_cookie(cookie); 790 switch (res) { 791 case TEE_SUCCESS: 792 case TEE_ERROR_ITEM_NOT_FOUND: 793 return 0; 794 case TEE_ERROR_BUSY: 795 EMSG("res %#"PRIx32, res); 796 return FFA_BUSY; 797 default: 798 EMSG("res %#"PRIx32, res); 799 return FFA_INVALID_PARAMETERS; 800 } 801 } 802 803 static void handle_blocking_call(struct thread_smc_1_2_regs *args) 804 { 805 uint32_t direct_resp_fid = get_direct_resp_fid(args->a0); 806 uint32_t sec_caps = 0; 807 808 switch (args->a3) { 809 case OPTEE_FFA_GET_API_VERSION: 810 spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0, 811 OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR, 812 0); 813 break; 814 case 
OPTEE_FFA_GET_OS_VERSION: 815 spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0, 816 CFG_OPTEE_REVISION_MAJOR, 817 CFG_OPTEE_REVISION_MINOR, 818 TEE_IMPL_GIT_SHA1 >> 32); 819 break; 820 case OPTEE_FFA_EXCHANGE_CAPABILITIES: 821 sec_caps = OPTEE_FFA_SEC_CAP_ARG_OFFSET; 822 if (spmc_notif_is_ready) 823 sec_caps |= OPTEE_FFA_SEC_CAP_ASYNC_NOTIF; 824 if (IS_ENABLED(CFG_RPMB_ANNOUNCE_PROBE_CAP)) 825 sec_caps |= OPTEE_FFA_SEC_CAP_RPMB_PROBE; 826 if (IS_ENABLED(CFG_CORE_DYN_PROTMEM)) 827 sec_caps |= OPTEE_FFA_SEC_CAP_PROTMEM; 828 spmc_set_args(args, direct_resp_fid, 829 swap_src_dst(args->a1), 0, 0, 830 THREAD_RPC_MAX_NUM_PARAMS, sec_caps); 831 break; 832 case OPTEE_FFA_UNREGISTER_SHM: 833 spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0, 834 handle_unregister_shm(args->a4, args->a5), 0, 0); 835 break; 836 case OPTEE_FFA_ENABLE_ASYNC_NOTIF: 837 spmc_set_args(args, direct_resp_fid, 838 swap_src_dst(args->a1), 0, 839 spmc_enable_async_notif(args->a4, 840 FFA_SRC(args->a1)), 841 0, 0); 842 break; 843 #ifdef CFG_CORE_DYN_PROTMEM 844 case OPTEE_FFA_RELEASE_PROTMEM: 845 spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0, 846 handle_unregister_shm(args->a4, args->a5), 0, 0); 847 break; 848 #endif 849 default: 850 EMSG("Unhandled blocking service ID %#"PRIx32, 851 (uint32_t)args->a3); 852 spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0, 853 TEE_ERROR_BAD_PARAMETERS, 0, 0); 854 } 855 } 856 857 static void handle_framework_direct_request(struct thread_smc_1_2_regs *args) 858 { 859 uint32_t direct_resp_fid = get_direct_resp_fid(args->a0); 860 uint32_t w0 = FFA_ERROR; 861 uint32_t w1 = FFA_PARAM_MBZ; 862 uint32_t w2 = FFA_NOT_SUPPORTED; 863 uint32_t w3 = FFA_PARAM_MBZ; 864 865 switch (args->a2 & FFA_MSG_TYPE_MASK) { 866 case FFA_MSG_SEND_VM_CREATED: 867 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 868 uint16_t guest_id = args->a5; 869 TEE_Result res = virt_guest_created(guest_id); 870 871 w0 = direct_resp_fid; 872 w1 = swap_src_dst(args->a1); 873 w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_CREATED; 874 if (res == TEE_SUCCESS) 875 w3 = FFA_OK; 876 else if (res == TEE_ERROR_OUT_OF_MEMORY) 877 w3 = FFA_DENIED; 878 else 879 w3 = FFA_INVALID_PARAMETERS; 880 } 881 break; 882 case FFA_MSG_SEND_VM_DESTROYED: 883 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 884 uint16_t guest_id = args->a5; 885 TEE_Result res = virt_guest_destroyed(guest_id); 886 887 w0 = direct_resp_fid; 888 w1 = swap_src_dst(args->a1); 889 w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_DESTROYED; 890 if (res == TEE_SUCCESS) 891 w3 = FFA_OK; 892 else 893 w3 = FFA_INVALID_PARAMETERS; 894 } 895 break; 896 case FFA_MSG_VERSION_REQ: 897 w0 = direct_resp_fid; 898 w1 = swap_src_dst(args->a1); 899 w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_VERSION_RESP; 900 w3 = spmc_exchange_version(args->a3, &my_rxtx); 901 break; 902 default: 903 break; 904 } 905 spmc_set_args(args, w0, w1, w2, w3, FFA_PARAM_MBZ, FFA_PARAM_MBZ); 906 } 907 908 static void optee_lsp_handle_direct_request(struct thread_smc_1_2_regs *args) 909 { 910 if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) { 911 handle_framework_direct_request(args); 912 return; 913 } 914 915 if (IS_ENABLED(CFG_NS_VIRTUALIZATION) && 916 virt_set_guest(get_sender_id(args->a1))) { 917 spmc_set_args(args, get_direct_resp_fid(args->a0), 918 swap_src_dst(args->a1), 0, 919 TEE_ERROR_ITEM_NOT_FOUND, 0, 0); 920 return; 921 } 922 923 if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT)) 924 handle_yielding_call(args); 925 else 926 handle_blocking_call(args); 927 928 /* 929 * Note that 
handle_yielding_call() typically only returns if a 930 * thread cannot be allocated or found. virt_unset_guest() is also 931 * called from thread_state_suspend() and thread_state_free(). 932 */ 933 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) 934 virt_unset_guest(); 935 } 936 937 static void __maybe_unused 938 optee_spmc_lsp_handle_direct_request(struct thread_smc_1_2_regs *args) 939 { 940 if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) 941 handle_framework_direct_request(args); 942 else 943 set_simple_ret_val(args, FFA_INVALID_PARAMETERS); 944 } 945 946 static void handle_direct_request(struct thread_smc_1_2_regs *args) 947 { 948 struct spmc_lsp_desc *lsp = spmc_find_lsp_by_sp_id(FFA_DST(args->a1)); 949 950 if (lsp) { 951 lsp->direct_req(args); 952 } else { 953 spmc_sp_start_thread(args); 954 /* 955 * spmc_sp_start_thread() returns here if the SP ID is 956 * invalid. 957 */ 958 set_simple_ret_val(args, FFA_INVALID_PARAMETERS); 959 } 960 } 961 962 int spmc_read_mem_transaction(uint32_t ffa_vers, void *buf, size_t blen, 963 struct ffa_mem_transaction_x *trans) 964 { 965 uint16_t mem_reg_attr = 0; 966 uint32_t flags = 0; 967 uint32_t count = 0; 968 uint32_t offs = 0; 969 uint32_t size = 0; 970 size_t n = 0; 971 972 if (!IS_ALIGNED_WITH_TYPE(buf, uint64_t)) 973 return FFA_INVALID_PARAMETERS; 974 975 if (ffa_vers >= FFA_VERSION_1_1) { 976 struct ffa_mem_transaction_1_1 *descr = NULL; 977 978 if (blen < sizeof(*descr)) 979 return FFA_INVALID_PARAMETERS; 980 981 descr = buf; 982 trans->sender_id = READ_ONCE(descr->sender_id); 983 mem_reg_attr = READ_ONCE(descr->mem_reg_attr); 984 flags = READ_ONCE(descr->flags); 985 trans->global_handle = READ_ONCE(descr->global_handle); 986 trans->tag = READ_ONCE(descr->tag); 987 988 count = READ_ONCE(descr->mem_access_count); 989 size = READ_ONCE(descr->mem_access_size); 990 offs = READ_ONCE(descr->mem_access_offs); 991 } else { 992 struct ffa_mem_transaction_1_0 *descr = NULL; 993 994 if (blen < sizeof(*descr)) 995 return FFA_INVALID_PARAMETERS; 996 997 descr = buf; 998 trans->sender_id = READ_ONCE(descr->sender_id); 999 mem_reg_attr = READ_ONCE(descr->mem_reg_attr); 1000 flags = READ_ONCE(descr->flags); 1001 trans->global_handle = READ_ONCE(descr->global_handle); 1002 trans->tag = READ_ONCE(descr->tag); 1003 1004 count = READ_ONCE(descr->mem_access_count); 1005 size = sizeof(struct ffa_mem_access); 1006 offs = offsetof(struct ffa_mem_transaction_1_0, 1007 mem_access_array); 1008 } 1009 1010 if (mem_reg_attr > UINT8_MAX || flags > UINT8_MAX || 1011 size > UINT8_MAX || count > UINT8_MAX || offs > UINT16_MAX) 1012 return FFA_INVALID_PARAMETERS; 1013 1014 /* Check that the endpoint memory access descriptor array fits */ 1015 if (MUL_OVERFLOW(size, count, &n) || ADD_OVERFLOW(offs, n, &n) || 1016 n > blen) 1017 return FFA_INVALID_PARAMETERS; 1018 1019 trans->mem_reg_attr = mem_reg_attr; 1020 trans->flags = flags; 1021 trans->mem_access_size = size; 1022 trans->mem_access_count = count; 1023 trans->mem_access_offs = offs; 1024 return 0; 1025 } 1026 1027 #if defined(CFG_CORE_SEL1_SPMC) 1028 static int get_acc_perms(vaddr_t mem_acc_base, unsigned int mem_access_size, 1029 unsigned int mem_access_count, uint8_t *acc_perms, 1030 unsigned int *region_offs) 1031 { 1032 struct ffa_mem_access_perm *descr = NULL; 1033 struct ffa_mem_access *mem_acc = NULL; 1034 unsigned int n = 0; 1035 1036 for (n = 0; n < mem_access_count; n++) { 1037 mem_acc = (void *)(mem_acc_base + mem_access_size * n); 1038 descr = &mem_acc->access_perm; 1039 if (READ_ONCE(descr->endpoint_id) == optee_core_lsp.sp_id) 
{ 1040 *acc_perms = READ_ONCE(descr->perm); 1041 *region_offs = READ_ONCE(mem_acc->region_offs); 1042 return 0; 1043 } 1044 } 1045 1046 return FFA_INVALID_PARAMETERS; 1047 } 1048 1049 static int mem_op_init(bool mem_share, struct ffa_mem_transaction_x *mem_trans, 1050 void *buf, size_t blen, unsigned int *page_count, 1051 unsigned int *region_count, size_t *addr_range_offs) 1052 { 1053 const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW; 1054 struct ffa_mem_region *region_descr = NULL; 1055 unsigned int region_descr_offs = 0; 1056 uint16_t exp_mem_reg_attr = 0; 1057 uint8_t mem_acc_perm = 0; 1058 size_t n = 0; 1059 1060 if (mem_share) 1061 exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR; 1062 if (mem_trans->mem_reg_attr != exp_mem_reg_attr) 1063 return FFA_INVALID_PARAMETERS; 1064 1065 /* Check that the access permissions match what's expected */ 1066 if (get_acc_perms((vaddr_t)buf + mem_trans->mem_access_offs, 1067 mem_trans->mem_access_size, 1068 mem_trans->mem_access_count, 1069 &mem_acc_perm, &region_descr_offs) || 1070 mem_acc_perm != exp_mem_acc_perm) 1071 return FFA_INVALID_PARAMETERS; 1072 1073 /* Check that the Composite memory region descriptor fits */ 1074 if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) || 1075 n > blen) 1076 return FFA_INVALID_PARAMETERS; 1077 1078 if (!IS_ALIGNED_WITH_TYPE((vaddr_t)buf + region_descr_offs, 1079 struct ffa_mem_region)) 1080 return FFA_INVALID_PARAMETERS; 1081 1082 region_descr = (struct ffa_mem_region *)((vaddr_t)buf + 1083 region_descr_offs); 1084 *page_count = READ_ONCE(region_descr->total_page_count); 1085 *region_count = READ_ONCE(region_descr->address_range_count); 1086 *addr_range_offs = n; 1087 return 0; 1088 } 1089 1090 static int add_mem_op_helper(struct mem_op_state *s, void *buf, size_t flen) 1091 { 1092 unsigned int region_count = flen / sizeof(struct ffa_address_range); 1093 struct ffa_address_range *arange = NULL; 1094 unsigned int n = 0; 1095 1096 if (region_count > s->region_count) 1097 region_count = s->region_count; 1098 1099 if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range)) 1100 return FFA_INVALID_PARAMETERS; 1101 arange = buf; 1102 1103 for (n = 0; n < region_count; n++) { 1104 unsigned int page_count = READ_ONCE(arange[n].page_count); 1105 uint64_t addr = READ_ONCE(arange[n].address); 1106 1107 if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx, 1108 addr, page_count)) 1109 return FFA_INVALID_PARAMETERS; 1110 } 1111 1112 s->region_count -= region_count; 1113 if (s->region_count) 1114 return region_count * sizeof(*arange); 1115 1116 if (s->current_page_idx != s->page_count) 1117 return FFA_INVALID_PARAMETERS; 1118 1119 return 0; 1120 } 1121 1122 static int add_mem_op_frag(struct mem_frag_state *s, void *buf, size_t flen) 1123 { 1124 int rc = 0; 1125 1126 rc = add_mem_op_helper(&s->op, buf, flen); 1127 if (rc >= 0) { 1128 if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) { 1129 /* We're not at the end of the descriptor yet */ 1130 if (s->op.region_count) 1131 return s->frag_offset; 1132 1133 /* We're done */ 1134 rc = 0; 1135 } else { 1136 rc = FFA_INVALID_PARAMETERS; 1137 } 1138 } 1139 1140 SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link); 1141 if (rc < 0) { 1142 mobj_ffa_sel1_spmc_delete(s->op.mf); 1143 } else { 1144 if (mobj_ffa_push_to_inactive(s->op.mf)) { 1145 rc = FFA_INVALID_PARAMETERS; 1146 mobj_ffa_sel1_spmc_delete(s->op.mf); 1147 } 1148 } 1149 free(s); 1150 1151 return rc; 1152 } 1153 1154 static bool is_sp_op(struct ffa_mem_transaction_x *mem_trans, void *buf) 1155 { 1156 struct
ffa_mem_access_perm *perm = NULL; 1157 struct ffa_mem_access *mem_acc = NULL; 1158 1159 if (!IS_ENABLED(CFG_SECURE_PARTITION)) 1160 return false; 1161 1162 if (mem_trans->mem_access_count < 1) 1163 return false; 1164 1165 mem_acc = (void *)((vaddr_t)buf + mem_trans->mem_access_offs); 1166 perm = &mem_acc->access_perm; 1167 1168 /* 1169 * perm->endpoint_id is read here only to check if the endpoint is 1170 * OP-TEE. We do read it later on again, but there are some additional 1171 * checks there to make sure that the data is correct. 1172 */ 1173 return READ_ONCE(perm->endpoint_id) != optee_core_lsp.sp_id; 1174 } 1175 1176 static int add_mem_op(bool mem_share, struct ffa_mem_transaction_x *mem_trans, 1177 tee_mm_entry_t *mm, void *buf, size_t blen, size_t flen, 1178 uint64_t *global_handle) 1179 { 1180 int rc = 0; 1181 struct mem_op_state op = { .mem_share = mem_share, }; 1182 size_t addr_range_offs = 0; 1183 uint64_t cookie = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID; 1184 enum mobj_use_case use_case = MOBJ_USE_CASE_NS_SHM; 1185 size_t n = 0; 1186 1187 rc = mem_op_init(mem_share, mem_trans, buf, flen, &op.page_count, 1188 &op.region_count, &addr_range_offs); 1189 if (rc) 1190 return rc; 1191 1192 if (!op.page_count || !op.region_count) 1193 return FFA_INVALID_PARAMETERS; 1194 1195 if (MUL_OVERFLOW(op.region_count, 1196 sizeof(struct ffa_address_range), &n) || 1197 ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen) 1198 return FFA_INVALID_PARAMETERS; 1199 1200 if (mem_trans->global_handle) 1201 cookie = mem_trans->global_handle; 1202 if (!mem_share) 1203 use_case = mem_trans->tag; 1204 op.mf = mobj_ffa_sel1_spmc_new(cookie, op.page_count, use_case); 1205 if (!op.mf) 1206 return FFA_NO_MEMORY; 1207 1208 if (flen != blen) { 1209 struct mem_frag_state *s = calloc(1, sizeof(*s)); 1210 1211 if (!s) { 1212 rc = FFA_NO_MEMORY; 1213 goto err; 1214 } 1215 s->op = op; 1216 s->mm = mm; 1217 s->frag_offset = addr_range_offs; 1218 1219 SLIST_INSERT_HEAD(&frag_state_head, s, link); 1220 rc = add_mem_op_frag(s, (char *)buf + addr_range_offs, 1221 flen - addr_range_offs); 1222 1223 if (rc >= 0) 1224 *global_handle = mobj_ffa_get_cookie(op.mf); 1225 1226 return rc; 1227 } 1228 1229 rc = add_mem_op_helper(&op, (char *)buf + addr_range_offs, 1230 flen - addr_range_offs); 1231 if (rc) { 1232 /* 1233 * Number of consumed bytes may be returned instead of 0 for 1234 * done. 1235 */ 1236 rc = FFA_INVALID_PARAMETERS; 1237 goto err; 1238 } 1239 1240 if (mobj_ffa_push_to_inactive(op.mf)) { 1241 rc = FFA_INVALID_PARAMETERS; 1242 goto err; 1243 } 1244 *global_handle = mobj_ffa_get_cookie(op.mf); 1245 1246 return 0; 1247 err: 1248 mobj_ffa_sel1_spmc_delete(op.mf); 1249 return rc; 1250 } 1251 1252 static int handle_mem_op_tmem(bool share_mem, paddr_t pbuf, size_t blen, 1253 size_t flen, unsigned int page_count, 1254 uint64_t *global_handle, struct ffa_rxtx *rxtx) 1255 { 1256 struct ffa_mem_transaction_x mem_trans = { }; 1257 int rc = 0; 1258 size_t len = 0; 1259 void *buf = NULL; 1260 tee_mm_entry_t *mm = NULL; 1261 vaddr_t offs = pbuf & SMALL_PAGE_MASK; 1262 1263 if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len)) 1264 return FFA_INVALID_PARAMETERS; 1265 if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len)) 1266 return FFA_INVALID_PARAMETERS; 1267 1268 /* 1269 * Check that the length reported in flen is covered by len even 1270 * if the offset is taken into account. 
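 * For example, page_count == 2 gives len == 8 KiB; with an offset of
 * 0x800 into the first page, flen may be at most 8 KiB - 0x800.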
1271 */ 1272 if (len < flen || len - offs < flen) 1273 return FFA_INVALID_PARAMETERS; 1274 1275 mm = tee_mm_alloc(&core_virt_shm_pool, len); 1276 if (!mm) 1277 return FFA_NO_MEMORY; 1278 1279 if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf, 1280 page_count, MEM_AREA_NSEC_SHM)) { 1281 rc = FFA_INVALID_PARAMETERS; 1282 goto out; 1283 } 1284 buf = (void *)(tee_mm_get_smem(mm) + offs); 1285 1286 cpu_spin_lock(&rxtx->spinlock); 1287 rc = spmc_read_mem_transaction(rxtx->ffa_vers, buf, flen, &mem_trans); 1288 if (rc) 1289 goto unlock; 1290 1291 if (is_sp_op(&mem_trans, buf)) { 1292 if (!share_mem) { 1293 rc = FFA_DENIED; 1294 goto unlock; 1295 } 1296 rc = spmc_sp_add_share(&mem_trans, buf, blen, flen, 1297 global_handle, NULL); 1298 goto unlock; 1299 } 1300 1301 if (IS_ENABLED(CFG_NS_VIRTUALIZATION) && 1302 virt_set_guest(mem_trans.sender_id)) { 1303 rc = FFA_DENIED; 1304 goto unlock; 1305 } 1306 1307 rc = add_mem_op(share_mem, &mem_trans, mm, buf, blen, flen, 1308 global_handle); 1309 1310 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) 1311 virt_unset_guest(); 1312 1313 unlock: 1314 cpu_spin_unlock(&rxtx->spinlock); 1315 if (rc > 0) 1316 return rc; 1317 1318 core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count); 1319 out: 1320 tee_mm_free(mm); 1321 return rc; 1322 } 1323 1324 static int handle_mem_op_rxbuf(bool share_mem, size_t blen, size_t flen, 1325 uint64_t *global_handle, struct ffa_rxtx *rxtx) 1326 { 1327 struct ffa_mem_transaction_x mem_trans = { }; 1328 int rc = FFA_DENIED; 1329 1330 cpu_spin_lock(&rxtx->spinlock); 1331 1332 if (!rxtx->rx || flen > rxtx->size) 1333 goto out; 1334 1335 rc = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, flen, 1336 &mem_trans); 1337 if (rc) 1338 goto out; 1339 if (is_sp_op(&mem_trans, rxtx->rx)) { 1340 if (!share_mem) { 1341 rc = FFA_DENIED; 1342 goto out; 1343 } 1344 rc = spmc_sp_add_share(&mem_trans, rxtx, blen, flen, 1345 global_handle, NULL); 1346 goto out; 1347 } 1348 1349 if (IS_ENABLED(CFG_NS_VIRTUALIZATION) && 1350 virt_set_guest(mem_trans.sender_id)) 1351 goto out; 1352 1353 rc = add_mem_op(share_mem, &mem_trans, NULL, rxtx->rx, blen, flen, 1354 global_handle); 1355 1356 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) 1357 virt_unset_guest(); 1358 1359 out: 1360 cpu_spin_unlock(&rxtx->spinlock); 1361 1362 return rc; 1363 } 1364 1365 static void handle_mem_op(struct thread_smc_1_2_regs *args, 1366 struct ffa_rxtx *rxtx) 1367 { 1368 uint32_t tot_len = args->a1; 1369 uint32_t frag_len = args->a2; 1370 uint64_t addr = args->a3; 1371 uint32_t page_count = args->a4; 1372 uint32_t ret_w1 = 0; 1373 uint32_t ret_w2 = FFA_INVALID_PARAMETERS; 1374 uint32_t ret_w3 = 0; 1375 uint32_t ret_fid = FFA_ERROR; 1376 uint64_t global_handle = 0; 1377 bool share_mem = false; 1378 int rc = 0; 1379 1380 /* Check that the MBZs are indeed 0 */ 1381 if (args->a5 || args->a6 || args->a7) 1382 goto out; 1383 1384 /* Check that fragment length doesn't exceed total length */ 1385 if (frag_len > tot_len) 1386 goto out; 1387 1388 /* Check for 32-bit calling convention */ 1389 if (!OPTEE_SMC_IS_64(args->a0)) 1390 addr &= UINT32_MAX; 1391 1392 if (args->a0 == FFA_MEM_SHARE_32 || args->a0 == FFA_MEM_SHARE_64) 1393 share_mem = true; 1394 else 1395 share_mem = false; 1396 1397 if (!addr) { 1398 /* 1399 * The memory transaction descriptor is passed via our rx 1400 * buffer. 
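 * In that case page_count must be zero (checked just below); a non-zero
 * address instead selects a dynamically allocated buffer handled by
 * handle_mem_op_tmem().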
1401 */ 1402 if (page_count) 1403 goto out; 1404 rc = handle_mem_op_rxbuf(share_mem, tot_len, frag_len, 1405 &global_handle, rxtx); 1406 } else { 1407 rc = handle_mem_op_tmem(share_mem, addr, tot_len, frag_len, 1408 page_count, &global_handle, rxtx); 1409 } 1410 if (rc < 0) { 1411 ret_w2 = rc; 1412 } else if (rc > 0) { 1413 ret_fid = FFA_MEM_FRAG_RX; 1414 ret_w3 = rc; 1415 reg_pair_from_64(global_handle, &ret_w2, &ret_w1); 1416 } else { 1417 ret_fid = FFA_SUCCESS_32; 1418 reg_pair_from_64(global_handle, &ret_w3, &ret_w2); 1419 } 1420 out: 1421 spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0); 1422 } 1423 1424 static struct mem_frag_state *get_frag_state(uint64_t global_handle) 1425 { 1426 struct mem_frag_state *s = NULL; 1427 1428 SLIST_FOREACH(s, &frag_state_head, link) 1429 if (mobj_ffa_get_cookie(s->op.mf) == global_handle) 1430 return s; 1431 1432 return NULL; 1433 } 1434 1435 static void handle_mem_frag_tx(struct thread_smc_1_2_regs *args, 1436 struct ffa_rxtx *rxtx) 1437 { 1438 uint64_t global_handle = reg_pair_to_64(args->a2, args->a1); 1439 size_t flen = args->a3; 1440 uint32_t endpoint_id = args->a4; 1441 struct mem_frag_state *s = NULL; 1442 tee_mm_entry_t *mm = NULL; 1443 unsigned int page_count = 0; 1444 void *buf = NULL; 1445 uint32_t ret_w1 = 0; 1446 uint32_t ret_w2 = 0; 1447 uint32_t ret_w3 = 0; 1448 uint32_t ret_fid = 0; 1449 int rc = 0; 1450 1451 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 1452 uint16_t guest_id = endpoint_id >> 16; 1453 1454 if (!guest_id || virt_set_guest(guest_id)) { 1455 rc = FFA_INVALID_PARAMETERS; 1456 goto out_set_rc; 1457 } 1458 } 1459 1460 /* 1461 * Currently we're only doing this for fragmented FFA_MEM_SHARE_* 1462 * requests. 1463 */ 1464 1465 cpu_spin_lock(&rxtx->spinlock); 1466 1467 s = get_frag_state(global_handle); 1468 if (!s) { 1469 rc = FFA_INVALID_PARAMETERS; 1470 goto out; 1471 } 1472 1473 mm = s->mm; 1474 if (mm) { 1475 if (flen > tee_mm_get_bytes(mm)) { 1476 rc = FFA_INVALID_PARAMETERS; 1477 goto out; 1478 } 1479 page_count = s->op.page_count; 1480 buf = (void *)tee_mm_get_smem(mm); 1481 } else { 1482 if (flen > rxtx->size) { 1483 rc = FFA_INVALID_PARAMETERS; 1484 goto out; 1485 } 1486 buf = rxtx->rx; 1487 } 1488 1489 rc = add_mem_op_frag(s, buf, flen); 1490 out: 1491 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) 1492 virt_unset_guest(); 1493 1494 cpu_spin_unlock(&rxtx->spinlock); 1495 1496 if (rc <= 0 && mm) { 1497 core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count); 1498 tee_mm_free(mm); 1499 } 1500 1501 out_set_rc: 1502 if (rc < 0) { 1503 ret_fid = FFA_ERROR; 1504 ret_w2 = rc; 1505 } else if (rc > 0) { 1506 ret_fid = FFA_MEM_FRAG_RX; 1507 ret_w3 = rc; 1508 reg_pair_from_64(global_handle, &ret_w2, &ret_w1); 1509 } else { 1510 ret_fid = FFA_SUCCESS_32; 1511 reg_pair_from_64(global_handle, &ret_w3, &ret_w2); 1512 } 1513 1514 spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0); 1515 } 1516 1517 static void handle_mem_reclaim(struct thread_smc_1_2_regs *args) 1518 { 1519 int rc = FFA_INVALID_PARAMETERS; 1520 uint64_t cookie = 0; 1521 1522 if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7) 1523 goto out; 1524 1525 cookie = reg_pair_to_64(args->a2, args->a1); 1526 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 1527 uint16_t guest_id = 0; 1528 1529 if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) { 1530 guest_id = virt_find_guest_by_cookie(cookie); 1531 } else { 1532 guest_id = (cookie >> FFA_MEMORY_HANDLE_PRTN_SHIFT) & 1533 FFA_MEMORY_HANDLE_PRTN_MASK; 1534 } 1535 if (!guest_id) 1536 goto out; 1537 if 
(virt_set_guest(guest_id)) { 1538 if (!virt_reclaim_cookie_from_destroyed_guest(guest_id, 1539 cookie)) 1540 rc = FFA_OK; 1541 goto out; 1542 } 1543 } 1544 1545 switch (mobj_ffa_sel1_spmc_reclaim(cookie)) { 1546 case TEE_SUCCESS: 1547 rc = FFA_OK; 1548 break; 1549 case TEE_ERROR_ITEM_NOT_FOUND: 1550 DMSG("cookie %#"PRIx64" not found", cookie); 1551 rc = FFA_INVALID_PARAMETERS; 1552 break; 1553 default: 1554 DMSG("cookie %#"PRIx64" busy", cookie); 1555 rc = FFA_DENIED; 1556 break; 1557 } 1558 1559 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) 1560 virt_unset_guest(); 1561 1562 out: 1563 set_simple_ret_val(args, rc); 1564 } 1565 1566 static void handle_notification_bitmap_create(struct thread_smc_1_2_regs *args) 1567 { 1568 uint32_t ret_val = FFA_INVALID_PARAMETERS; 1569 uint32_t ret_fid = FFA_ERROR; 1570 uint32_t old_itr_status = 0; 1571 1572 if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 && 1573 !args->a5 && !args->a6 && !args->a7) { 1574 struct guest_partition *prtn = NULL; 1575 struct notif_vm_bitmap *nvb = NULL; 1576 uint16_t vm_id = args->a1; 1577 1578 prtn = virt_get_guest(vm_id); 1579 nvb = get_notif_vm_bitmap(prtn, vm_id); 1580 if (!nvb) { 1581 ret_val = FFA_INVALID_PARAMETERS; 1582 goto out_virt_put; 1583 } 1584 1585 old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock); 1586 1587 if (nvb->initialized) { 1588 ret_val = FFA_DENIED; 1589 goto out_unlock; 1590 } 1591 1592 nvb->initialized = true; 1593 nvb->do_bottom_half_value = -1; 1594 ret_val = FFA_OK; 1595 ret_fid = FFA_SUCCESS_32; 1596 out_unlock: 1597 cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status); 1598 out_virt_put: 1599 virt_put_guest(prtn); 1600 } 1601 1602 spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0); 1603 } 1604 1605 static void handle_notification_bitmap_destroy(struct thread_smc_1_2_regs *args) 1606 { 1607 uint32_t ret_val = FFA_INVALID_PARAMETERS; 1608 uint32_t ret_fid = FFA_ERROR; 1609 uint32_t old_itr_status = 0; 1610 1611 if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 && 1612 !args->a5 && !args->a6 && !args->a7) { 1613 struct guest_partition *prtn = NULL; 1614 struct notif_vm_bitmap *nvb = NULL; 1615 uint16_t vm_id = args->a1; 1616 1617 prtn = virt_get_guest(vm_id); 1618 nvb = get_notif_vm_bitmap(prtn, vm_id); 1619 if (!nvb) { 1620 ret_val = FFA_INVALID_PARAMETERS; 1621 goto out_virt_put; 1622 } 1623 1624 old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock); 1625 1626 if (nvb->pending || nvb->bound) { 1627 ret_val = FFA_DENIED; 1628 goto out_unlock; 1629 } 1630 1631 memset(nvb, 0, sizeof(*nvb)); 1632 ret_val = FFA_OK; 1633 ret_fid = FFA_SUCCESS_32; 1634 out_unlock: 1635 cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status); 1636 out_virt_put: 1637 virt_put_guest(prtn); 1638 } 1639 1640 spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0); 1641 } 1642 1643 static void handle_notification_bind(struct thread_smc_1_2_regs *args) 1644 { 1645 uint32_t ret_val = FFA_INVALID_PARAMETERS; 1646 struct guest_partition *prtn = NULL; 1647 struct notif_vm_bitmap *nvb = NULL; 1648 uint32_t ret_fid = FFA_ERROR; 1649 uint32_t old_itr_status = 0; 1650 uint64_t bitmap = 0; 1651 uint16_t vm_id = 0; 1652 1653 if (args->a5 || args->a6 || args->a7) 1654 goto out; 1655 if (args->a2) { 1656 /* We only deal with global notifications */ 1657 ret_val = FFA_DENIED; 1658 goto out; 1659 } 1660 1661 /* The destination of the eventual notification */ 1662 vm_id = FFA_DST(args->a1); 1663 bitmap = reg_pair_to_64(args->a4, args->a3); 1664 1665 prtn = virt_get_guest(vm_id); 1666 nvb = 
get_notif_vm_bitmap(prtn, vm_id); 1667 if (!nvb) { 1668 ret_val = FFA_INVALID_PARAMETERS; 1669 goto out_virt_put; 1670 } 1671 1672 old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock); 1673 1674 if ((bitmap & nvb->bound)) { 1675 ret_val = FFA_DENIED; 1676 } else { 1677 nvb->bound |= bitmap; 1678 ret_val = FFA_OK; 1679 ret_fid = FFA_SUCCESS_32; 1680 } 1681 1682 cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status); 1683 out_virt_put: 1684 virt_put_guest(prtn); 1685 out: 1686 spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0); 1687 } 1688 1689 static void handle_notification_unbind(struct thread_smc_1_2_regs *args) 1690 { 1691 uint32_t ret_val = FFA_INVALID_PARAMETERS; 1692 struct guest_partition *prtn = NULL; 1693 struct notif_vm_bitmap *nvb = NULL; 1694 uint32_t ret_fid = FFA_ERROR; 1695 uint32_t old_itr_status = 0; 1696 uint64_t bitmap = 0; 1697 uint16_t vm_id = 0; 1698 1699 if (args->a2 || args->a5 || args->a6 || args->a7) 1700 goto out; 1701 1702 /* The destination of the eventual notification */ 1703 vm_id = FFA_DST(args->a1); 1704 bitmap = reg_pair_to_64(args->a4, args->a3); 1705 1706 prtn = virt_get_guest(vm_id); 1707 nvb = get_notif_vm_bitmap(prtn, vm_id); 1708 if (!nvb) { 1709 ret_val = FFA_INVALID_PARAMETERS; 1710 goto out_virt_put; 1711 } 1712 1713 old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock); 1714 1715 if (bitmap & nvb->pending) { 1716 ret_val = FFA_DENIED; 1717 } else { 1718 nvb->bound &= ~bitmap; 1719 ret_val = FFA_OK; 1720 ret_fid = FFA_SUCCESS_32; 1721 } 1722 1723 cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status); 1724 out_virt_put: 1725 virt_put_guest(prtn); 1726 out: 1727 spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0); 1728 } 1729 1730 static void handle_notification_get(struct thread_smc_1_2_regs *args) 1731 { 1732 uint32_t w2 = FFA_INVALID_PARAMETERS; 1733 struct guest_partition *prtn = NULL; 1734 struct notif_vm_bitmap *nvb = NULL; 1735 uint32_t ret_fid = FFA_ERROR; 1736 uint32_t old_itr_status = 0; 1737 uint16_t vm_id = 0; 1738 uint32_t w3 = 0; 1739 1740 if (args->a5 || args->a6 || args->a7) 1741 goto out; 1742 if (!(args->a2 & 0x1)) { 1743 ret_fid = FFA_SUCCESS_32; 1744 w2 = 0; 1745 goto out; 1746 } 1747 vm_id = FFA_DST(args->a1); 1748 1749 prtn = virt_get_guest(vm_id); 1750 nvb = get_notif_vm_bitmap(prtn, vm_id); 1751 if (!nvb) 1752 goto out_virt_put; 1753 1754 old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock); 1755 1756 reg_pair_from_64(nvb->pending, &w3, &w2); 1757 nvb->pending = 0; 1758 ret_fid = FFA_SUCCESS_32; 1759 1760 cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status); 1761 out_virt_put: 1762 virt_put_guest(prtn); 1763 out: 1764 spmc_set_args(args, ret_fid, 0, w2, w3, 0, 0); 1765 } 1766 1767 struct notif_info_get_state { 1768 struct thread_smc_1_2_regs *args; 1769 unsigned int ids_per_reg; 1770 unsigned int ids_count; 1771 unsigned int id_pos; 1772 unsigned int count; 1773 unsigned int max_list_count; 1774 unsigned int list_count; 1775 }; 1776 1777 static bool add_id_in_regs(struct notif_info_get_state *state, 1778 uint16_t id) 1779 { 1780 unsigned int reg_idx = state->id_pos / state->ids_per_reg + 3; 1781 unsigned int reg_shift = (state->id_pos % state->ids_per_reg) * 16; 1782 1783 if (reg_idx > 7) 1784 return false; 1785 1786 state->args->a[reg_idx] &= ~SHIFT_U64(0xffff, reg_shift); 1787 state->args->a[reg_idx] |= (unsigned long)id << reg_shift; 1788 1789 state->id_pos++; 1790 state->count++; 1791 return true; 1792 } 1793 1794 static bool add_id_count(struct notif_info_get_state *state) 1795 { 1796 
assert(state->list_count < state->max_list_count && 1797 state->count >= 1 && state->count <= 4); 1798 1799 state->ids_count |= (state->count - 1) << (state->list_count * 2 + 12); 1800 state->list_count++; 1801 state->count = 0; 1802 1803 return state->list_count < state->max_list_count; 1804 } 1805 1806 static bool add_nvb_to_state(struct notif_info_get_state *state, 1807 uint16_t guest_id, struct notif_vm_bitmap *nvb) 1808 { 1809 if (!nvb->pending) 1810 return true; 1811 /* 1812 * Add only the guest_id, meaning a global notification for this 1813 * guest. 1814 * 1815 * If notifications were pending for one or more specific vCPUs we'd 1816 * add those IDs before calling add_id_count(), but that's not supported. 1817 */ 1818 return add_id_in_regs(state, guest_id) && add_id_count(state); 1819 } 1820 1821 static void handle_notification_info_get(struct thread_smc_1_2_regs *args) 1822 { 1823 struct notif_info_get_state state = { .args = args }; 1824 uint32_t ffa_res = FFA_INVALID_PARAMETERS; 1825 struct guest_partition *prtn = NULL; 1826 struct notif_vm_bitmap *nvb = NULL; 1827 uint32_t more_pending_flag = 0; 1828 uint32_t itr_state = 0; 1829 uint16_t guest_id = 0; 1830 1831 if (args->a1 || args->a2 || args->a3 || args->a4 || args->a5 || 1832 args->a6 || args->a7) 1833 goto err; 1834 1835 if (OPTEE_SMC_IS_64(args->a0)) { 1836 spmc_set_args(args, FFA_SUCCESS_64, 0, 0, 0, 0, 0); 1837 state.ids_per_reg = 4; 1838 state.max_list_count = 31; 1839 } else { 1840 spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0); 1841 state.ids_per_reg = 2; 1842 state.max_list_count = 15; 1843 } 1844 1845 while (true) { 1846 /* 1847 * With NS-Virtualization we need to go through all 1848 * partitions to collect the notification bitmaps; without 1849 * it we just check the only notification bitmap we have.
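 * When the pending IDs no longer fit in the available registers the
 * more-pending flag (BIT(0) in w2) is set so that normal world can fetch
 * the remainder with another FFA_NOTIFICATION_INFO_GET call.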
1850 */ 1851 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 1852 prtn = virt_next_guest(prtn); 1853 if (!prtn) 1854 break; 1855 guest_id = virt_get_guest_id(prtn); 1856 } 1857 nvb = get_notif_vm_bitmap(prtn, guest_id); 1858 1859 itr_state = cpu_spin_lock_xsave(&spmc_notif_lock); 1860 if (!add_nvb_to_state(&state, guest_id, nvb)) 1861 more_pending_flag = BIT(0); 1862 cpu_spin_unlock_xrestore(&spmc_notif_lock, itr_state); 1863 1864 if (!IS_ENABLED(CFG_NS_VIRTUALIZATION) || more_pending_flag) 1865 break; 1866 } 1867 virt_put_guest(prtn); 1868 1869 if (!state.id_pos) { 1870 ffa_res = FFA_NO_DATA; 1871 goto err; 1872 } 1873 args->a2 = (state.list_count << FFA_NOTIF_INFO_GET_ID_COUNT_SHIFT) | 1874 (state.ids_count << FFA_NOTIF_INFO_GET_ID_LIST_SHIFT) | 1875 more_pending_flag; 1876 return; 1877 err: 1878 spmc_set_args(args, FFA_ERROR, 0, ffa_res, 0, 0, 0); 1879 } 1880 1881 void thread_spmc_set_async_notif_intid(int intid) 1882 { 1883 assert(interrupt_can_raise_sgi(interrupt_get_main_chip())); 1884 notif_intid = intid; 1885 spmc_notif_is_ready = true; 1886 DMSG("Asynchronous notifications are ready"); 1887 } 1888 1889 void notif_send_async(uint32_t value, uint16_t guest_id) 1890 { 1891 struct guest_partition *prtn = NULL; 1892 struct notif_vm_bitmap *nvb = NULL; 1893 uint32_t old_itr_status = 0; 1894 1895 prtn = virt_get_guest(guest_id); 1896 nvb = get_notif_vm_bitmap(prtn, guest_id); 1897 1898 if (nvb) { 1899 old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock); 1900 assert(value == NOTIF_VALUE_DO_BOTTOM_HALF && 1901 spmc_notif_is_ready && nvb->do_bottom_half_value >= 0 && 1902 notif_intid >= 0); 1903 nvb->pending |= BIT64(nvb->do_bottom_half_value); 1904 interrupt_raise_sgi(interrupt_get_main_chip(), notif_intid, 1905 ITR_CPU_MASK_TO_THIS_CPU); 1906 cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status); 1907 } 1908 1909 virt_put_guest(prtn); 1910 } 1911 #else 1912 void notif_send_async(uint32_t value, uint16_t guest_id) 1913 { 1914 struct guest_partition *prtn = NULL; 1915 struct notif_vm_bitmap *nvb = NULL; 1916 /* global notification, delay notification interrupt */ 1917 uint32_t flags = BIT32(1); 1918 int res = 0; 1919 1920 prtn = virt_get_guest(guest_id); 1921 nvb = get_notif_vm_bitmap(prtn, guest_id); 1922 1923 if (nvb) { 1924 assert(value == NOTIF_VALUE_DO_BOTTOM_HALF && 1925 spmc_notif_is_ready && nvb->do_bottom_half_value >= 0); 1926 res = ffa_set_notification(guest_id, optee_core_lsp.sp_id, 1927 flags, 1928 BIT64(nvb->do_bottom_half_value)); 1929 if (res) { 1930 EMSG("notification set failed with error %d", res); 1931 panic(); 1932 } 1933 } 1934 1935 virt_put_guest(prtn); 1936 } 1937 #endif 1938 1939 /* Only called from assembly */ 1940 void thread_spmc_msg_recv(struct thread_smc_1_2_regs *args); 1941 void thread_spmc_msg_recv(struct thread_smc_1_2_regs *args) 1942 { 1943 assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL); 1944 switch (args->a0) { 1945 #if defined(CFG_CORE_SEL1_SPMC) 1946 case FFA_FEATURES: 1947 handle_features(args); 1948 break; 1949 case FFA_SPM_ID_GET: 1950 spmc_handle_spm_id_get(args); 1951 break; 1952 #ifdef ARM64 1953 case FFA_RXTX_MAP_64: 1954 #endif 1955 case FFA_RXTX_MAP_32: 1956 spmc_handle_rxtx_map(args, &my_rxtx); 1957 break; 1958 case FFA_RXTX_UNMAP: 1959 spmc_handle_rxtx_unmap(args, &my_rxtx); 1960 break; 1961 case FFA_RX_RELEASE: 1962 spmc_handle_rx_release(args, &my_rxtx); 1963 break; 1964 case FFA_PARTITION_INFO_GET: 1965 spmc_handle_partition_info_get(args, &my_rxtx); 1966 break; 1967 case FFA_RUN: 1968 spmc_handle_run(args); 1969 
break; 1970 #endif /*CFG_CORE_SEL1_SPMC*/ 1971 case FFA_INTERRUPT: 1972 if (IS_ENABLED(CFG_CORE_SEL1_SPMC)) 1973 spmc_set_args(args, FFA_NORMAL_WORLD_RESUME, 0, 0, 0, 1974 0, 0); 1975 else 1976 spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0); 1977 break; 1978 #ifdef ARM64 1979 case FFA_MSG_SEND_DIRECT_REQ_64: 1980 #endif 1981 case FFA_MSG_SEND_DIRECT_REQ_32: 1982 handle_direct_request(args); 1983 break; 1984 #if defined(CFG_CORE_SEL1_SPMC) 1985 #ifdef ARM64 1986 case FFA_MEM_SHARE_64: 1987 #endif 1988 case FFA_MEM_SHARE_32: 1989 #ifdef ARM64 1990 case FFA_MEM_LEND_64: 1991 #endif 1992 case FFA_MEM_LEND_32: 1993 handle_mem_op(args, &my_rxtx); 1994 break; 1995 case FFA_MEM_RECLAIM: 1996 if (!IS_ENABLED(CFG_SECURE_PARTITION) || 1997 !ffa_mem_reclaim(args, NULL)) 1998 handle_mem_reclaim(args); 1999 break; 2000 case FFA_MEM_FRAG_TX: 2001 handle_mem_frag_tx(args, &my_rxtx); 2002 break; 2003 case FFA_NOTIFICATION_BITMAP_CREATE: 2004 handle_notification_bitmap_create(args); 2005 break; 2006 case FFA_NOTIFICATION_BITMAP_DESTROY: 2007 handle_notification_bitmap_destroy(args); 2008 break; 2009 case FFA_NOTIFICATION_BIND: 2010 handle_notification_bind(args); 2011 break; 2012 case FFA_NOTIFICATION_UNBIND: 2013 handle_notification_unbind(args); 2014 break; 2015 case FFA_NOTIFICATION_GET: 2016 handle_notification_get(args); 2017 break; 2018 #ifdef ARM64 2019 case FFA_NOTIFICATION_INFO_GET_64: 2020 #endif 2021 case FFA_NOTIFICATION_INFO_GET_32: 2022 handle_notification_info_get(args); 2023 break; 2024 #endif /*CFG_CORE_SEL1_SPMC*/ 2025 case FFA_ERROR: 2026 EMSG("Cannot handle FFA_ERROR(%d)", (int)args->a2); 2027 if (!IS_ENABLED(CFG_CORE_SEL1_SPMC)) { 2028 /* 2029 * The SPMC will return an FFA_ERROR back so better 2030 * panic() now than flooding the log. 2031 */ 2032 panic("FFA_ERROR from SPMC is fatal"); 2033 } 2034 spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED, 2035 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ); 2036 break; 2037 default: 2038 EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0); 2039 set_simple_ret_val(args, FFA_NOT_SUPPORTED); 2040 } 2041 } 2042 2043 static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset) 2044 { 2045 size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS); 2046 struct thread_ctx *thr = threads + thread_get_id(); 2047 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 2048 struct optee_msg_arg *arg = NULL; 2049 struct mobj *mobj = NULL; 2050 uint32_t num_params = 0; 2051 size_t sz = 0; 2052 2053 mobj = mobj_ffa_get_by_cookie(cookie, 0); 2054 if (!mobj) { 2055 EMSG("Can't find cookie %#"PRIx64, cookie); 2056 return TEE_ERROR_BAD_PARAMETERS; 2057 } 2058 2059 res = mobj_inc_map(mobj); 2060 if (res) 2061 goto out_put_mobj; 2062 2063 res = TEE_ERROR_BAD_PARAMETERS; 2064 arg = mobj_get_va(mobj, offset, sizeof(*arg)); 2065 if (!arg) 2066 goto out_dec_map; 2067 2068 num_params = READ_ONCE(arg->num_params); 2069 if (num_params > OPTEE_MSG_MAX_NUM_PARAMS) 2070 goto out_dec_map; 2071 2072 sz = OPTEE_MSG_GET_ARG_SIZE(num_params); 2073 2074 thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc); 2075 if (!thr->rpc_arg) 2076 goto out_dec_map; 2077 2078 virt_on_stdcall(); 2079 res = tee_entry_std(arg, num_params); 2080 2081 thread_rpc_shm_cache_clear(&thr->shm_cache); 2082 thr->rpc_arg = NULL; 2083 2084 out_dec_map: 2085 mobj_dec_map(mobj); 2086 out_put_mobj: 2087 mobj_put(mobj); 2088 return res; 2089 } 2090 2091 /* 2092 * Helper routine for the assembly function thread_std_smc_entry() 2093 * 2094 * Note: this function is weak just to 
make link_dummies_paged.c happy. 2095 */ 2096 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1, 2097 uint32_t a2, uint32_t a3, 2098 uint32_t a4, uint32_t a5 __unused) 2099 { 2100 /* 2101 * Arguments are supplied from handle_yielding_call() as: 2102 * a0 <- w1 2103 * a1 <- w3 2104 * a2 <- w4 2105 * a3 <- w5 2106 * a4 <- w6 2107 * a5 <- w7 2108 */ 2109 thread_get_tsd()->rpc_target_info = swap_src_dst(a0); 2110 if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG) 2111 return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4); 2112 return FFA_DENIED; 2113 } 2114 2115 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm) 2116 { 2117 uint64_t offs = tpm->u.memref.offs; 2118 2119 param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN + 2120 OPTEE_MSG_ATTR_TYPE_FMEM_INPUT; 2121 2122 param->u.fmem.offs_low = offs; 2123 param->u.fmem.offs_high = offs >> 32; 2124 if (param->u.fmem.offs_high != offs >> 32) 2125 return false; 2126 2127 param->u.fmem.size = tpm->u.memref.size; 2128 if (tpm->u.memref.mobj) { 2129 uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj); 2130 2131 /* If a mobj is passed it better be one with a valid cookie. */ 2132 if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID) 2133 return false; 2134 param->u.fmem.global_id = cookie; 2135 } else { 2136 param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID; 2137 } 2138 2139 return true; 2140 } 2141 2142 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params, 2143 struct thread_param *params, 2144 struct optee_msg_arg **arg_ret) 2145 { 2146 size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS); 2147 struct thread_ctx *thr = threads + thread_get_id(); 2148 struct optee_msg_arg *arg = thr->rpc_arg; 2149 2150 if (num_params > THREAD_RPC_MAX_NUM_PARAMS) 2151 return TEE_ERROR_BAD_PARAMETERS; 2152 2153 if (!arg) { 2154 EMSG("rpc_arg not set"); 2155 return TEE_ERROR_GENERIC; 2156 } 2157 2158 memset(arg, 0, sz); 2159 arg->cmd = cmd; 2160 arg->num_params = num_params; 2161 arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */ 2162 2163 for (size_t n = 0; n < num_params; n++) { 2164 switch (params[n].attr) { 2165 case THREAD_PARAM_ATTR_NONE: 2166 arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE; 2167 break; 2168 case THREAD_PARAM_ATTR_VALUE_IN: 2169 case THREAD_PARAM_ATTR_VALUE_OUT: 2170 case THREAD_PARAM_ATTR_VALUE_INOUT: 2171 arg->params[n].attr = params[n].attr - 2172 THREAD_PARAM_ATTR_VALUE_IN + 2173 OPTEE_MSG_ATTR_TYPE_VALUE_INPUT; 2174 arg->params[n].u.value.a = params[n].u.value.a; 2175 arg->params[n].u.value.b = params[n].u.value.b; 2176 arg->params[n].u.value.c = params[n].u.value.c; 2177 break; 2178 case THREAD_PARAM_ATTR_MEMREF_IN: 2179 case THREAD_PARAM_ATTR_MEMREF_OUT: 2180 case THREAD_PARAM_ATTR_MEMREF_INOUT: 2181 if (!set_fmem(arg->params + n, params + n)) 2182 return TEE_ERROR_BAD_PARAMETERS; 2183 break; 2184 default: 2185 return TEE_ERROR_BAD_PARAMETERS; 2186 } 2187 } 2188 2189 if (arg_ret) 2190 *arg_ret = arg; 2191 2192 return TEE_SUCCESS; 2193 } 2194 2195 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params, 2196 struct thread_param *params) 2197 { 2198 for (size_t n = 0; n < num_params; n++) { 2199 switch (params[n].attr) { 2200 case THREAD_PARAM_ATTR_VALUE_OUT: 2201 case THREAD_PARAM_ATTR_VALUE_INOUT: 2202 params[n].u.value.a = arg->params[n].u.value.a; 2203 params[n].u.value.b = arg->params[n].u.value.b; 2204 params[n].u.value.c = arg->params[n].u.value.c; 2205 break; 2206 case THREAD_PARAM_ATTR_MEMREF_OUT: 2207 case THREAD_PARAM_ATTR_MEMREF_INOUT: 
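/*
 * For memref parameters only the size is copied back from normal
 * world; the buffer itself (mobj/cookie) was fixed when the arguments
 * were marshalled by set_fmem() in get_rpc_arg().
 */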
2208 params[n].u.memref.size = arg->params[n].u.fmem.size;
2209 break;
2210 default:
2211 break;
2212 }
2213 }
2214
2215 return arg->ret;
2216 }
2217
2218 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
2219 struct thread_param *params)
2220 {
2221 struct thread_rpc_arg rpc_arg = { .call = {
2222 .w1 = thread_get_tsd()->rpc_target_info,
2223 .w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
2224 },
2225 };
2226 struct optee_msg_arg *arg = NULL;
2227 uint32_t ret = 0;
2228
2229 ret = get_rpc_arg(cmd, num_params, params, &arg);
2230 if (ret)
2231 return ret;
2232
2233 thread_rpc(&rpc_arg);
2234
2235 return get_rpc_arg_res(arg, num_params, params);
2236 }
2237
2238 static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
2239 {
2240 struct thread_rpc_arg rpc_arg = { .call = {
2241 .w1 = thread_get_tsd()->rpc_target_info,
2242 .w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
2243 },
2244 };
2245 struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
2246 uint32_t res2 = 0;
2247 uint32_t res = 0;
2248
2249 DMSG("freeing cookie %#"PRIx64, cookie);
2250
2251 res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);
2252
2253 mobj_put(mobj);
2254 res2 = mobj_ffa_unregister_by_cookie(cookie);
2255 if (res2)
2256 DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
2257 cookie, res2);
2258 if (!res)
2259 thread_rpc(&rpc_arg);
2260 }
2261
2262 static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
2263 {
2264 struct thread_rpc_arg rpc_arg = { .call = {
2265 .w1 = thread_get_tsd()->rpc_target_info,
2266 .w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
2267 },
2268 };
2269 struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
2270 struct optee_msg_arg *arg = NULL;
2271 unsigned int internal_offset = 0;
2272 struct mobj *mobj = NULL;
2273 uint64_t cookie = 0;
2274
2275 if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
2276 return NULL;
2277
2278 thread_rpc(&rpc_arg);
2279
2280 if (arg->num_params != 1 ||
2281 arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
2282 return NULL;
2283
2284 internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
2285 cookie = READ_ONCE(arg->params->u.fmem.global_id);
2286 mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
2287 if (!mobj) {
2288 DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
2289 cookie, internal_offset);
2290 return NULL;
2291 }
2292
2293 assert(mobj_is_nonsec(mobj));
2294
2295 if (mobj->size < size) {
2296 DMSG("Mobj %#"PRIx64": wrong size", cookie);
2297 mobj_put(mobj);
2298 return NULL;
2299 }
2300
2301 if (mobj_inc_map(mobj)) {
2302 DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
2303 mobj_put(mobj);
2304 return NULL;
2305 }
2306
2307 return mobj;
2308 }
2309
2310 struct mobj *thread_rpc_alloc_payload(size_t size)
2311 {
2312 return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
2313 }
2314
2315 struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
2316 {
2317 return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
2318 }
2319
2320 void thread_rpc_free_kernel_payload(struct mobj *mobj)
2321 {
2322 if (mobj)
2323 thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL,
2324 mobj_get_cookie(mobj), mobj);
2325 }
2326
2327 void thread_rpc_free_payload(struct mobj *mobj)
2328 {
2329 if (mobj)
2330 thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
2331 mobj);
2332 }
2333
2334 struct mobj *thread_rpc_alloc_global_payload(size_t size)
2335 {
2336 return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
2337 }
2338
2339 void
thread_rpc_free_global_payload(struct mobj *mobj) 2340 { 2341 if (mobj) 2342 thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL, 2343 mobj_get_cookie(mobj), mobj); 2344 } 2345 2346 void thread_spmc_register_secondary_ep(vaddr_t ep) 2347 { 2348 unsigned long ret = 0; 2349 2350 /* Let the SPM know the entry point for secondary CPUs */ 2351 ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0); 2352 2353 if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64) 2354 EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret); 2355 } 2356 2357 static uint16_t ffa_id_get(void) 2358 { 2359 /* 2360 * Ask the SPM component running at a higher EL to return our FF-A ID. 2361 * This can either be the SPMC ID (if the SPMC is enabled in OP-TEE) or 2362 * the partition ID (if not). 2363 */ 2364 struct thread_smc_args args = { 2365 .a0 = FFA_ID_GET, 2366 }; 2367 2368 thread_smccc(&args); 2369 if (!is_ffa_success(args.a0)) { 2370 if (args.a0 == FFA_ERROR) 2371 EMSG("Get id failed with error %ld", args.a2); 2372 else 2373 EMSG("Get id failed"); 2374 panic(); 2375 } 2376 2377 return args.a2; 2378 } 2379 2380 static uint16_t ffa_spm_id_get(void) 2381 { 2382 /* 2383 * Ask the SPM component running at a higher EL to return its ID. 2384 * If OP-TEE implements the S-EL1 SPMC, this will get the SPMD ID. 2385 * If not, the ID of the SPMC will be returned. 2386 */ 2387 struct thread_smc_args args = { 2388 .a0 = FFA_SPM_ID_GET, 2389 }; 2390 2391 thread_smccc(&args); 2392 if (!is_ffa_success(args.a0)) { 2393 if (args.a0 == FFA_ERROR) 2394 EMSG("Get spm id failed with error %ld", args.a2); 2395 else 2396 EMSG("Get spm id failed"); 2397 panic(); 2398 } 2399 2400 return args.a2; 2401 } 2402 2403 #ifdef CFG_CORE_DYN_PROTMEM 2404 TEE_Result thread_spmc_get_protmem_config(enum mobj_use_case use_case, 2405 void *buf, size_t *buf_sz, 2406 size_t *min_mem_sz, 2407 size_t *min_mem_align) 2408 { 2409 TEE_Result res = TEE_SUCCESS; 2410 struct ffa_mem_access_perm mem_acc_list[] = { 2411 { 2412 .endpoint_id = optee_core_lsp.sp_id, 2413 .perm = FFA_MEM_ACC_RW, 2414 }, 2415 }; 2416 2417 res = plat_get_protmem_config(use_case, min_mem_sz, min_mem_align); 2418 if (res) 2419 return res; 2420 2421 if (!buf || *buf_sz < sizeof(mem_acc_list)) { 2422 *buf_sz = sizeof(mem_acc_list); 2423 return TEE_ERROR_SHORT_BUFFER; 2424 } 2425 2426 memcpy(buf, mem_acc_list, sizeof(mem_acc_list)); 2427 *buf_sz = sizeof(mem_acc_list); 2428 2429 return TEE_SUCCESS; 2430 } 2431 #endif /*CFG_CORE_DYN_PROTMEM*/ 2432 2433 static TEE_Result check_desc(struct spmc_lsp_desc *d) 2434 { 2435 uint32_t accept_props = FFA_PART_PROP_DIRECT_REQ_RECV | 2436 FFA_PART_PROP_DIRECT_REQ_SEND | 2437 FFA_PART_PROP_NOTIF_CREATED | 2438 FFA_PART_PROP_NOTIF_DESTROYED | 2439 FFA_PART_PROP_AARCH64_STATE; 2440 uint32_t id = d->sp_id; 2441 2442 if (id && (spmc_is_reserved_id(id) || spmc_find_lsp_by_sp_id(id) || 2443 id < FFA_SWD_ID_MIN || id > FFA_SWD_ID_MAX)) { 2444 EMSG("Conflicting SP id for SP \"%s\" id %#"PRIx32, 2445 d->name, id); 2446 if (!IS_ENABLED(CFG_SP_SKIP_FAILED)) 2447 panic(); 2448 return TEE_ERROR_BAD_FORMAT; 2449 } 2450 2451 if (d->properties & ~accept_props) { 2452 EMSG("Unexpected properties in %#"PRIx32" for LSP \"%s\" %#"PRIx16, 2453 d->properties, d->name, d->sp_id); 2454 if (!IS_ENABLED(CFG_SP_SKIP_FAILED)) 2455 panic(); 2456 d->properties &= accept_props; 2457 } 2458 2459 if (!d->direct_req) { 2460 EMSG("Missing direct request callback for LSP \"%s\" %#"PRIx16, 2461 d->name, d->sp_id); 2462 if (!IS_ENABLED(CFG_SP_SKIP_FAILED)) 2463 panic(); 2464 return TEE_ERROR_BAD_FORMAT; 2465 } 
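/*
 * A Nil UUID acts as a wildcard in FF-A partition info queries, so it
 * cannot be used to identify an LSP; reject it here.
 */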
2466
2467 if (!d->uuid_words[0] && !d->uuid_words[1] &&
2468 !d->uuid_words[2] && !d->uuid_words[3]) {
2469 EMSG("Found NULL UUID for LSP \"%s\" %#"PRIx16,
2470 d->name, d->sp_id);
2471 if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
2472 panic();
2473 return TEE_ERROR_BAD_FORMAT;
2474 }
2475
2476 return TEE_SUCCESS;
2477 }
2478
2479 static uint16_t find_unused_sp_id(void)
2480 {
2481 uint32_t id = FFA_SWD_ID_MIN;
2482
2483 while (spmc_is_reserved_id(id) || spmc_find_lsp_by_sp_id(id)) {
2484 id++;
2485 assert(id <= FFA_SWD_ID_MAX);
2486 }
2487
2488 return id;
2489 }
2490
2491 TEE_Result spmc_register_lsp(struct spmc_lsp_desc *desc)
2492 {
2493 TEE_Result res = TEE_SUCCESS;
2494
2495 res = check_desc(desc);
2496 if (res)
2497 return res;
2498
2499 if (STAILQ_EMPTY(&lsp_head)) {
2500 DMSG("Cannot add Logical SP \"%s\": LSP framework not initialized yet",
2501 desc->name);
2502 return TEE_ERROR_ITEM_NOT_FOUND;
2503 }
2504
2505 if (!desc->sp_id)
2506 desc->sp_id = find_unused_sp_id();
2507
2508 DMSG("Adding Logical SP \"%s\" with id %#"PRIx16,
2509 desc->name, desc->sp_id);
2510
2511 STAILQ_INSERT_TAIL(&lsp_head, desc, link);
2512
2513 return TEE_SUCCESS;
2514 }
2515
2516 static struct spmc_lsp_desc optee_core_lsp __nex_data = {
2517 .name = "OP-TEE",
2518 .direct_req = optee_lsp_handle_direct_request,
2519 .properties = FFA_PART_PROP_DIRECT_REQ_RECV |
2520 FFA_PART_PROP_DIRECT_REQ_SEND |
2521 #ifdef CFG_NS_VIRTUALIZATION
2522 FFA_PART_PROP_NOTIF_CREATED |
2523 FFA_PART_PROP_NOTIF_DESTROYED |
2524 #endif
2525 FFA_PART_PROP_AARCH64_STATE |
2526 FFA_PART_PROP_IS_PE_ID,
2527 /*
2528 * - if the SPMC is in S-EL2 this UUID describes OP-TEE as an S-EL1
2529 * SP, or
2530 * - if the SPMC is in S-EL1 then this UUID is for OP-TEE as a
2531 * logical partition, residing in the same exception level as the
2532 * SPMC
2533 * UUID 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
2534 */
2535 .uuid_words = { 0xe0786148, 0xe311f8e7, 0x02005ebc, 0x1bc5d5a5, },
2536 };
2537
2538 #if defined(CFG_CORE_SEL1_SPMC)
2539 static struct spmc_lsp_desc optee_spmc_lsp __nex_data = {
2540 .name = "OP-TEE SPMC",
2541 .direct_req = optee_spmc_lsp_handle_direct_request,
2542 };
2543
2544 static TEE_Result spmc_init(void)
2545 {
2546 if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
2547 virt_add_guest_spec_data(&notif_vm_bitmap_id,
2548 sizeof(struct notif_vm_bitmap), NULL))
2549 panic("virt_add_guest_spec_data");
2550 spmd_id = ffa_spm_id_get();
2551 DMSG("SPMD ID %#"PRIx16, spmd_id);
2552
2553 optee_spmc_lsp.sp_id = ffa_id_get();
2554 DMSG("SPMC ID %#"PRIx16, optee_spmc_lsp.sp_id);
2555 STAILQ_INSERT_HEAD(&lsp_head, &optee_spmc_lsp, link);
2556
2557 optee_core_lsp.sp_id = find_unused_sp_id();
2558 DMSG("OP-TEE endpoint ID %#"PRIx16, optee_core_lsp.sp_id);
2559 STAILQ_INSERT_HEAD(&lsp_head, &optee_core_lsp, link);
2560
2561 /*
2562 * If the SPMD thinks we are version 1.0 it will report version 1.0 to
2563 * normal world regardless of what version we query the SPM with.
2564 * However, if the SPMD thinks we are version 1.1 it will forward
2565 * queries from normal world to let us negotiate the version. So by
2566 * setting version 1.0 here we should be compatible.
2567 *
2568 * Note that disagreement on the negotiated version means that we'll
2569 * have communication problems with normal world.
2570 */ 2571 my_rxtx.ffa_vers = FFA_VERSION_1_0; 2572 2573 return TEE_SUCCESS; 2574 } 2575 #else /* !defined(CFG_CORE_SEL1_SPMC) */ 2576 static void spmc_rxtx_map(struct ffa_rxtx *rxtx) 2577 { 2578 struct thread_smc_args args = { 2579 #ifdef ARM64 2580 .a0 = FFA_RXTX_MAP_64, 2581 #else 2582 .a0 = FFA_RXTX_MAP_32, 2583 #endif 2584 .a1 = virt_to_phys(rxtx->tx), 2585 .a2 = virt_to_phys(rxtx->rx), 2586 .a3 = 1, 2587 }; 2588 2589 thread_smccc(&args); 2590 if (!is_ffa_success(args.a0)) { 2591 if (args.a0 == FFA_ERROR) 2592 EMSG("rxtx map failed with error %ld", args.a2); 2593 else 2594 EMSG("rxtx map failed"); 2595 panic(); 2596 } 2597 } 2598 2599 static uint32_t get_ffa_version(uint32_t my_version) 2600 { 2601 struct thread_smc_args args = { 2602 .a0 = FFA_VERSION, 2603 .a1 = my_version, 2604 }; 2605 2606 thread_smccc(&args); 2607 if (args.a0 & BIT(31)) { 2608 EMSG("FF-A version failed with error %ld", args.a0); 2609 panic(); 2610 } 2611 2612 return args.a0; 2613 } 2614 2615 static void *spmc_retrieve_req(struct ffa_mem_transaction_x *trans) 2616 { 2617 uint64_t cookie __maybe_unused = trans->global_handle; 2618 struct ffa_mem_access *acc_descr_array = NULL; 2619 struct ffa_mem_access_perm *perm_descr = NULL; 2620 struct thread_smc_args args = { 2621 .a0 = FFA_MEM_RETRIEVE_REQ_32, 2622 .a3 = 0, /* Address, Using TX -> MBZ */ 2623 .a4 = 0, /* Using TX -> MBZ */ 2624 }; 2625 size_t size = 0; 2626 int rc = 0; 2627 2628 if (my_rxtx.ffa_vers == FFA_VERSION_1_0) { 2629 struct ffa_mem_transaction_1_0 *trans_descr = my_rxtx.tx; 2630 2631 size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access); 2632 memset(trans_descr, 0, size); 2633 trans_descr->sender_id = trans->sender_id; 2634 trans_descr->mem_reg_attr = trans->mem_reg_attr; 2635 trans_descr->global_handle = trans->global_handle; 2636 trans_descr->tag = trans->tag; 2637 trans_descr->flags = trans->flags; 2638 trans_descr->mem_access_count = 1; 2639 acc_descr_array = trans_descr->mem_access_array; 2640 } else { 2641 struct ffa_mem_transaction_1_1 *trans_descr = my_rxtx.tx; 2642 2643 size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access); 2644 memset(trans_descr, 0, size); 2645 trans_descr->sender_id = trans->sender_id; 2646 trans_descr->mem_reg_attr = trans->mem_reg_attr; 2647 trans_descr->global_handle = trans->global_handle; 2648 trans_descr->tag = trans->tag; 2649 trans_descr->flags = trans->flags; 2650 trans_descr->mem_access_count = 1; 2651 trans_descr->mem_access_offs = sizeof(*trans_descr); 2652 trans_descr->mem_access_size = sizeof(struct ffa_mem_access); 2653 acc_descr_array = (void *)((vaddr_t)my_rxtx.tx + 2654 sizeof(*trans_descr)); 2655 } 2656 acc_descr_array->region_offs = 0; 2657 acc_descr_array->reserved = 0; 2658 perm_descr = &acc_descr_array->access_perm; 2659 perm_descr->endpoint_id = optee_core_lsp.sp_id; 2660 perm_descr->perm = FFA_MEM_ACC_RW; 2661 perm_descr->flags = 0; 2662 2663 args.a1 = size; /* Total Length */ 2664 args.a2 = size; /* Frag Length == Total length */ 2665 thread_smccc(&args); 2666 if (args.a0 != FFA_MEM_RETRIEVE_RESP) { 2667 if (args.a0 == FFA_ERROR) 2668 EMSG("Failed to fetch cookie %#"PRIx64" error code %d", 2669 cookie, (int)args.a2); 2670 else 2671 EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64, 2672 cookie, args.a0); 2673 return NULL; 2674 } 2675 rc = spmc_read_mem_transaction(my_rxtx.ffa_vers, my_rxtx.rx, 2676 my_rxtx.size, trans); 2677 if (rc) { 2678 EMSG("Memory transaction failure for cookie %#"PRIx64" rc %d", 2679 cookie, rc); 2680 return NULL; 2681 } 2682 2683 return my_rxtx.rx; 
2684 } 2685 2686 void thread_spmc_relinquish(uint64_t cookie) 2687 { 2688 struct ffa_mem_relinquish *relinquish_desc = my_rxtx.tx; 2689 struct thread_smc_args args = { 2690 .a0 = FFA_MEM_RELINQUISH, 2691 }; 2692 2693 memset(relinquish_desc, 0, sizeof(*relinquish_desc)); 2694 relinquish_desc->handle = cookie; 2695 relinquish_desc->flags = 0; 2696 relinquish_desc->endpoint_count = 1; 2697 relinquish_desc->endpoint_id_array[0] = optee_core_lsp.sp_id; 2698 thread_smccc(&args); 2699 if (!is_ffa_success(args.a0)) 2700 EMSG("Failed to relinquish cookie %#"PRIx64, cookie); 2701 } 2702 2703 static int set_pages(struct ffa_address_range *regions, 2704 unsigned int num_regions, unsigned int num_pages, 2705 struct mobj_ffa *mf) 2706 { 2707 unsigned int n = 0; 2708 unsigned int idx = 0; 2709 2710 for (n = 0; n < num_regions; n++) { 2711 unsigned int page_count = READ_ONCE(regions[n].page_count); 2712 uint64_t addr = READ_ONCE(regions[n].address); 2713 2714 if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count)) 2715 return FFA_INVALID_PARAMETERS; 2716 } 2717 2718 if (idx != num_pages) 2719 return FFA_INVALID_PARAMETERS; 2720 2721 return 0; 2722 } 2723 2724 struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie, 2725 enum mobj_use_case use_case) 2726 { 2727 struct mobj_ffa *ret = NULL; 2728 struct ffa_mem_transaction_x retrieve_desc = { .tag = use_case}; 2729 struct ffa_mem_access *descr_array = NULL; 2730 struct ffa_mem_region *descr = NULL; 2731 struct mobj_ffa *mf = NULL; 2732 unsigned int num_pages = 0; 2733 unsigned int offs = 0; 2734 void *buf = NULL; 2735 struct thread_smc_args ffa_rx_release_args = { 2736 .a0 = FFA_RX_RELEASE 2737 }; 2738 2739 if (use_case == MOBJ_USE_CASE_NS_SHM) 2740 retrieve_desc.flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE; 2741 else 2742 retrieve_desc.flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND; 2743 retrieve_desc.flags |= FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT; 2744 retrieve_desc.global_handle = cookie; 2745 retrieve_desc.sender_id = thread_get_tsd()->rpc_target_info; 2746 retrieve_desc.mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR; 2747 2748 /* 2749 * OP-TEE is only supporting a single mem_region while the 2750 * specification allows for more than one. 2751 */ 2752 buf = spmc_retrieve_req(&retrieve_desc); 2753 if (!buf) { 2754 EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64, 2755 cookie); 2756 return NULL; 2757 } 2758 2759 descr_array = (void *)((vaddr_t)buf + retrieve_desc.mem_access_offs); 2760 offs = READ_ONCE(descr_array->region_offs); 2761 descr = (struct ffa_mem_region *)((vaddr_t)buf + offs); 2762 2763 num_pages = READ_ONCE(descr->total_page_count); 2764 mf = mobj_ffa_spmc_new(cookie, num_pages, use_case); 2765 if (!mf) 2766 goto out; 2767 2768 if (set_pages(descr->address_range_array, 2769 READ_ONCE(descr->address_range_count), num_pages, mf)) { 2770 mobj_ffa_spmc_delete(mf); 2771 goto out; 2772 } 2773 2774 ret = mf; 2775 2776 out: 2777 /* Release RX buffer after the mem retrieve request. 
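 * The retrieved descriptor lives in our RX buffer; we keep ownership
 * of the buffer until FFA_RX_RELEASE hands it back to the SPMC, so do
 * this on the error paths above as well.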
*/
2778 thread_smccc(&ffa_rx_release_args);
2779
2780 return ret;
2781 }
2782
2783 static uint32_t get_ffa_version_from_manifest(void *fdt)
2784 {
2785 int ret = 0;
2786 uint32_t vers = 0;
2787
2788 ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
2789 if (ret < 0) {
2790 EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
2791 panic();
2792 }
2793
2794 ret = fdt_read_uint32(fdt, 0, "ffa-version", &vers);
2795 if (ret < 0) {
2796 EMSG("Can't read \"ffa-version\" from FF-A manifest at %p: error %d",
2797 fdt, ret);
2798 panic();
2799 }
2800
2801 return vers;
2802 }
2803
2804 static TEE_Result spmc_init(void)
2805 {
2806 uint32_t my_vers = 0;
2807 uint32_t vers = 0;
2808
2809 if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
2810 virt_add_guest_spec_data(&notif_vm_bitmap_id,
2811 sizeof(struct notif_vm_bitmap), NULL))
2812 panic("virt_add_guest_spec_data");
2813
2814 my_vers = get_ffa_version_from_manifest(get_manifest_dt());
2815 if (my_vers < FFA_VERSION_1_0 || my_vers > FFA_VERSION_1_2) {
2816 EMSG("Unsupported version %"PRIu32".%"PRIu32" from manifest",
2817 FFA_GET_MAJOR_VERSION(my_vers),
2818 FFA_GET_MINOR_VERSION(my_vers));
2819 panic();
2820 }
2821 vers = get_ffa_version(my_vers);
2822 DMSG("SPMC reported version %"PRIu32".%"PRIu32,
2823 FFA_GET_MAJOR_VERSION(vers), FFA_GET_MINOR_VERSION(vers));
2824 if (FFA_GET_MAJOR_VERSION(vers) != FFA_GET_MAJOR_VERSION(my_vers)) {
2825 EMSG("Incompatible major version %"PRIu32", expected %"PRIu32"",
2826 FFA_GET_MAJOR_VERSION(vers),
2827 FFA_GET_MAJOR_VERSION(my_vers));
2828 panic();
2829 }
2830 if (vers < my_vers)
2831 my_vers = vers;
2832 DMSG("Using version %"PRIu32".%"PRIu32"",
2833 FFA_GET_MAJOR_VERSION(my_vers), FFA_GET_MINOR_VERSION(my_vers));
2834 my_rxtx.ffa_vers = my_vers;
2835
2836 spmc_rxtx_map(&my_rxtx);
2837
2838 spmc_id = ffa_spm_id_get();
2839 DMSG("SPMC ID %#"PRIx16, spmc_id);
2840
2841 optee_core_lsp.sp_id = ffa_id_get();
2842 DMSG("OP-TEE endpoint ID %#"PRIx16, optee_core_lsp.sp_id);
2843 STAILQ_INSERT_HEAD(&lsp_head, &optee_core_lsp, link);
2844
2845 if (!ffa_features(FFA_NOTIFICATION_SET)) {
2846 spmc_notif_is_ready = true;
2847 DMSG("Asynchronous notifications are ready");
2848 }
2849
2850 return TEE_SUCCESS;
2851 }
2852 #endif /* !defined(CFG_CORE_SEL1_SPMC) */
2853
2854 nex_service_init(spmc_init);
2855
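/*
 * spmc_init() is registered as a nexus service initcall, so it runs
 * once during early boot, before OP-TEE returns to normal world. The
 * endpoint IDs, the RX/TX mapping (external SPMC case) and the
 * negotiated FF-A version are therefore in place before the first
 * FF-A request is dispatched to thread_spmc_msg_recv().
 */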