// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2020-2023, Linaro Limited.
 * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
 */

#include <assert.h>
#include <ffa.h>
#include <initcall.h>
#include <io.h>
#include <kernel/interrupt.h>
#include <kernel/notif.h>
#include <kernel/panic.h>
#include <kernel/secure_partition.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/thread_spmc.h>
#include <kernel/virtualization.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <optee_ffa.h>
#include <optee_msg.h>
#include <optee_rpc_cmd.h>
#include <sm/optee_smc.h>
#include <string.h>
#include <sys/queue.h>
#include <tee/entry_std.h>
#include <tee/uuid.h>
#include <util.h>

#if defined(CFG_CORE_SEL1_SPMC)
struct mem_share_state {
	struct mobj_ffa *mf;
	unsigned int page_count;
	unsigned int region_count;
	unsigned int current_page_idx;
};

struct mem_frag_state {
	struct mem_share_state share;
	tee_mm_entry_t *mm;
	unsigned int frag_offset;
	SLIST_ENTRY(mem_frag_state) link;
};
#endif

struct notif_vm_bitmap {
	bool initialized;
	int do_bottom_half_value;
	uint64_t pending;
	uint64_t bound;
};

static unsigned int spmc_notif_lock __nex_data = SPINLOCK_UNLOCK;
static bool spmc_notif_is_ready __nex_bss;
static int notif_intid __nex_data __maybe_unused = -1;

/* Id used to look up the guest specific struct notif_vm_bitmap */
static unsigned int notif_vm_bitmap_id __nex_bss;
/* Notification state when ns-virtualization isn't enabled */
static struct notif_vm_bitmap default_notif_vm_bitmap;

/* Initialized in spmc_init() below */
uint16_t optee_endpoint_id __nex_bss;
uint16_t spmc_id __nex_bss;
#ifdef CFG_CORE_SEL1_SPMC
uint16_t spmd_id __nex_bss;
static const uint32_t my_part_props = FFA_PART_PROP_DIRECT_REQ_RECV |
				      FFA_PART_PROP_DIRECT_REQ_SEND |
#ifdef CFG_NS_VIRTUALIZATION
				      FFA_PART_PROP_NOTIF_CREATED |
				      FFA_PART_PROP_NOTIF_DESTROYED |
#endif
#ifdef ARM64
				      FFA_PART_PROP_AARCH64_STATE |
#endif
				      FFA_PART_PROP_IS_PE_ID;

static uint32_t my_uuid_words[] = {
	/*
	 * - if the SPMC is in S-EL2 this UUID describes OP-TEE as a S-EL1
	 *   SP, or
	 * - if the SPMC is in S-EL1 then this UUID is for OP-TEE as a
	 *   logical partition, residing in the same exception level as the
	 *   SPMC
	 * UUID 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
	 */
	0xe0786148, 0xe311f8e7, 0x02005ebc, 0x1bc5d5a5,
};

/*
 * If struct ffa_rxtx::size is 0 the RX/TX buffers are not mapped or
 * initialized.
 *
 * struct ffa_rxtx::spinlock protects the variables below from concurrent
 * access; this includes the use of the content of struct ffa_rxtx::rx and
 * @frag_state_head.
 *
 * struct ffa_rxtx::tx_is_mine is true when we may write to struct
 * ffa_rxtx::tx and false when it is owned by normal world.
 *
 * Note that we can't prevent normal world from updating the content of
 * these buffers so we must always be careful when reading, even while we
 * hold the lock.
 */
static struct ffa_rxtx my_rxtx __nex_bss;

static bool is_nw_buf(struct ffa_rxtx *rxtx)
{
	return rxtx == &my_rxtx;
}

static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
	SLIST_HEAD_INITIALIZER(&frag_state_head);

#else
static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
static struct ffa_rxtx my_rxtx = {
	.rx = __rx_buf,
	.tx = __tx_buf,
	.size = sizeof(__rx_buf),
};
#endif

static uint32_t swap_src_dst(uint32_t src_dst)
{
	return (src_dst >> 16) | (src_dst << 16);
}

static uint16_t get_sender_id(uint32_t src_dst)
{
	return src_dst >> 16;
}

void spmc_set_args(struct thread_smc_args *args, uint32_t fid, uint32_t src_dst,
		   uint32_t w2, uint32_t w3, uint32_t w4, uint32_t w5)
{
	*args = (struct thread_smc_args){ .a0 = fid,
					  .a1 = src_dst,
					  .a2 = w2,
					  .a3 = w3,
					  .a4 = w4,
					  .a5 = w5, };
}

static void set_simple_ret_val(struct thread_smc_args *args, int ffa_ret)
{
	if (ffa_ret)
		spmc_set_args(args, FFA_ERROR, 0, ffa_ret, 0, 0, 0);
	else
		spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
}

uint32_t spmc_exchange_version(uint32_t vers, struct ffa_rxtx *rxtx)
{
	/*
	 * No locking, if the caller does concurrent calls to this it's
	 * only making a mess for itself. We must be able to renegotiate
	 * the FF-A version in order to support differing versions between
	 * the loader and the driver.
	 */
	if (vers < FFA_VERSION_1_1)
		rxtx->ffa_vers = FFA_VERSION_1_0;
	else
		rxtx->ffa_vers = FFA_VERSION_1_1;

	return rxtx->ffa_vers;
}

static bool is_ffa_success(uint32_t fid)
{
#ifdef ARM64
	if (fid == FFA_SUCCESS_64)
		return true;
#endif
	return fid == FFA_SUCCESS_32;
}

static int32_t get_ffa_ret_code(const struct thread_smc_args *args)
{
	if (is_ffa_success(args->a0))
		return FFA_OK;
	if (args->a0 == FFA_ERROR && args->a2)
		return args->a2;
	return FFA_NOT_SUPPORTED;
}

static int ffa_simple_call(uint32_t fid, unsigned long a1, unsigned long a2,
			   unsigned long a3, unsigned long a4)
{
	struct thread_smc_args args = {
		.a0 = fid,
		.a1 = a1,
		.a2 = a2,
		.a3 = a3,
		.a4 = a4,
	};

	thread_smccc(&args);

	return get_ffa_ret_code(&args);
}

static int __maybe_unused ffa_features(uint32_t id)
{
	return ffa_simple_call(FFA_FEATURES, id, 0, 0, 0);
}

static int __maybe_unused ffa_set_notification(uint16_t dst, uint16_t src,
					       uint32_t flags, uint64_t bitmap)
{
	return ffa_simple_call(FFA_NOTIFICATION_SET,
			       SHIFT_U32(src, 16) | dst, flags,
			       low32_from_64(bitmap), high32_from_64(bitmap));
}
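/*
 * Illustration (not from the original source): the FF-A source/destination
 * word handled by swap_src_dst() and get_sender_id() above packs the sender
 * endpoint ID in bits [31:16] and the receiver endpoint ID in bits [15:0].
 * With the hypothetical IDs sender 0x8001 and receiver 0x3001:
 *
 *	src_dst           == 0x80013001
 *	get_sender_id()   == 0x8001
 *	swap_src_dst()    == 0x30018001	(used when building the response)
 */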
#if defined(CFG_CORE_SEL1_SPMC)
static void handle_features(struct thread_smc_args *args)
{
	uint32_t ret_fid = FFA_ERROR;
	uint32_t ret_w2 = FFA_NOT_SUPPORTED;

	switch (args->a1) {
	case FFA_FEATURE_SCHEDULE_RECV_INTR:
		if (spmc_notif_is_ready) {
			ret_fid = FFA_SUCCESS_32;
			ret_w2 = notif_intid;
		}
		break;

#ifdef ARM64
	case FFA_RXTX_MAP_64:
#endif
	case FFA_RXTX_MAP_32:
		ret_fid = FFA_SUCCESS_32;
		ret_w2 = 0; /* 4kB minimum buffer size and alignment boundary */
		break;
#ifdef ARM64
	case FFA_MEM_SHARE_64:
#endif
	case FFA_MEM_SHARE_32:
		ret_fid = FFA_SUCCESS_32;
		/*
		 * Partition manager supports transmission of a memory
		 * transaction descriptor in a buffer dynamically allocated
		 * by the endpoint.
		 */
		ret_w2 = BIT(0);
		break;

	case FFA_ERROR:
	case FFA_VERSION:
	case FFA_SUCCESS_32:
#ifdef ARM64
	case FFA_SUCCESS_64:
#endif
	case FFA_FEATURES:
	case FFA_SPM_ID_GET:
	case FFA_MEM_FRAG_TX:
	case FFA_MEM_RECLAIM:
	case FFA_MSG_SEND_DIRECT_REQ_64:
	case FFA_MSG_SEND_DIRECT_REQ_32:
	case FFA_INTERRUPT:
	case FFA_PARTITION_INFO_GET:
	case FFA_RXTX_UNMAP:
	case FFA_RX_RELEASE:
	case FFA_FEATURE_MANAGED_EXIT_INTR:
	case FFA_NOTIFICATION_BITMAP_CREATE:
	case FFA_NOTIFICATION_BITMAP_DESTROY:
	case FFA_NOTIFICATION_BIND:
	case FFA_NOTIFICATION_UNBIND:
	case FFA_NOTIFICATION_SET:
	case FFA_NOTIFICATION_GET:
	case FFA_NOTIFICATION_INFO_GET_32:
#ifdef ARM64
	case FFA_NOTIFICATION_INFO_GET_64:
#endif
		ret_fid = FFA_SUCCESS_32;
		ret_w2 = FFA_PARAM_MBZ;
		break;
	default:
		break;
	}

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
{
	tee_mm_entry_t *mm = NULL;

	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
		return FFA_INVALID_PARAMETERS;

	mm = tee_mm_alloc(&core_virt_shm_pool, sz);
	if (!mm)
		return FFA_NO_MEMORY;

	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
					  sz / SMALL_PAGE_SIZE,
					  MEM_AREA_NSEC_SHM)) {
		tee_mm_free(mm);
		return FFA_INVALID_PARAMETERS;
	}

	*va_ret = (void *)tee_mm_get_smem(mm);
	return 0;
}

void spmc_handle_spm_id_get(struct thread_smc_args *args)
{
	spmc_set_args(args, FFA_SUCCESS_32, FFA_PARAM_MBZ, spmc_id,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static void unmap_buf(void *va, size_t sz)
{
	tee_mm_entry_t *mm = tee_mm_find(&core_virt_shm_pool, (vaddr_t)va);

	assert(mm);
	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
	tee_mm_free(mm);
}

void spmc_handle_rxtx_map(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	int rc = 0;
	unsigned int sz = 0;
	paddr_t rx_pa = 0;
	paddr_t tx_pa = 0;
	void *rx = NULL;
	void *tx = NULL;

	cpu_spin_lock(&rxtx->spinlock);

	if (args->a3 & GENMASK_64(63, 6)) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	sz = args->a3 * SMALL_PAGE_SIZE;
	if (!sz) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}
	/* TX/RX are swapped compared to the caller */
	tx_pa = args->a2;
	rx_pa = args->a1;

	if (rxtx->size) {
		rc = FFA_DENIED;
		goto out;
	}

	/*
	 * If the buffer comes from a SP the address is virtual and already
	 * mapped.
	 */
	if (is_nw_buf(rxtx)) {
		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
			enum teecore_memtypes mt = MEM_AREA_NEX_NSEC_SHM;
			bool tx_alloced = false;

			/*
			 * With virtualization we establish this mapping in
			 * the nexus mapping which then is replicated to
			 * each partition.
			 *
			 * This means that this mapping must be done before
			 * any partition is created and then must not be
			 * changed.
			 */

			/*
			 * core_mmu_add_mapping() may reuse previous
			 * mappings. First check if there's any mappings to
			 * reuse so we know how to clean up in case of
			 * failure.
			 */
			tx = phys_to_virt(tx_pa, mt, sz);
			rx = phys_to_virt(rx_pa, mt, sz);
			if (!tx) {
				tx = core_mmu_add_mapping(mt, tx_pa, sz);
				if (!tx) {
					rc = FFA_NO_MEMORY;
					goto out;
				}
				tx_alloced = true;
			}
			if (!rx)
				rx = core_mmu_add_mapping(mt, rx_pa, sz);

			if (!rx) {
				if (tx_alloced && tx)
					core_mmu_remove_mapping(mt, tx, sz);
				rc = FFA_NO_MEMORY;
				goto out;
			}
		} else {
			rc = map_buf(tx_pa, sz, &tx);
			if (rc)
				goto out;
			rc = map_buf(rx_pa, sz, &rx);
			if (rc) {
				unmap_buf(tx, sz);
				goto out;
			}
		}
		rxtx->tx = tx;
		rxtx->rx = rx;
	} else {
		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}

		if (!virt_to_phys((void *)tx_pa) ||
		    !virt_to_phys((void *)rx_pa)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}

		rxtx->tx = (void *)tx_pa;
		rxtx->rx = (void *)rx_pa;
	}

	rxtx->size = sz;
	rxtx->tx_is_mine = true;
	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
out:
	cpu_spin_unlock(&rxtx->spinlock);
	set_simple_ret_val(args, rc);
}

void spmc_handle_rxtx_unmap(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	int rc = FFA_INVALID_PARAMETERS;

	cpu_spin_lock(&rxtx->spinlock);

	if (!rxtx->size)
		goto out;

	/*
	 * We don't unmap the SP memory as the SP might still use it.
	 * We avoid making changes to nexus mappings at this stage since
	 * there currently isn't a way to replicate those changes to all
	 * partitions.
	 */
	if (is_nw_buf(rxtx) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		unmap_buf(rxtx->rx, rxtx->size);
		unmap_buf(rxtx->tx, rxtx->size);
	}
	rxtx->size = 0;
	rxtx->rx = NULL;
	rxtx->tx = NULL;
	rc = 0;
out:
	cpu_spin_unlock(&rxtx->spinlock);
	set_simple_ret_val(args, rc);
}

void spmc_handle_rx_release(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	int rc = 0;

	cpu_spin_lock(&rxtx->spinlock);
	/* The sender's RX is our TX */
	if (!rxtx->size || rxtx->tx_is_mine) {
		rc = FFA_DENIED;
	} else {
		rc = 0;
		rxtx->tx_is_mine = true;
	}
	cpu_spin_unlock(&rxtx->spinlock);

	set_simple_ret_val(args, rc);
}
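/*
 * Sketch of the RX/TX buffer ownership protocol handled above (assumed
 * typical sequence, shown for illustration only):
 *
 *	FFA_RXTX_MAP(tx_pa, rx_pa, page_count)	normal world registers its
 *						buffers
 *	FFA_PARTITION_INFO_GET(...)		spmc_handle_partition_info_get()
 *						below fills the caller's RX
 *						buffer (our TX) and clears
 *						tx_is_mine
 *	FFA_RX_RELEASE()			caller hands the buffer back,
 *						tx_is_mine becomes true again
 *	FFA_RXTX_UNMAP()			buffers are unregistered
 */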
static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
{
	return !w0 && !w1 && !w2 && !w3;
}

static bool is_my_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
{
	/*
	 * This depends on which UUID we have been assigned.
	 * TODO add a generic mechanism to obtain our UUID.
	 *
	 * The test below is for the hard coded UUID
	 * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
	 */
	return w0 == my_uuid_words[0] && w1 == my_uuid_words[1] &&
	       w2 == my_uuid_words[2] && w3 == my_uuid_words[3];
}

TEE_Result spmc_fill_partition_entry(uint32_t ffa_vers, void *buf, size_t blen,
				     size_t idx, uint16_t endpoint_id,
				     uint16_t execution_context,
				     uint32_t part_props,
				     const uint32_t uuid_words[4])
{
	struct ffa_partition_info_x *fpi = NULL;
	size_t fpi_size = sizeof(*fpi);

	if (ffa_vers >= FFA_VERSION_1_1)
		fpi_size += FFA_UUID_SIZE;

	if ((idx + 1) * fpi_size > blen)
		return TEE_ERROR_OUT_OF_MEMORY;

	fpi = (void *)((vaddr_t)buf + idx * fpi_size);
	fpi->id = endpoint_id;
	/* Number of execution contexts implemented by this partition */
	fpi->execution_context = execution_context;

	fpi->partition_properties = part_props;

	if (ffa_vers >= FFA_VERSION_1_1) {
		if (uuid_words)
			memcpy(fpi->uuid, uuid_words, FFA_UUID_SIZE);
		else
			memset(fpi->uuid, 0, FFA_UUID_SIZE);
	}

	return TEE_SUCCESS;
}

static int handle_partition_info_get_all(size_t *elem_count,
					 struct ffa_rxtx *rxtx, bool count_only)
{
	if (!count_only) {
		/* Add OP-TEE SP */
		if (spmc_fill_partition_entry(rxtx->ffa_vers, rxtx->tx,
					      rxtx->size, 0, optee_endpoint_id,
					      CFG_TEE_CORE_NB_CORE,
					      my_part_props, my_uuid_words))
			return FFA_NO_MEMORY;
	}
	*elem_count = 1;

	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
		if (sp_partition_info_get(rxtx->ffa_vers, rxtx->tx, rxtx->size,
					  NULL, elem_count, count_only))
			return FFA_NO_MEMORY;
	}

	return FFA_OK;
}

void spmc_handle_partition_info_get(struct thread_smc_args *args,
				    struct ffa_rxtx *rxtx)
{
	TEE_Result res = TEE_SUCCESS;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t fpi_size = 0;
	uint32_t rc = 0;
	bool count_only = args->a5 & FFA_PARTITION_INFO_GET_COUNT_FLAG;

	if (!count_only) {
		cpu_spin_lock(&rxtx->spinlock);

		if (!rxtx->size || !rxtx->tx_is_mine) {
			rc = FFA_BUSY;
			goto out;
		}
	}

	if (is_nil_uuid(args->a1, args->a2, args->a3, args->a4)) {
		size_t elem_count = 0;

		ret_fid = handle_partition_info_get_all(&elem_count, rxtx,
							count_only);

		if (ret_fid) {
			rc = ret_fid;
			ret_fid = FFA_ERROR;
		} else {
			ret_fid = FFA_SUCCESS_32;
			rc = elem_count;
		}

		goto out;
	}

	if (is_my_uuid(args->a1, args->a2, args->a3, args->a4)) {
		if (!count_only) {
			res = spmc_fill_partition_entry(rxtx->ffa_vers,
							rxtx->tx, rxtx->size, 0,
							optee_endpoint_id,
							CFG_TEE_CORE_NB_CORE,
							my_part_props,
							my_uuid_words);
			if (res) {
				ret_fid = FFA_ERROR;
				rc = FFA_INVALID_PARAMETERS;
				goto out;
			}
		}
		rc = 1;
	} else if (IS_ENABLED(CFG_SECURE_PARTITION)) {
		uint32_t uuid_array[4] = { 0 };
		TEE_UUID uuid = { };
		size_t count = 0;

		uuid_array[0] = args->a1;
		uuid_array[1] = args->a2;
		uuid_array[2] = args->a3;
		uuid_array[3] = args->a4;
		tee_uuid_from_octets(&uuid, (uint8_t *)uuid_array);

		res = sp_partition_info_get(rxtx->ffa_vers, rxtx->tx,
					    rxtx->size, &uuid, &count,
					    count_only);
		if (res != TEE_SUCCESS) {
			ret_fid = FFA_ERROR;
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		rc = count;
	} else {
		ret_fid = FFA_ERROR;
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	ret_fid = FFA_SUCCESS_32;

out:
	if (ret_fid == FFA_SUCCESS_32 && !count_only &&
	    rxtx->ffa_vers >= FFA_VERSION_1_1)
		fpi_size = sizeof(struct ffa_partition_info_x) + FFA_UUID_SIZE;

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, fpi_size,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
	if (!count_only) {
		rxtx->tx_is_mine = false;
		cpu_spin_unlock(&rxtx->spinlock);
	}
}

static void spmc_handle_run(struct thread_smc_args *args)
{
	uint16_t endpoint = FFA_TARGET_INFO_GET_SP_ID(args->a1);
	uint16_t thread_id = FFA_TARGET_INFO_GET_VCPU_ID(args->a1);
	uint32_t rc = FFA_OK;

	if (endpoint != optee_endpoint_id) {
		/*
		 * The endpoint should be an SP, try to resume the SP from
		 * preempted into busy state.
		 */
		rc = spmc_sp_resume_from_preempted(endpoint);
		if (rc)
			goto out;
	}

	thread_resume_from_rpc(thread_id, 0, 0, 0, 0);

	/* thread_resume_from_rpc() returns only if the thread_id is invalid */
	rc = FFA_INVALID_PARAMETERS;

out:
	set_simple_ret_val(args, rc);
}
#endif /*CFG_CORE_SEL1_SPMC*/

static struct notif_vm_bitmap *get_notif_vm_bitmap(struct guest_partition *prtn,
						   uint16_t vm_id)
{
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		if (!prtn)
			return NULL;
		assert(vm_id == virt_get_guest_id(prtn));
		return virt_get_guest_spec_data(prtn, notif_vm_bitmap_id);
	}
	if (vm_id)
		return NULL;
	return &default_notif_vm_bitmap;
}

static uint32_t spmc_enable_async_notif(uint32_t bottom_half_value,
					uint16_t vm_id)
{
	struct guest_partition *prtn = NULL;
	struct notif_vm_bitmap *nvb = NULL;
	uint32_t old_itr_status = 0;
	uint32_t res = 0;

	if (!spmc_notif_is_ready) {
		/*
		 * This should never happen, not if normal world respects the
		 * exchanged capabilities.
		 */
		EMSG("Asynchronous notifications are not ready");
		return TEE_ERROR_NOT_IMPLEMENTED;
	}

	if (bottom_half_value >= OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE) {
		EMSG("Invalid bottom half value %"PRIu32, bottom_half_value);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	prtn = virt_get_guest(vm_id);
	nvb = get_notif_vm_bitmap(prtn, vm_id);
	if (!nvb) {
		res = TEE_ERROR_BAD_PARAMETERS;
		goto out;
	}

	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
	nvb->do_bottom_half_value = bottom_half_value;
	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);

	notif_deliver_atomic_event(NOTIF_EVENT_STARTED, vm_id);
	res = TEE_SUCCESS;
out:
	virt_put_guest(prtn);
	return res;
}

static void handle_yielding_call(struct thread_smc_args *args,
				 uint32_t direct_resp_fid)
{
	TEE_Result res = 0;

	thread_check_canaries();

#ifdef ARM64
	/* Saving this for an eventual RPC */
	thread_get_core_local()->direct_resp_fid = direct_resp_fid;
#endif

	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
		/* Note connection to struct thread_rpc_arg::ret */
		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
				       0);
		res = TEE_ERROR_BAD_PARAMETERS;
	} else {
		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
				     args->a6, args->a7);
		res = TEE_ERROR_BUSY;
	}
	spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1),
		      0, res, 0, 0);
}

static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
{
	uint64_t cookie = reg_pair_to_64(a5, a4);
	uint32_t res = 0;

	res = mobj_ffa_unregister_by_cookie(cookie);
	switch (res) {
	case TEE_SUCCESS:
	case TEE_ERROR_ITEM_NOT_FOUND:
		return 0;
	case TEE_ERROR_BUSY:
		EMSG("res %#"PRIx32, res);
		return FFA_BUSY;
	default:
		EMSG("res %#"PRIx32, res);
		return FFA_INVALID_PARAMETERS;
	}
}

static void handle_blocking_call(struct thread_smc_args *args,
				 uint32_t direct_resp_fid)
{
	uint32_t sec_caps = 0;

	switch (args->a3) {
	case OPTEE_FFA_GET_API_VERSION:
		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
			      0);
		break;
	case OPTEE_FFA_GET_OS_VERSION:
		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
			      CFG_OPTEE_REVISION_MAJOR,
			      CFG_OPTEE_REVISION_MINOR,
			      TEE_IMPL_GIT_SHA1 >> 32);
		break;
	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
		sec_caps = OPTEE_FFA_SEC_CAP_ARG_OFFSET;
		if (spmc_notif_is_ready)
			sec_caps |= OPTEE_FFA_SEC_CAP_ASYNC_NOTIF;
		spmc_set_args(args, direct_resp_fid,
			      swap_src_dst(args->a1), 0, 0,
			      THREAD_RPC_MAX_NUM_PARAMS, sec_caps);
		break;
	case OPTEE_FFA_UNREGISTER_SHM:
		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
			      handle_unregister_shm(args->a4, args->a5), 0, 0);
		break;
	case OPTEE_FFA_ENABLE_ASYNC_NOTIF:
		spmc_set_args(args, direct_resp_fid,
			      swap_src_dst(args->a1), 0,
			      spmc_enable_async_notif(args->a4,
						      FFA_SRC(args->a1)),
			      0, 0);
		break;
	default:
		EMSG("Unhandled blocking service ID %#"PRIx32,
		     (uint32_t)args->a3);
		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
			      TEE_ERROR_BAD_PARAMETERS, 0, 0);
	}
}
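/*
 * Illustration (not part of the original file): for the blocking services
 * above the caller's FFA_MSG_SEND_DIRECT_REQ carries the service ID in w3
 * and arguments in w4/w5; the direct response carries the results in
 * w3..w5. For example, assuming a normal world driver asking for the API
 * version:
 *
 *	request:  w3 = OPTEE_FFA_GET_API_VERSION
 *	response: w3 = OPTEE_FFA_VERSION_MAJOR, w4 = OPTEE_FFA_VERSION_MINOR
 */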
static void handle_framework_direct_request(struct thread_smc_args *args,
					    struct ffa_rxtx *rxtx,
					    uint32_t direct_resp_fid)
{
	uint32_t w0 = FFA_ERROR;
	uint32_t w1 = FFA_PARAM_MBZ;
	uint32_t w2 = FFA_NOT_SUPPORTED;
	uint32_t w3 = FFA_PARAM_MBZ;

	switch (args->a2 & FFA_MSG_TYPE_MASK) {
	case FFA_MSG_SEND_VM_CREATED:
		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
			uint16_t guest_id = args->a5;
			TEE_Result res = virt_guest_created(guest_id);

			w0 = direct_resp_fid;
			w1 = swap_src_dst(args->a1);
			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_CREATED;
			if (res == TEE_SUCCESS)
				w3 = FFA_OK;
			else if (res == TEE_ERROR_OUT_OF_MEMORY)
				w3 = FFA_DENIED;
			else
				w3 = FFA_INVALID_PARAMETERS;
		}
		break;
	case FFA_MSG_SEND_VM_DESTROYED:
		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
			uint16_t guest_id = args->a5;
			TEE_Result res = virt_guest_destroyed(guest_id);

			w0 = direct_resp_fid;
			w1 = swap_src_dst(args->a1);
			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_DESTROYED;
			if (res == TEE_SUCCESS)
				w3 = FFA_OK;
			else
				w3 = FFA_INVALID_PARAMETERS;
		}
		break;
	case FFA_MSG_VERSION_REQ:
		w0 = direct_resp_fid;
		w1 = swap_src_dst(args->a1);
		w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_VERSION_RESP;
		w3 = spmc_exchange_version(args->a3, rxtx);
		break;
	default:
		break;
	}
	spmc_set_args(args, w0, w1, w2, w3, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static void handle_direct_request(struct thread_smc_args *args,
				  struct ffa_rxtx *rxtx)
{
	uint32_t direct_resp_fid = 0;

	if (IS_ENABLED(CFG_SECURE_PARTITION) &&
	    FFA_DST(args->a1) != spmc_id &&
	    FFA_DST(args->a1) != optee_endpoint_id) {
		spmc_sp_start_thread(args);
		return;
	}

	if (OPTEE_SMC_IS_64(args->a0))
		direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_64;
	else
		direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_32;

	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
		handle_framework_direct_request(args, rxtx, direct_resp_fid);
		return;
	}

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
	    virt_set_guest(get_sender_id(args->a1))) {
		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
			      TEE_ERROR_ITEM_NOT_FOUND, 0, 0);
		return;
	}

	if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
		handle_yielding_call(args, direct_resp_fid);
	else
		handle_blocking_call(args, direct_resp_fid);

	/*
	 * Note that handle_yielding_call() typically only returns if a
	 * thread cannot be allocated or found. virt_unset_guest() is also
	 * called from thread_state_suspend() and thread_state_free().
	 */
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();
}

int spmc_read_mem_transaction(uint32_t ffa_vers, void *buf, size_t blen,
			      struct ffa_mem_transaction_x *trans)
{
	uint16_t mem_reg_attr = 0;
	uint32_t flags = 0;
	uint32_t count = 0;
	uint32_t offs = 0;
	uint32_t size = 0;
	size_t n = 0;

	if (!IS_ALIGNED_WITH_TYPE(buf, uint64_t))
		return FFA_INVALID_PARAMETERS;

	if (ffa_vers >= FFA_VERSION_1_1) {
		struct ffa_mem_transaction_1_1 *descr = NULL;

		if (blen < sizeof(*descr))
			return FFA_INVALID_PARAMETERS;

		descr = buf;
		trans->sender_id = READ_ONCE(descr->sender_id);
		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
		flags = READ_ONCE(descr->flags);
		trans->global_handle = READ_ONCE(descr->global_handle);
		trans->tag = READ_ONCE(descr->tag);

		count = READ_ONCE(descr->mem_access_count);
		size = READ_ONCE(descr->mem_access_size);
		offs = READ_ONCE(descr->mem_access_offs);
	} else {
		struct ffa_mem_transaction_1_0 *descr = NULL;

		if (blen < sizeof(*descr))
			return FFA_INVALID_PARAMETERS;

		descr = buf;
		trans->sender_id = READ_ONCE(descr->sender_id);
		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
		flags = READ_ONCE(descr->flags);
		trans->global_handle = READ_ONCE(descr->global_handle);
		trans->tag = READ_ONCE(descr->tag);

		count = READ_ONCE(descr->mem_access_count);
		size = sizeof(struct ffa_mem_access);
		offs = offsetof(struct ffa_mem_transaction_1_0,
				mem_access_array);
	}

	if (mem_reg_attr > UINT8_MAX || flags > UINT8_MAX ||
	    size > UINT8_MAX || count > UINT8_MAX || offs > UINT16_MAX)
		return FFA_INVALID_PARAMETERS;

	/* Check that the endpoint memory access descriptor array fits */
	if (MUL_OVERFLOW(size, count, &n) || ADD_OVERFLOW(offs, n, &n) ||
	    n > blen)
		return FFA_INVALID_PARAMETERS;

	trans->mem_reg_attr = mem_reg_attr;
	trans->flags = flags;
	trans->mem_access_size = size;
	trans->mem_access_count = count;
	trans->mem_access_offs = offs;
	return 0;
}
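/*
 * Layout sketch (informative only) of the memory transaction descriptor
 * parsed above. With FF-A v1.1 the size and offset of the endpoint memory
 * access descriptor array are read from the descriptor itself; with v1.0
 * the array follows the descriptor directly and each element has the fixed
 * size sizeof(struct ffa_mem_access):
 *
 *	+--------------------------------------+  offset 0
 *	| struct ffa_mem_transaction_1_x       |
 *	+--------------------------------------+  mem_access_offs
 *	| mem_access_count descriptors,        |
 *	| mem_access_size bytes each           |
 *	+--------------------------------------+
 */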
#if defined(CFG_CORE_SEL1_SPMC)
static int get_acc_perms(vaddr_t mem_acc_base, unsigned int mem_access_size,
			 unsigned int mem_access_count, uint8_t *acc_perms,
			 unsigned int *region_offs)
{
	struct ffa_mem_access_perm *descr = NULL;
	struct ffa_mem_access *mem_acc = NULL;
	unsigned int n = 0;

	for (n = 0; n < mem_access_count; n++) {
		mem_acc = (void *)(mem_acc_base + mem_access_size * n);
		descr = &mem_acc->access_perm;
		if (READ_ONCE(descr->endpoint_id) == optee_endpoint_id) {
			*acc_perms = READ_ONCE(descr->perm);
			*region_offs = READ_ONCE(mem_acc[n].region_offs);
			return 0;
		}
	}

	return FFA_INVALID_PARAMETERS;
}

static int mem_share_init(struct ffa_mem_transaction_x *mem_trans, void *buf,
			  size_t blen, unsigned int *page_count,
			  unsigned int *region_count, size_t *addr_range_offs)
{
	const uint16_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
	struct ffa_mem_region *region_descr = NULL;
	unsigned int region_descr_offs = 0;
	uint8_t mem_acc_perm = 0;
	size_t n = 0;

	if (mem_trans->mem_reg_attr != exp_mem_reg_attr)
		return FFA_INVALID_PARAMETERS;

	/* Check that the access permissions match what's expected */
	if (get_acc_perms((vaddr_t)buf + mem_trans->mem_access_offs,
			  mem_trans->mem_access_size,
			  mem_trans->mem_access_count,
			  &mem_acc_perm, &region_descr_offs) ||
	    mem_acc_perm != exp_mem_acc_perm)
		return FFA_INVALID_PARAMETERS;

	/* Check that the Composite memory region descriptor fits */
	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
	    n > blen)
		return FFA_INVALID_PARAMETERS;

	if (!IS_ALIGNED_WITH_TYPE((vaddr_t)buf + region_descr_offs,
				  struct ffa_mem_region))
		return FFA_INVALID_PARAMETERS;

	region_descr = (struct ffa_mem_region *)((vaddr_t)buf +
						 region_descr_offs);
	*page_count = READ_ONCE(region_descr->total_page_count);
	*region_count = READ_ONCE(region_descr->address_range_count);
	*addr_range_offs = n;
	return 0;
}

static int add_mem_share_helper(struct mem_share_state *s, void *buf,
				size_t flen)
{
	unsigned int region_count = flen / sizeof(struct ffa_address_range);
	struct ffa_address_range *arange = NULL;
	unsigned int n = 0;

	if (region_count > s->region_count)
		region_count = s->region_count;

	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
		return FFA_INVALID_PARAMETERS;
	arange = buf;

	for (n = 0; n < region_count; n++) {
		unsigned int page_count = READ_ONCE(arange[n].page_count);
		uint64_t addr = READ_ONCE(arange[n].address);

		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
					  addr, page_count))
			return FFA_INVALID_PARAMETERS;
	}

	s->region_count -= region_count;
	if (s->region_count)
		return region_count * sizeof(*arange);

	if (s->current_page_idx != s->page_count)
		return FFA_INVALID_PARAMETERS;

	return 0;
}

static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
{
	int rc = 0;

	rc = add_mem_share_helper(&s->share, buf, flen);
	if (rc >= 0) {
		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
			/* We're not at the end of the descriptor yet */
			if (s->share.region_count)
				return s->frag_offset;

			/* We're done */
			rc = 0;
		} else {
			rc = FFA_INVALID_PARAMETERS;
		}
	}

	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
	if (rc < 0)
		mobj_ffa_sel1_spmc_delete(s->share.mf);
	else
		mobj_ffa_push_to_inactive(s->share.mf);
	free(s);

	return rc;
}

static bool is_sp_share(struct ffa_mem_transaction_x *mem_trans,
			void *buf)
{
	struct ffa_mem_access_perm *perm = NULL;
	struct ffa_mem_access *mem_acc = NULL;

	if (!IS_ENABLED(CFG_SECURE_PARTITION))
		return false;

	if (mem_trans->mem_access_count < 1)
		return false;

	mem_acc = (void *)((vaddr_t)buf + mem_trans->mem_access_offs);
	perm = &mem_acc->access_perm;

	/*
	 * perm->endpoint_id is read here only to check if the endpoint is
	 * OP-TEE. We do read it later on again, but there are some additional
	 * checks there to make sure that the data is correct.
	 */
	return READ_ONCE(perm->endpoint_id) != optee_endpoint_id;
}

static int add_mem_share(struct ffa_mem_transaction_x *mem_trans,
			 tee_mm_entry_t *mm, void *buf, size_t blen,
			 size_t flen, uint64_t *global_handle)
{
	int rc = 0;
	struct mem_share_state share = { };
	size_t addr_range_offs = 0;
	uint64_t cookie = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
	size_t n = 0;

	rc = mem_share_init(mem_trans, buf, flen, &share.page_count,
			    &share.region_count, &addr_range_offs);
	if (rc)
		return rc;

	if (!share.page_count || !share.region_count)
		return FFA_INVALID_PARAMETERS;

	if (MUL_OVERFLOW(share.region_count,
			 sizeof(struct ffa_address_range), &n) ||
	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
		return FFA_INVALID_PARAMETERS;

	if (mem_trans->global_handle)
		cookie = mem_trans->global_handle;
	share.mf = mobj_ffa_sel1_spmc_new(cookie, share.page_count);
	if (!share.mf)
		return FFA_NO_MEMORY;

	if (flen != blen) {
		struct mem_frag_state *s = calloc(1, sizeof(*s));

		if (!s) {
			rc = FFA_NO_MEMORY;
			goto err;
		}
		s->share = share;
		s->mm = mm;
		s->frag_offset = addr_range_offs;

		SLIST_INSERT_HEAD(&frag_state_head, s, link);
		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
					flen - addr_range_offs);

		if (rc >= 0)
			*global_handle = mobj_ffa_get_cookie(share.mf);

		return rc;
	}

	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
				  flen - addr_range_offs);
	if (rc) {
		/*
		 * Number of consumed bytes may be returned instead of 0 for
		 * done.
		 */
		rc = FFA_INVALID_PARAMETERS;
		goto err;
	}

	*global_handle = mobj_ffa_push_to_inactive(share.mf);

	return 0;
err:
	mobj_ffa_sel1_spmc_delete(share.mf);
	return rc;
}
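/*
 * Return value convention for add_mem_share() and its helpers above,
 * summarized here for readability (derived from the code itself):
 *
 *	< 0  an FFA_* error code, the share is rejected
 *	  0  the complete descriptor has been consumed, the share is done
 *	> 0  number of bytes consumed so far; more fragments are expected
 *	     via FFA_MEM_FRAG_TX
 */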
static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
				 unsigned int page_count,
				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
{
	struct ffa_mem_transaction_x mem_trans = { };
	int rc = 0;
	size_t len = 0;
	void *buf = NULL;
	tee_mm_entry_t *mm = NULL;
	vaddr_t offs = pbuf & SMALL_PAGE_MASK;

	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
		return FFA_INVALID_PARAMETERS;
	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
		return FFA_INVALID_PARAMETERS;

	/*
	 * Check that the length reported in flen is covered by len even
	 * if the offset is taken into account.
	 */
	if (len < flen || len - offs < flen)
		return FFA_INVALID_PARAMETERS;

	mm = tee_mm_alloc(&core_virt_shm_pool, len);
	if (!mm)
		return FFA_NO_MEMORY;

	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
					  page_count, MEM_AREA_NSEC_SHM)) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}
	buf = (void *)(tee_mm_get_smem(mm) + offs);

	cpu_spin_lock(&rxtx->spinlock);
	rc = spmc_read_mem_transaction(rxtx->ffa_vers, buf, flen, &mem_trans);
	if (rc)
		goto unlock;

	if (is_sp_share(&mem_trans, buf)) {
		rc = spmc_sp_add_share(&mem_trans, buf, blen, flen,
				       global_handle, NULL);
		goto unlock;
	}

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
	    virt_set_guest(mem_trans.sender_id)) {
		rc = FFA_DENIED;
		goto unlock;
	}

	rc = add_mem_share(&mem_trans, mm, buf, blen, flen, global_handle);

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

unlock:
	cpu_spin_unlock(&rxtx->spinlock);
	if (rc > 0)
		return rc;

	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
out:
	tee_mm_free(mm);
	return rc;
}

static int handle_mem_share_rxbuf(size_t blen, size_t flen,
				  uint64_t *global_handle,
				  struct ffa_rxtx *rxtx)
{
	struct ffa_mem_transaction_x mem_trans = { };
	int rc = FFA_DENIED;

	cpu_spin_lock(&rxtx->spinlock);

	if (!rxtx->rx || flen > rxtx->size)
		goto out;

	rc = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, flen,
				       &mem_trans);
	if (rc)
		goto out;
	if (is_sp_share(&mem_trans, rxtx->rx)) {
		rc = spmc_sp_add_share(&mem_trans, rxtx, blen, flen,
				       global_handle, NULL);
		goto out;
	}

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
	    virt_set_guest(mem_trans.sender_id))
		goto out;

	rc = add_mem_share(&mem_trans, NULL, rxtx->rx, blen, flen,
			   global_handle);

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

out:
	cpu_spin_unlock(&rxtx->spinlock);

	return rc;
}

static void handle_mem_share(struct thread_smc_args *args,
			     struct ffa_rxtx *rxtx)
{
	uint32_t tot_len = args->a1;
	uint32_t frag_len = args->a2;
	uint64_t addr = args->a3;
	uint32_t page_count = args->a4;
	uint32_t ret_w1 = 0;
	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
	uint32_t ret_w3 = 0;
	uint32_t ret_fid = FFA_ERROR;
	uint64_t global_handle = 0;
	int rc = 0;

	/* Check that the MBZs are indeed 0 */
	if (args->a5 || args->a6 || args->a7)
		goto out;

	/* Check that fragment length doesn't exceed total length */
	if (frag_len > tot_len)
		goto out;

	/* Check for 32-bit calling convention */
	if (args->a0 == FFA_MEM_SHARE_32)
		addr &= UINT32_MAX;

	if (!addr) {
		/*
		 * The memory transaction descriptor is passed via our rx
		 * buffer.
		 */
		if (page_count)
			goto out;
		rc = handle_mem_share_rxbuf(tot_len, frag_len, &global_handle,
					    rxtx);
	} else {
		rc = handle_mem_share_tmem(addr, tot_len, frag_len, page_count,
					   &global_handle, rxtx);
	}
	if (rc < 0) {
		ret_w2 = rc;
	} else if (rc > 0) {
		ret_fid = FFA_MEM_FRAG_RX;
		ret_w3 = rc;
		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
	} else {
		ret_fid = FFA_SUCCESS_32;
		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
	}
out:
	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
}

static struct mem_frag_state *get_frag_state(uint64_t global_handle)
{
	struct mem_frag_state *s = NULL;

	SLIST_FOREACH(s, &frag_state_head, link)
		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
			return s;

	return NULL;
}

static void handle_mem_frag_tx(struct thread_smc_args *args,
			       struct ffa_rxtx *rxtx)
{
	uint64_t global_handle = reg_pair_to_64(args->a2, args->a1);
	size_t flen = args->a3;
	uint32_t endpoint_id = args->a4;
	struct mem_frag_state *s = NULL;
	tee_mm_entry_t *mm = NULL;
	unsigned int page_count = 0;
	void *buf = NULL;
	uint32_t ret_w1 = 0;
	uint32_t ret_w2 = 0;
	uint32_t ret_w3 = 0;
	uint32_t ret_fid = 0;
	int rc = 0;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		uint16_t guest_id = endpoint_id >> 16;

		if (!guest_id || virt_set_guest(guest_id)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out_set_rc;
		}
	}

	/*
	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
	 * requests.
	 */

	cpu_spin_lock(&rxtx->spinlock);

	s = get_frag_state(global_handle);
	if (!s) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	mm = s->mm;
	if (mm) {
		if (flen > tee_mm_get_bytes(mm)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		page_count = s->share.page_count;
		buf = (void *)tee_mm_get_smem(mm);
	} else {
		if (flen > rxtx->size) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		buf = rxtx->rx;
	}

	rc = add_mem_share_frag(s, buf, flen);
out:
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

	cpu_spin_unlock(&rxtx->spinlock);

	if (rc <= 0 && mm) {
		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
		tee_mm_free(mm);
	}

out_set_rc:
	if (rc < 0) {
		ret_fid = FFA_ERROR;
		ret_w2 = rc;
	} else if (rc > 0) {
		ret_fid = FFA_MEM_FRAG_RX;
		ret_w3 = rc;
		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
	} else {
		ret_fid = FFA_SUCCESS_32;
		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
	}

	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
}
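/*
 * Informative sketch of a fragmented share as seen from normal world,
 * derived from handle_mem_share() and handle_mem_frag_tx() above:
 *
 *	FFA_MEM_SHARE(tot_len, frag_len, ...)
 *	  <- FFA_MEM_FRAG_RX(handle, offset)	more fragments expected
 *	FFA_MEM_FRAG_TX(handle, frag_len)
 *	  <- FFA_MEM_FRAG_RX(handle, offset)	repeated as needed
 *	FFA_MEM_FRAG_TX(handle, frag_len)
 *	  <- FFA_SUCCESS(handle)		entire descriptor consumed
 */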
static void handle_mem_reclaim(struct thread_smc_args *args)
{
	int rc = FFA_INVALID_PARAMETERS;
	uint64_t cookie = 0;

	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
		goto out;

	cookie = reg_pair_to_64(args->a2, args->a1);
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		uint16_t guest_id = 0;

		if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
			guest_id = virt_find_guest_by_cookie(cookie);
		} else {
			guest_id = (cookie >> FFA_MEMORY_HANDLE_PRTN_SHIFT) &
				   FFA_MEMORY_HANDLE_PRTN_MASK;
		}
		if (!guest_id)
			goto out;
		if (virt_set_guest(guest_id)) {
			if (!virt_reclaim_cookie_from_destroyed_guest(guest_id,
								      cookie))
				rc = FFA_OK;
			goto out;
		}
	}

	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
	case TEE_SUCCESS:
		rc = FFA_OK;
		break;
	case TEE_ERROR_ITEM_NOT_FOUND:
		DMSG("cookie %#"PRIx64" not found", cookie);
		rc = FFA_INVALID_PARAMETERS;
		break;
	default:
		DMSG("cookie %#"PRIx64" busy", cookie);
		rc = FFA_DENIED;
		break;
	}

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

out:
	set_simple_ret_val(args, rc);
}

static void handle_notification_bitmap_create(struct thread_smc_args *args)
{
	uint32_t ret_val = FFA_INVALID_PARAMETERS;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t old_itr_status = 0;

	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
	    !args->a5 && !args->a6 && !args->a7) {
		struct guest_partition *prtn = NULL;
		struct notif_vm_bitmap *nvb = NULL;
		uint16_t vm_id = args->a1;

		prtn = virt_get_guest(vm_id);
		nvb = get_notif_vm_bitmap(prtn, vm_id);
		if (!nvb) {
			ret_val = FFA_INVALID_PARAMETERS;
			goto out_virt_put;
		}

		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);

		if (nvb->initialized) {
			ret_val = FFA_DENIED;
			goto out_unlock;
		}

		nvb->initialized = true;
		nvb->do_bottom_half_value = -1;
		ret_val = FFA_OK;
		ret_fid = FFA_SUCCESS_32;
out_unlock:
		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
out_virt_put:
		virt_put_guest(prtn);
	}

	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
}

static void handle_notification_bitmap_destroy(struct thread_smc_args *args)
{
	uint32_t ret_val = FFA_INVALID_PARAMETERS;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t old_itr_status = 0;

	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
	    !args->a5 && !args->a6 && !args->a7) {
		struct guest_partition *prtn = NULL;
		struct notif_vm_bitmap *nvb = NULL;
		uint16_t vm_id = args->a1;

		prtn = virt_get_guest(vm_id);
		nvb = get_notif_vm_bitmap(prtn, vm_id);
		if (!nvb) {
			ret_val = FFA_INVALID_PARAMETERS;
			goto out_virt_put;
		}

		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);

		if (nvb->pending || nvb->bound) {
			ret_val = FFA_DENIED;
			goto out_unlock;
		}

		memset(nvb, 0, sizeof(*nvb));
		ret_val = FFA_OK;
		ret_fid = FFA_SUCCESS_32;
out_unlock:
		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
out_virt_put:
		virt_put_guest(prtn);
	}

	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
}
static void handle_notification_bind(struct thread_smc_args *args)
{
	uint32_t ret_val = FFA_INVALID_PARAMETERS;
	struct guest_partition *prtn = NULL;
	struct notif_vm_bitmap *nvb = NULL;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t old_itr_status = 0;
	uint64_t bitmap = 0;
	uint16_t vm_id = 0;

	if (args->a5 || args->a6 || args->a7)
		goto out;
	if (args->a2) {
		/* We only deal with global notifications */
		ret_val = FFA_DENIED;
		goto out;
	}

	/* The destination of the eventual notification */
	vm_id = FFA_DST(args->a1);
	bitmap = reg_pair_to_64(args->a4, args->a3);

	prtn = virt_get_guest(vm_id);
	nvb = get_notif_vm_bitmap(prtn, vm_id);
	if (!nvb) {
		ret_val = FFA_INVALID_PARAMETERS;
		goto out_virt_put;
	}

	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);

	if ((bitmap & nvb->bound)) {
		ret_val = FFA_DENIED;
	} else {
		nvb->bound |= bitmap;
		ret_val = FFA_OK;
		ret_fid = FFA_SUCCESS_32;
	}

	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
out_virt_put:
	virt_put_guest(prtn);
out:
	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
}

static void handle_notification_unbind(struct thread_smc_args *args)
{
	uint32_t ret_val = FFA_INVALID_PARAMETERS;
	struct guest_partition *prtn = NULL;
	struct notif_vm_bitmap *nvb = NULL;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t old_itr_status = 0;
	uint64_t bitmap = 0;
	uint16_t vm_id = 0;

	if (args->a2 || args->a5 || args->a6 || args->a7)
		goto out;

	/* The destination of the eventual notification */
	vm_id = FFA_DST(args->a1);
	bitmap = reg_pair_to_64(args->a4, args->a3);

	prtn = virt_get_guest(vm_id);
	nvb = get_notif_vm_bitmap(prtn, vm_id);
	if (!nvb) {
		ret_val = FFA_INVALID_PARAMETERS;
		goto out_virt_put;
	}

	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);

	if (bitmap & nvb->pending) {
		ret_val = FFA_DENIED;
	} else {
		nvb->bound &= ~bitmap;
		ret_val = FFA_OK;
		ret_fid = FFA_SUCCESS_32;
	}

	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
out_virt_put:
	virt_put_guest(prtn);
out:
	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
}

static void handle_notification_get(struct thread_smc_args *args)
{
	uint32_t w2 = FFA_INVALID_PARAMETERS;
	struct guest_partition *prtn = NULL;
	struct notif_vm_bitmap *nvb = NULL;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t old_itr_status = 0;
	uint16_t vm_id = 0;
	uint32_t w3 = 0;

	if (args->a5 || args->a6 || args->a7)
		goto out;
	if (!(args->a2 & 0x1)) {
		ret_fid = FFA_SUCCESS_32;
		w2 = 0;
		goto out;
	}
	vm_id = FFA_DST(args->a1);

	prtn = virt_get_guest(vm_id);
	nvb = get_notif_vm_bitmap(prtn, vm_id);
	if (!nvb)
		goto out_virt_put;

	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);

	reg_pair_from_64(nvb->pending, &w3, &w2);
	nvb->pending = 0;
	ret_fid = FFA_SUCCESS_32;

	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
out_virt_put:
	virt_put_guest(prtn);
out:
	spmc_set_args(args, ret_fid, 0, w2, w3, 0, 0);
}

struct notif_info_get_state {
	struct thread_smc_args *args;
	unsigned int ids_per_reg;
	unsigned int ids_count;
	unsigned int id_pos;
	unsigned int count;
	unsigned int max_list_count;
	unsigned int list_count;
};

static unsigned long get_smc_arg(struct thread_smc_args *args, unsigned int idx)
{
	switch (idx) {
	case 0:
		return args->a0;
	case 1:
		return args->a1;
	case 2:
		return args->a2;
	case 3:
		return args->a3;
	case 4:
		return args->a4;
	case 5:
		return args->a5;
	case 6:
		return args->a6;
	case 7:
		return args->a7;
	default:
		assert(0);
		return 0;
	}
}
static void set_smc_arg(struct thread_smc_args *args, unsigned int idx,
			unsigned long val)
{
	switch (idx) {
	case 0:
		args->a0 = val;
		break;
	case 1:
		args->a1 = val;
		break;
	case 2:
		args->a2 = val;
		break;
	case 3:
		args->a3 = val;
		break;
	case 4:
		args->a4 = val;
		break;
	case 5:
		args->a5 = val;
		break;
	case 6:
		args->a6 = val;
		break;
	case 7:
		args->a7 = val;
		break;
	default:
		assert(0);
	}
}

static bool add_id_in_regs(struct notif_info_get_state *state,
			   uint16_t id)
{
	unsigned int reg_idx = state->id_pos / state->ids_per_reg + 3;
	unsigned int reg_shift = (state->id_pos % state->ids_per_reg) * 16;
	unsigned long v;

	if (reg_idx > 7)
		return false;

	v = get_smc_arg(state->args, reg_idx);
	v &= ~(0xffffUL << reg_shift);
	v |= (unsigned long)id << reg_shift;
	set_smc_arg(state->args, reg_idx, v);

	state->id_pos++;
	state->count++;
	return true;
}

static bool add_id_count(struct notif_info_get_state *state)
{
	assert(state->list_count < state->max_list_count &&
	       state->count >= 1 && state->count <= 4);

	state->ids_count |= (state->count - 1) << (state->list_count * 2 + 12);
	state->list_count++;
	state->count = 0;

	return state->list_count < state->max_list_count;
}

static bool add_nvb_to_state(struct notif_info_get_state *state,
			     uint16_t guest_id, struct notif_vm_bitmap *nvb)
{
	if (!nvb->pending)
		return true;
	/*
	 * Add only the guest_id, meaning a global notification for this
	 * guest.
	 *
	 * If there were notifications for one or more specific vCPUs we'd
	 * add those before calling add_id_count(), but that's not
	 * supported.
	 */
	return add_id_in_regs(state, guest_id) && add_id_count(state);
}
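/*
 * Packing used by the helpers above for FFA_NOTIFICATION_INFO_GET
 * (illustration only): each endpoint/VM ID is a 16-bit value packed into
 * the return registers starting at w3, two IDs per register for the 32-bit
 * call and four per register for the 64-bit call. With the 32-bit
 * convention and pending notifications for the hypothetical guests 1 and 2:
 *
 *	w3 = 0x00020001	(ID 1 in bits [15:0], ID 2 in bits [31:16])
 *	w2 carries the list count, per-list ID counts and the "more
 *	   pending" flag, see handle_notification_info_get() below
 */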
static void handle_notification_info_get(struct thread_smc_args *args)
{
	struct notif_info_get_state state = { .args = args };
	uint32_t ffa_res = FFA_INVALID_PARAMETERS;
	struct guest_partition *prtn = NULL;
	struct notif_vm_bitmap *nvb = NULL;
	uint32_t more_pending_flag = 0;
	uint32_t itr_state = 0;
	uint16_t guest_id = 0;

	if (args->a1 || args->a2 || args->a3 || args->a4 || args->a5 ||
	    args->a6 || args->a7)
		goto err;

	if (OPTEE_SMC_IS_64(args->a0)) {
		spmc_set_args(args, FFA_SUCCESS_64, 0, 0, 0, 0, 0);
		state.ids_per_reg = 4;
		state.max_list_count = 31;
	} else {
		spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
		state.ids_per_reg = 2;
		state.max_list_count = 15;
	}

	while (true) {
		/*
		 * With NS-Virtualization we need to go through all
		 * partitions to collect the notification bitmaps, without
		 * it we just check the only notification bitmap we have.
		 */
		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
			prtn = virt_next_guest(prtn);
			if (!prtn)
				break;
			guest_id = virt_get_guest_id(prtn);
		}
		nvb = get_notif_vm_bitmap(prtn, guest_id);

		itr_state = cpu_spin_lock_xsave(&spmc_notif_lock);
		if (!add_nvb_to_state(&state, guest_id, nvb))
			more_pending_flag = BIT(0);
		cpu_spin_unlock_xrestore(&spmc_notif_lock, itr_state);

		if (!IS_ENABLED(CFG_NS_VIRTUALIZATION) || more_pending_flag)
			break;
	}
	virt_put_guest(prtn);

	if (!state.id_pos) {
		ffa_res = FFA_NO_DATA;
		goto err;
	}
	args->a2 = (state.list_count << FFA_NOTIF_INFO_GET_ID_COUNT_SHIFT) |
		   (state.ids_count << FFA_NOTIF_INFO_GET_ID_LIST_SHIFT) |
		   more_pending_flag;
	return;
err:
	spmc_set_args(args, FFA_ERROR, 0, ffa_res, 0, 0, 0);
}

void thread_spmc_set_async_notif_intid(int intid)
{
	assert(interrupt_can_raise_sgi(interrupt_get_main_chip()));
	notif_intid = intid;
	spmc_notif_is_ready = true;
	DMSG("Asynchronous notifications are ready");
}

void notif_send_async(uint32_t value, uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	struct notif_vm_bitmap *nvb = NULL;
	uint32_t old_itr_status = 0;

	prtn = virt_get_guest(guest_id);
	nvb = get_notif_vm_bitmap(prtn, guest_id);

	if (nvb) {
		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
		assert(value == NOTIF_VALUE_DO_BOTTOM_HALF &&
		       spmc_notif_is_ready && nvb->do_bottom_half_value >= 0 &&
		       notif_intid >= 0);
		nvb->pending |= BIT64(nvb->do_bottom_half_value);
		interrupt_raise_sgi(interrupt_get_main_chip(), notif_intid,
				    ITR_CPU_MASK_TO_THIS_CPU);
		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
	}

	virt_put_guest(prtn);
}
#else
void notif_send_async(uint32_t value, uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	struct notif_vm_bitmap *nvb = NULL;
	/* global notification, delay notification interrupt */
	uint32_t flags = BIT32(1);
	int res = 0;

	prtn = virt_get_guest(guest_id);
	nvb = get_notif_vm_bitmap(prtn, guest_id);

	if (nvb) {
		assert(value == NOTIF_VALUE_DO_BOTTOM_HALF &&
		       spmc_notif_is_ready && nvb->do_bottom_half_value >= 0);
		res = ffa_set_notification(guest_id, optee_endpoint_id, flags,
					   BIT64(nvb->do_bottom_half_value));
		if (res) {
			EMSG("notification set failed with error %d", res);
			panic();
		}
	}

	virt_put_guest(prtn);
}
#endif

/* Only called from assembly */
void thread_spmc_msg_recv(struct thread_smc_args *args);
void thread_spmc_msg_recv(struct thread_smc_args *args)
{
	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
	switch (args->a0) {
#if defined(CFG_CORE_SEL1_SPMC)
	case FFA_FEATURES:
		handle_features(args);
		break;
	case FFA_SPM_ID_GET:
		spmc_handle_spm_id_get(args);
		break;
#ifdef ARM64
	case FFA_RXTX_MAP_64:
#endif
	case FFA_RXTX_MAP_32:
		spmc_handle_rxtx_map(args, &my_rxtx);
		break;
	case FFA_RXTX_UNMAP:
		spmc_handle_rxtx_unmap(args, &my_rxtx);
		break;
	case FFA_RX_RELEASE:
		spmc_handle_rx_release(args, &my_rxtx);
		break;
	case FFA_PARTITION_INFO_GET:
		spmc_handle_partition_info_get(args, &my_rxtx);
		break;
	case FFA_RUN:
		spmc_handle_run(args);
		break;
#endif /*CFG_CORE_SEL1_SPMC*/
	case FFA_INTERRUPT:
		if (IS_ENABLED(CFG_CORE_SEL1_SPMC))
			spmc_set_args(args, FFA_NORMAL_WORLD_RESUME, 0, 0, 0,
				      0, 0);
		else
			spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0);
		break;
#ifdef ARM64
	case FFA_MSG_SEND_DIRECT_REQ_64:
#endif
	case FFA_MSG_SEND_DIRECT_REQ_32:
		handle_direct_request(args, &my_rxtx);
		break;
#if defined(CFG_CORE_SEL1_SPMC)
#ifdef ARM64
	case FFA_MEM_SHARE_64:
#endif
	case FFA_MEM_SHARE_32:
		handle_mem_share(args, &my_rxtx);
		break;
	case FFA_MEM_RECLAIM:
		if (!IS_ENABLED(CFG_SECURE_PARTITION) ||
		    !ffa_mem_reclaim(args, NULL))
			handle_mem_reclaim(args);
		break;
	case FFA_MEM_FRAG_TX:
		handle_mem_frag_tx(args, &my_rxtx);
		break;
	case FFA_NOTIFICATION_BITMAP_CREATE:
		handle_notification_bitmap_create(args);
		break;
	case FFA_NOTIFICATION_BITMAP_DESTROY:
		handle_notification_bitmap_destroy(args);
		break;
	case FFA_NOTIFICATION_BIND:
		handle_notification_bind(args);
		break;
	case FFA_NOTIFICATION_UNBIND:
		handle_notification_unbind(args);
		break;
	case FFA_NOTIFICATION_GET:
		handle_notification_get(args);
		break;
#ifdef ARM64
	case FFA_NOTIFICATION_INFO_GET_64:
#endif
	case FFA_NOTIFICATION_INFO_GET_32:
		handle_notification_info_get(args);
		break;
#endif /*CFG_CORE_SEL1_SPMC*/
	case FFA_ERROR:
		EMSG("Cannot handle FFA_ERROR(%d)", (int)args->a2);
		if (!IS_ENABLED(CFG_CORE_SEL1_SPMC)) {
			/*
			 * The SPMC will return an FFA_ERROR back so better
			 * panic() now than flooding the log.
			 */
			panic("FFA_ERROR from SPMC is fatal");
		}
		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break;
	default:
		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
		set_simple_ret_val(args, FFA_NOT_SUPPORTED);
	}
}

static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset)
{
	size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
	struct thread_ctx *thr = threads + thread_get_id();
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	struct optee_msg_arg *arg = NULL;
	struct mobj *mobj = NULL;
	uint32_t num_params = 0;
	size_t sz = 0;

	mobj = mobj_ffa_get_by_cookie(cookie, 0);
	if (!mobj) {
		EMSG("Can't find cookie %#"PRIx64, cookie);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	res = mobj_inc_map(mobj);
	if (res)
		goto out_put_mobj;

	res = TEE_ERROR_BAD_PARAMETERS;
	arg = mobj_get_va(mobj, offset, sizeof(*arg));
	if (!arg)
		goto out_dec_map;

	num_params = READ_ONCE(arg->num_params);
	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
		goto out_dec_map;

	sz = OPTEE_MSG_GET_ARG_SIZE(num_params);

	thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc);
	if (!thr->rpc_arg)
		goto out_dec_map;

	virt_on_stdcall();
	res = tee_entry_std(arg, num_params);

	thread_rpc_shm_cache_clear(&thr->shm_cache);
	thr->rpc_arg = NULL;

out_dec_map:
	mobj_dec_map(mobj);
out_put_mobj:
	mobj_put(mobj);
	return res;
}
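/*
 * Layout sketch (informative only) of the shared memory object referenced
 * by the cookie in yielding_call_with_arg() above:
 *
 *	offset			struct optee_msg_arg for the call itself,
 *				OPTEE_MSG_GET_ARG_SIZE(num_params) bytes
 *	offset + that size	second struct optee_msg_arg, reserved for
 *				RPC and reachable via thr->rpc_arg
 */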
2077 */ 2078 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1, 2079 uint32_t a2, uint32_t a3, 2080 uint32_t a4, uint32_t a5 __unused) 2081 { 2082 /* 2083 * Arguments are supplied from handle_yielding_call() as: 2084 * a0 <- w1 2085 * a1 <- w3 2086 * a2 <- w4 2087 * a3 <- w5 2088 * a4 <- w6 2089 * a5 <- w7 2090 */ 2091 thread_get_tsd()->rpc_target_info = swap_src_dst(a0); 2092 if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG) 2093 return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4); 2094 return FFA_DENIED; 2095 } 2096 2097 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm) 2098 { 2099 uint64_t offs = tpm->u.memref.offs; 2100 2101 param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN + 2102 OPTEE_MSG_ATTR_TYPE_FMEM_INPUT; 2103 2104 param->u.fmem.offs_low = offs; 2105 param->u.fmem.offs_high = offs >> 32; 2106 if (param->u.fmem.offs_high != offs >> 32) 2107 return false; 2108 2109 param->u.fmem.size = tpm->u.memref.size; 2110 if (tpm->u.memref.mobj) { 2111 uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj); 2112 2113 /* If a mobj is passed it better be one with a valid cookie. */ 2114 if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID) 2115 return false; 2116 param->u.fmem.global_id = cookie; 2117 } else { 2118 param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID; 2119 } 2120 2121 return true; 2122 } 2123 2124 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params, 2125 struct thread_param *params, 2126 struct optee_msg_arg **arg_ret) 2127 { 2128 size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS); 2129 struct thread_ctx *thr = threads + thread_get_id(); 2130 struct optee_msg_arg *arg = thr->rpc_arg; 2131 2132 if (num_params > THREAD_RPC_MAX_NUM_PARAMS) 2133 return TEE_ERROR_BAD_PARAMETERS; 2134 2135 if (!arg) { 2136 EMSG("rpc_arg not set"); 2137 return TEE_ERROR_GENERIC; 2138 } 2139 2140 memset(arg, 0, sz); 2141 arg->cmd = cmd; 2142 arg->num_params = num_params; 2143 arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */ 2144 2145 for (size_t n = 0; n < num_params; n++) { 2146 switch (params[n].attr) { 2147 case THREAD_PARAM_ATTR_NONE: 2148 arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE; 2149 break; 2150 case THREAD_PARAM_ATTR_VALUE_IN: 2151 case THREAD_PARAM_ATTR_VALUE_OUT: 2152 case THREAD_PARAM_ATTR_VALUE_INOUT: 2153 arg->params[n].attr = params[n].attr - 2154 THREAD_PARAM_ATTR_VALUE_IN + 2155 OPTEE_MSG_ATTR_TYPE_VALUE_INPUT; 2156 arg->params[n].u.value.a = params[n].u.value.a; 2157 arg->params[n].u.value.b = params[n].u.value.b; 2158 arg->params[n].u.value.c = params[n].u.value.c; 2159 break; 2160 case THREAD_PARAM_ATTR_MEMREF_IN: 2161 case THREAD_PARAM_ATTR_MEMREF_OUT: 2162 case THREAD_PARAM_ATTR_MEMREF_INOUT: 2163 if (!set_fmem(arg->params + n, params + n)) 2164 return TEE_ERROR_BAD_PARAMETERS; 2165 break; 2166 default: 2167 return TEE_ERROR_BAD_PARAMETERS; 2168 } 2169 } 2170 2171 if (arg_ret) 2172 *arg_ret = arg; 2173 2174 return TEE_SUCCESS; 2175 } 2176 2177 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params, 2178 struct thread_param *params) 2179 { 2180 for (size_t n = 0; n < num_params; n++) { 2181 switch (params[n].attr) { 2182 case THREAD_PARAM_ATTR_VALUE_OUT: 2183 case THREAD_PARAM_ATTR_VALUE_INOUT: 2184 params[n].u.value.a = arg->params[n].u.value.a; 2185 params[n].u.value.b = arg->params[n].u.value.b; 2186 params[n].u.value.c = arg->params[n].u.value.c; 2187 break; 2188 case THREAD_PARAM_ATTR_MEMREF_OUT: 2189 case THREAD_PARAM_ATTR_MEMREF_INOUT: 2190 params[n].u.memref.size = 
arg->params[n].u.fmem.size; 2191 break; 2192 default: 2193 break; 2194 } 2195 } 2196 2197 return arg->ret; 2198 } 2199 2200 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params, 2201 struct thread_param *params) 2202 { 2203 struct thread_rpc_arg rpc_arg = { .call = { 2204 .w1 = thread_get_tsd()->rpc_target_info, 2205 .w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD, 2206 }, 2207 }; 2208 struct optee_msg_arg *arg = NULL; 2209 uint32_t ret = 0; 2210 2211 ret = get_rpc_arg(cmd, num_params, params, &arg); 2212 if (ret) 2213 return ret; 2214 2215 thread_rpc(&rpc_arg); 2216 2217 return get_rpc_arg_res(arg, num_params, params); 2218 } 2219 2220 static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj) 2221 { 2222 struct thread_rpc_arg rpc_arg = { .call = { 2223 .w1 = thread_get_tsd()->rpc_target_info, 2224 .w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD, 2225 }, 2226 }; 2227 struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0); 2228 uint32_t res2 = 0; 2229 uint32_t res = 0; 2230 2231 DMSG("freeing cookie %#"PRIx64, cookie); 2232 2233 res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL); 2234 2235 mobj_put(mobj); 2236 res2 = mobj_ffa_unregister_by_cookie(cookie); 2237 if (res2) 2238 DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32, 2239 cookie, res2); 2240 if (!res) 2241 thread_rpc(&rpc_arg); 2242 } 2243 2244 static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt) 2245 { 2246 struct thread_rpc_arg rpc_arg = { .call = { 2247 .w1 = thread_get_tsd()->rpc_target_info, 2248 .w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD, 2249 }, 2250 }; 2251 struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align); 2252 struct optee_msg_arg *arg = NULL; 2253 unsigned int internal_offset = 0; 2254 struct mobj *mobj = NULL; 2255 uint64_t cookie = 0; 2256 2257 if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg)) 2258 return NULL; 2259 2260 thread_rpc(&rpc_arg); 2261 2262 if (arg->num_params != 1 || 2263 arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT) 2264 return NULL; 2265 2266 internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs); 2267 cookie = READ_ONCE(arg->params->u.fmem.global_id); 2268 mobj = mobj_ffa_get_by_cookie(cookie, internal_offset); 2269 if (!mobj) { 2270 DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed", 2271 cookie, internal_offset); 2272 return NULL; 2273 } 2274 2275 assert(mobj_is_nonsec(mobj)); 2276 2277 if (mobj->size < size) { 2278 DMSG("Mobj %#"PRIx64": wrong size", cookie); 2279 mobj_put(mobj); 2280 return NULL; 2281 } 2282 2283 if (mobj_inc_map(mobj)) { 2284 DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie); 2285 mobj_put(mobj); 2286 return NULL; 2287 } 2288 2289 return mobj; 2290 } 2291 2292 struct mobj *thread_rpc_alloc_payload(size_t size) 2293 { 2294 return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL); 2295 } 2296 2297 struct mobj *thread_rpc_alloc_kernel_payload(size_t size) 2298 { 2299 return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL); 2300 } 2301 2302 void thread_rpc_free_kernel_payload(struct mobj *mobj) 2303 { 2304 if (mobj) 2305 thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL, 2306 mobj_get_cookie(mobj), mobj); 2307 } 2308 2309 void thread_rpc_free_payload(struct mobj *mobj) 2310 { 2311 if (mobj) 2312 thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj), 2313 mobj); 2314 } 2315 2316 struct mobj *thread_rpc_alloc_global_payload(size_t size) 2317 { 2318 return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL); 2319 } 2320 2321 void thread_rpc_free_global_payload(struct
mobj *mobj) 2322 { 2323 if (mobj) 2324 thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL, 2325 mobj_get_cookie(mobj), mobj); 2326 } 2327 2328 void thread_spmc_register_secondary_ep(vaddr_t ep) 2329 { 2330 unsigned long ret = 0; 2331 2332 /* Let the SPM know the entry point for secondary CPUs */ 2333 ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0); 2334 2335 if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64) 2336 EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret); 2337 } 2338 2339 static uint16_t ffa_id_get(void) 2340 { 2341 /* 2342 * Ask the SPM component running at a higher EL to return our FF-A ID. 2343 * This can either be the SPMC ID (if the SPMC is enabled in OP-TEE) or 2344 * the partition ID (if not). 2345 */ 2346 struct thread_smc_args args = { 2347 .a0 = FFA_ID_GET, 2348 }; 2349 2350 thread_smccc(&args); 2351 if (!is_ffa_success(args.a0)) { 2352 if (args.a0 == FFA_ERROR) 2353 EMSG("Get id failed with error %ld", args.a2); 2354 else 2355 EMSG("Get id failed"); 2356 panic(); 2357 } 2358 2359 return args.a2; 2360 } 2361 2362 static uint16_t ffa_spm_id_get(void) 2363 { 2364 /* 2365 * Ask the SPM component running at a higher EL to return its ID. 2366 * If OP-TEE implements the S-EL1 SPMC, this will get the SPMD ID. 2367 * If not, the ID of the SPMC will be returned. 2368 */ 2369 struct thread_smc_args args = { 2370 .a0 = FFA_SPM_ID_GET, 2371 }; 2372 2373 thread_smccc(&args); 2374 if (!is_ffa_success(args.a0)) { 2375 if (args.a0 == FFA_ERROR) 2376 EMSG("Get spm id failed with error %ld", args.a2); 2377 else 2378 EMSG("Get spm id failed"); 2379 panic(); 2380 } 2381 2382 return args.a2; 2383 } 2384 2385 #if defined(CFG_CORE_SEL1_SPMC) 2386 static TEE_Result spmc_init(void) 2387 { 2388 if (IS_ENABLED(CFG_NS_VIRTUALIZATION) && 2389 virt_add_guest_spec_data(&notif_vm_bitmap_id, 2390 sizeof(struct notif_vm_bitmap), NULL)) 2391 panic("virt_add_guest_spec_data"); 2392 spmd_id = ffa_spm_id_get(); 2393 DMSG("SPMD ID %#"PRIx16, spmd_id); 2394 2395 spmc_id = ffa_id_get(); 2396 DMSG("SPMC ID %#"PRIx16, spmc_id); 2397 2398 optee_endpoint_id = FFA_SWD_ID_MIN; 2399 while (optee_endpoint_id == spmd_id || optee_endpoint_id == spmc_id) 2400 optee_endpoint_id++; 2401 2402 DMSG("OP-TEE endpoint ID %#"PRIx16, optee_endpoint_id); 2403 2404 /* 2405 * If the SPMD thinks we are version 1.0 it will report version 1.0 to 2406 * normal world regardless of what version we query the SPM with. 2407 * However, if the SPMD thinks we are version 1.1 it will forward 2408 * queries from normal world to let us negotiate version. So by 2409 * setting version 1.0 here we should be compatible. 2410 * 2411 * Note that disagreement on negotiated version means that we'll 2412 * have communication problems with normal world.
2413 */ 2414 my_rxtx.ffa_vers = FFA_VERSION_1_0; 2415 2416 return TEE_SUCCESS; 2417 } 2418 #else /* !defined(CFG_CORE_SEL1_SPMC) */ 2419 static void spmc_rxtx_map(struct ffa_rxtx *rxtx) 2420 { 2421 struct thread_smc_args args = { 2422 #ifdef ARM64 2423 .a0 = FFA_RXTX_MAP_64, 2424 #else 2425 .a0 = FFA_RXTX_MAP_32, 2426 #endif 2427 .a1 = virt_to_phys(rxtx->tx), 2428 .a2 = virt_to_phys(rxtx->rx), 2429 .a3 = 1, 2430 }; 2431 2432 thread_smccc(&args); 2433 if (!is_ffa_success(args.a0)) { 2434 if (args.a0 == FFA_ERROR) 2435 EMSG("rxtx map failed with error %ld", args.a2); 2436 else 2437 EMSG("rxtx map failed"); 2438 panic(); 2439 } 2440 } 2441 2442 static uint32_t get_ffa_version(uint32_t my_version) 2443 { 2444 struct thread_smc_args args = { 2445 .a0 = FFA_VERSION, 2446 .a1 = my_version, 2447 }; 2448 2449 thread_smccc(&args); 2450 if (args.a0 & BIT(31)) { 2451 EMSG("FF-A version failed with error %ld", args.a0); 2452 panic(); 2453 } 2454 2455 return args.a0; 2456 } 2457 2458 static void *spmc_retrieve_req(uint64_t cookie, 2459 struct ffa_mem_transaction_x *trans) 2460 { 2461 struct ffa_mem_access *acc_descr_array = NULL; 2462 struct ffa_mem_access_perm *perm_descr = NULL; 2463 struct thread_smc_args args = { 2464 .a0 = FFA_MEM_RETRIEVE_REQ_32, 2465 .a3 = 0, /* Address, Using TX -> MBZ */ 2466 .a4 = 0, /* Using TX -> MBZ */ 2467 }; 2468 size_t size = 0; 2469 int rc = 0; 2470 2471 if (my_rxtx.ffa_vers == FFA_VERSION_1_0) { 2472 struct ffa_mem_transaction_1_0 *trans_descr = my_rxtx.tx; 2473 2474 size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access); 2475 memset(trans_descr, 0, size); 2476 trans_descr->sender_id = thread_get_tsd()->rpc_target_info; 2477 trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR; 2478 trans_descr->global_handle = cookie; 2479 trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE | 2480 FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT; 2481 trans_descr->mem_access_count = 1; 2482 acc_descr_array = trans_descr->mem_access_array; 2483 } else { 2484 struct ffa_mem_transaction_1_1 *trans_descr = my_rxtx.tx; 2485 2486 size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access); 2487 memset(trans_descr, 0, size); 2488 trans_descr->sender_id = thread_get_tsd()->rpc_target_info; 2489 trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR; 2490 trans_descr->global_handle = cookie; 2491 trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE | 2492 FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT; 2493 trans_descr->mem_access_count = 1; 2494 trans_descr->mem_access_offs = sizeof(*trans_descr); 2495 trans_descr->mem_access_size = sizeof(struct ffa_mem_access); 2496 acc_descr_array = (void *)((vaddr_t)my_rxtx.tx + 2497 sizeof(*trans_descr)); 2498 } 2499 acc_descr_array->region_offs = 0; 2500 acc_descr_array->reserved = 0; 2501 perm_descr = &acc_descr_array->access_perm; 2502 perm_descr->endpoint_id = optee_endpoint_id; 2503 perm_descr->perm = FFA_MEM_ACC_RW; 2504 perm_descr->flags = 0; 2505 2506 args.a1 = size; /* Total Length */ 2507 args.a2 = size; /* Frag Length == Total length */ 2508 thread_smccc(&args); 2509 if (args.a0 != FFA_MEM_RETRIEVE_RESP) { 2510 if (args.a0 == FFA_ERROR) 2511 EMSG("Failed to fetch cookie %#"PRIx64" error code %d", 2512 cookie, (int)args.a2); 2513 else 2514 EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64, 2515 cookie, args.a0); 2516 return NULL; 2517 } 2518 rc = spmc_read_mem_transaction(my_rxtx.ffa_vers, my_rxtx.rx, 2519 my_rxtx.size, trans); 2520 if (rc) { 2521 EMSG("Memory transaction failure for cookie %#"PRIx64" rc %d", 2522 cookie, rc); 2523 
return NULL; 2524 } 2525 2526 return my_rxtx.rx; 2527 } 2528 2529 void thread_spmc_relinquish(uint64_t cookie) 2530 { 2531 struct ffa_mem_relinquish *relinquish_desc = my_rxtx.tx; 2532 struct thread_smc_args args = { 2533 .a0 = FFA_MEM_RELINQUISH, 2534 }; 2535 2536 memset(relinquish_desc, 0, sizeof(*relinquish_desc)); 2537 relinquish_desc->handle = cookie; 2538 relinquish_desc->flags = 0; 2539 relinquish_desc->endpoint_count = 1; 2540 relinquish_desc->endpoint_id_array[0] = optee_endpoint_id; 2541 thread_smccc(&args); 2542 if (!is_ffa_success(args.a0)) 2543 EMSG("Failed to relinquish cookie %#"PRIx64, cookie); 2544 } 2545 2546 static int set_pages(struct ffa_address_range *regions, 2547 unsigned int num_regions, unsigned int num_pages, 2548 struct mobj_ffa *mf) 2549 { 2550 unsigned int n = 0; 2551 unsigned int idx = 0; 2552 2553 for (n = 0; n < num_regions; n++) { 2554 unsigned int page_count = READ_ONCE(regions[n].page_count); 2555 uint64_t addr = READ_ONCE(regions[n].address); 2556 2557 if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count)) 2558 return FFA_INVALID_PARAMETERS; 2559 } 2560 2561 if (idx != num_pages) 2562 return FFA_INVALID_PARAMETERS; 2563 2564 return 0; 2565 } 2566 2567 struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie) 2568 { 2569 struct mobj_ffa *ret = NULL; 2570 struct ffa_mem_transaction_x retrieve_desc = { }; 2571 struct ffa_mem_access *descr_array = NULL; 2572 struct ffa_mem_region *descr = NULL; 2573 struct mobj_ffa *mf = NULL; 2574 unsigned int num_pages = 0; 2575 unsigned int offs = 0; 2576 void *buf = NULL; 2577 struct thread_smc_args ffa_rx_release_args = { 2578 .a0 = FFA_RX_RELEASE 2579 }; 2580 2581 /* 2582 * OP-TEE is only supporting a single mem_region while the 2583 * specification allows for more than one. 2584 */ 2585 buf = spmc_retrieve_req(cookie, &retrieve_desc); 2586 if (!buf) { 2587 EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64, 2588 cookie); 2589 return NULL; 2590 } 2591 2592 descr_array = (void *)((vaddr_t)buf + retrieve_desc.mem_access_offs); 2593 offs = READ_ONCE(descr_array->region_offs); 2594 descr = (struct ffa_mem_region *)((vaddr_t)buf + offs); 2595 2596 num_pages = READ_ONCE(descr->total_page_count); 2597 mf = mobj_ffa_spmc_new(cookie, num_pages); 2598 if (!mf) 2599 goto out; 2600 2601 if (set_pages(descr->address_range_array, 2602 READ_ONCE(descr->address_range_count), num_pages, mf)) { 2603 mobj_ffa_spmc_delete(mf); 2604 goto out; 2605 } 2606 2607 ret = mf; 2608 2609 out: 2610 /* Release RX buffer after the mem retrieve request. 
*/ 2611 thread_smccc(&ffa_rx_release_args); 2612 2613 return ret; 2614 } 2615 2616 static TEE_Result spmc_init(void) 2617 { 2618 unsigned int major = 0; 2619 unsigned int minor __maybe_unused = 0; 2620 uint32_t my_vers = 0; 2621 uint32_t vers = 0; 2622 2623 my_vers = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR); 2624 vers = get_ffa_version(my_vers); 2625 major = (vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK; 2626 minor = (vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK; 2627 DMSG("SPMC reported version %u.%u", major, minor); 2628 if (major != FFA_VERSION_MAJOR) { 2629 EMSG("Incompatible major version %u, expected %u", 2630 major, FFA_VERSION_MAJOR); 2631 panic(); 2632 } 2633 if (vers < my_vers) 2634 my_vers = vers; 2635 DMSG("Using version %u.%u", 2636 (my_vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK, 2637 (my_vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK); 2638 my_rxtx.ffa_vers = my_vers; 2639 2640 spmc_rxtx_map(&my_rxtx); 2641 2642 spmc_id = ffa_spm_id_get(); 2643 DMSG("SPMC ID %#"PRIx16, spmc_id); 2644 2645 optee_endpoint_id = ffa_id_get(); 2646 DMSG("OP-TEE endpoint ID %#"PRIx16, optee_endpoint_id); 2647 2648 if (!ffa_features(FFA_NOTIFICATION_SET)) { 2649 spmc_notif_is_ready = true; 2650 DMSG("Asynchronous notifications are ready"); 2651 } 2652 2653 return TEE_SUCCESS; 2654 } 2655 #endif /* !defined(CFG_CORE_SEL1_SPMC) */ 2656 2657 nex_service_init(spmc_init); 2658
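
/*
 * A minimal usage sketch (not part of the original file) of the RPC
 * shared-memory helpers defined above: a hypothetical caller, here named
 * copy_to_nonsec_shm(), asks normal world for a buffer with
 * thread_rpc_alloc_payload(), gets a virtual address with mobj_get_va()
 * and releases everything with thread_rpc_free_payload(). The helper name
 * and the exact error codes are assumptions for illustration only; the
 * functions it calls are the ones defined in this file.
 */
static TEE_Result __maybe_unused copy_to_nonsec_shm(const void *data,
						    size_t len)
{
	struct mobj *mobj = NULL;
	void *va = NULL;

	/* Asks normal world, via RPC, to allocate and share a buffer */
	mobj = thread_rpc_alloc_payload(len);
	if (!mobj)
		return TEE_ERROR_OUT_OF_MEMORY;

	/* thread_rpc_alloc() has already mapped the mobj, so get a VA */
	va = mobj_get_va(mobj, 0, len);
	if (!va) {
		thread_rpc_free_payload(mobj);
		return TEE_ERROR_GENERIC;
	}

	memcpy(va, data, len);

	/* Unregisters the cookie and asks normal world to free the buffer */
	thread_rpc_free_payload(mobj);

	return TEE_SUCCESS;
}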