// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2020-2023, Linaro Limited.
 * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
 */

#include <assert.h>
#include <ffa.h>
#include <initcall.h>
#include <io.h>
#include <kernel/interrupt.h>
#include <kernel/notif.h>
#include <kernel/panic.h>
#include <kernel/secure_partition.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/thread_spmc.h>
#include <kernel/virtualization.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <optee_ffa.h>
#include <optee_msg.h>
#include <optee_rpc_cmd.h>
#include <sm/optee_smc.h>
#include <string.h>
#include <sys/queue.h>
#include <tee/entry_std.h>
#include <tee/uuid.h>
#include <util.h>

#if defined(CFG_CORE_SEL1_SPMC)
struct mem_share_state {
	struct mobj_ffa *mf;
	unsigned int page_count;
	unsigned int region_count;
	unsigned int current_page_idx;
};

struct mem_frag_state {
	struct mem_share_state share;
	tee_mm_entry_t *mm;
	unsigned int frag_offset;
	SLIST_ENTRY(mem_frag_state) link;
};
#endif

static unsigned int spmc_notif_lock = SPINLOCK_UNLOCK;
static int do_bottom_half_value = -1;
static uint16_t notif_vm_id;
static bool spmc_notif_is_ready;

/* Initialized in spmc_init() below */
static uint16_t my_endpoint_id __nex_bss;
#ifdef CFG_CORE_SEL1_SPMC
static const uint32_t my_part_props = FFA_PART_PROP_DIRECT_REQ_RECV |
				      FFA_PART_PROP_DIRECT_REQ_SEND |
#ifdef CFG_NS_VIRTUALIZATION
				      FFA_PART_PROP_NOTIF_CREATED |
				      FFA_PART_PROP_NOTIF_DESTROYED |
#endif
#ifdef ARM64
				      FFA_PART_PROP_AARCH64_STATE |
#endif
				      FFA_PART_PROP_IS_PE_ID;

static uint32_t my_uuid_words[] = {
	/*
	 * - if the SPMC is in S-EL2 this UUID describes OP-TEE as a S-EL1
	 *   SP, or
	 * - if the SPMC is in S-EL1 then this UUID is for OP-TEE as a
	 *   logical partition, residing in the same exception level as the
	 *   SPMC
	 * UUID 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
	 */
	0xe0786148, 0xe311f8e7, 0x02005ebc, 0x1bc5d5a5,
};

/*
 * If struct ffa_rxtx::size is 0 the RX/TX buffers are not mapped or
 * initialized.
 *
 * struct ffa_rxtx::spinlock protects the variables below from concurrent
 * access; this includes the use of the content of struct ffa_rxtx::rx and
 * @frag_state_head.
 *
 * struct ffa_rxtx::tx_is_mine is true when we may write to struct
 * ffa_rxtx::tx and false when it is owned by normal world.
 *
 * Note that we can't prevent normal world from updating the content of
 * these buffers so we must always be careful when reading, even while we
 * hold the lock.
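 *
 * As an example of that ownership handover: spmc_handle_partition_info_get()
 * below writes its reply into struct ffa_rxtx::tx and clears tx_is_mine,
 * and normal world hands the buffer back with FFA_RX_RELEASE (its RX is
 * our TX), which sets tx_is_mine again in spmc_handle_rx_release().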
94 */ 95 96 static struct ffa_rxtx my_rxtx __nex_bss; 97 98 static bool is_nw_buf(struct ffa_rxtx *rxtx) 99 { 100 return rxtx == &my_rxtx; 101 } 102 103 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head = 104 SLIST_HEAD_INITIALIZER(&frag_state_head); 105 106 static uint64_t notif_pending_bitmap; 107 static uint64_t notif_bound_bitmap; 108 static bool notif_vm_id_valid; 109 static int notif_intid = -1; 110 #else 111 static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE); 112 static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE); 113 static struct ffa_rxtx my_rxtx = { 114 .rx = __rx_buf, 115 .tx = __tx_buf, 116 .size = sizeof(__rx_buf), 117 }; 118 #endif 119 120 static uint32_t swap_src_dst(uint32_t src_dst) 121 { 122 return (src_dst >> 16) | (src_dst << 16); 123 } 124 125 static uint16_t get_sender_id(uint32_t src_dst) 126 { 127 return src_dst >> 16; 128 } 129 130 void spmc_set_args(struct thread_smc_args *args, uint32_t fid, uint32_t src_dst, 131 uint32_t w2, uint32_t w3, uint32_t w4, uint32_t w5) 132 { 133 *args = (struct thread_smc_args){ .a0 = fid, 134 .a1 = src_dst, 135 .a2 = w2, 136 .a3 = w3, 137 .a4 = w4, 138 .a5 = w5, }; 139 } 140 141 uint32_t spmc_exchange_version(uint32_t vers, struct ffa_rxtx *rxtx) 142 { 143 /* 144 * No locking, if the caller does concurrent calls to this it's 145 * only making a mess for itself. We must be able to renegotiate 146 * the FF-A version in order to support differing versions between 147 * the loader and the driver. 148 */ 149 if (vers < FFA_VERSION_1_1) 150 rxtx->ffa_vers = FFA_VERSION_1_0; 151 else 152 rxtx->ffa_vers = FFA_VERSION_1_1; 153 154 return rxtx->ffa_vers; 155 } 156 157 #if defined(CFG_CORE_SEL1_SPMC) 158 static void handle_features(struct thread_smc_args *args) 159 { 160 uint32_t ret_fid = FFA_ERROR; 161 uint32_t ret_w2 = FFA_NOT_SUPPORTED; 162 163 switch (args->a1) { 164 case FFA_FEATURE_SCHEDULE_RECV_INTR: 165 if (spmc_notif_is_ready) { 166 ret_fid = FFA_SUCCESS_32; 167 ret_w2 = notif_intid; 168 } 169 break; 170 171 #ifdef ARM64 172 case FFA_RXTX_MAP_64: 173 #endif 174 case FFA_RXTX_MAP_32: 175 ret_fid = FFA_SUCCESS_32; 176 ret_w2 = 0; /* 4kB Minimum buffer size and alignment boundary */ 177 break; 178 #ifdef ARM64 179 case FFA_MEM_SHARE_64: 180 #endif 181 case FFA_MEM_SHARE_32: 182 ret_fid = FFA_SUCCESS_32; 183 /* 184 * Partition manager supports transmission of a memory 185 * transaction descriptor in a buffer dynamically allocated 186 * by the endpoint. 
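		 *
		 * This is what BIT(0) in w2 below advertises; a descriptor
		 * passed in such a buffer is handled by
		 * handle_mem_share_tmem() rather than via the RX buffer.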
187 */ 188 ret_w2 = BIT(0); 189 break; 190 191 case FFA_ERROR: 192 case FFA_VERSION: 193 case FFA_SUCCESS_32: 194 #ifdef ARM64 195 case FFA_SUCCESS_64: 196 #endif 197 case FFA_FEATURES: 198 case FFA_SPM_ID_GET: 199 case FFA_MEM_FRAG_TX: 200 case FFA_MEM_RECLAIM: 201 case FFA_MSG_SEND_DIRECT_REQ_64: 202 case FFA_MSG_SEND_DIRECT_REQ_32: 203 case FFA_INTERRUPT: 204 case FFA_PARTITION_INFO_GET: 205 case FFA_RXTX_UNMAP: 206 case FFA_RX_RELEASE: 207 case FFA_FEATURE_MANAGED_EXIT_INTR: 208 case FFA_NOTIFICATION_BITMAP_CREATE: 209 case FFA_NOTIFICATION_BITMAP_DESTROY: 210 case FFA_NOTIFICATION_BIND: 211 case FFA_NOTIFICATION_UNBIND: 212 case FFA_NOTIFICATION_SET: 213 case FFA_NOTIFICATION_GET: 214 case FFA_NOTIFICATION_INFO_GET_32: 215 #ifdef ARM64 216 case FFA_NOTIFICATION_INFO_GET_64: 217 #endif 218 ret_fid = FFA_SUCCESS_32; 219 ret_w2 = FFA_PARAM_MBZ; 220 break; 221 default: 222 break; 223 } 224 225 spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ, 226 FFA_PARAM_MBZ, FFA_PARAM_MBZ); 227 } 228 229 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret) 230 { 231 tee_mm_entry_t *mm = NULL; 232 233 if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz)) 234 return FFA_INVALID_PARAMETERS; 235 236 mm = tee_mm_alloc(&tee_mm_shm, sz); 237 if (!mm) 238 return FFA_NO_MEMORY; 239 240 if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa, 241 sz / SMALL_PAGE_SIZE, 242 MEM_AREA_NSEC_SHM)) { 243 tee_mm_free(mm); 244 return FFA_INVALID_PARAMETERS; 245 } 246 247 *va_ret = (void *)tee_mm_get_smem(mm); 248 return 0; 249 } 250 251 static void handle_spm_id_get(struct thread_smc_args *args) 252 { 253 spmc_set_args(args, FFA_SUCCESS_32, FFA_PARAM_MBZ, my_endpoint_id, 254 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ); 255 } 256 257 static void unmap_buf(void *va, size_t sz) 258 { 259 tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va); 260 261 assert(mm); 262 core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE); 263 tee_mm_free(mm); 264 } 265 266 void spmc_handle_rxtx_map(struct thread_smc_args *args, struct ffa_rxtx *rxtx) 267 { 268 int rc = 0; 269 uint32_t ret_fid = FFA_ERROR; 270 unsigned int sz = 0; 271 paddr_t rx_pa = 0; 272 paddr_t tx_pa = 0; 273 void *rx = NULL; 274 void *tx = NULL; 275 276 cpu_spin_lock(&rxtx->spinlock); 277 278 if (args->a3 & GENMASK_64(63, 6)) { 279 rc = FFA_INVALID_PARAMETERS; 280 goto out; 281 } 282 283 sz = args->a3 * SMALL_PAGE_SIZE; 284 if (!sz) { 285 rc = FFA_INVALID_PARAMETERS; 286 goto out; 287 } 288 /* TX/RX are swapped compared to the caller */ 289 tx_pa = args->a2; 290 rx_pa = args->a1; 291 292 if (rxtx->size) { 293 rc = FFA_DENIED; 294 goto out; 295 } 296 297 /* 298 * If the buffer comes from a SP the address is virtual and already 299 * mapped. 300 */ 301 if (is_nw_buf(rxtx)) { 302 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 303 enum teecore_memtypes mt = MEM_AREA_NEX_NSEC_SHM; 304 bool tx_alloced = false; 305 306 /* 307 * With virtualization we establish this mapping in 308 * the nexus mapping which then is replicated to 309 * each partition. 310 * 311 * This means that this mapping must be done before 312 * any partition is created and then must not be 313 * changed. 314 */ 315 316 /* 317 * core_mmu_add_mapping() may reuse previous 318 * mappings. First check if there's any mappings to 319 * reuse so we know how to clean up in case of 320 * failure. 
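			 *
			 * Only a mapping added here (tx_alloced) is removed
			 * again on failure; a mapping merely found via
			 * phys_to_virt() is left in place.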
			 */
			tx = phys_to_virt(tx_pa, mt, sz);
			rx = phys_to_virt(rx_pa, mt, sz);
			if (!tx) {
				tx = core_mmu_add_mapping(mt, tx_pa, sz);
				if (!tx) {
					rc = FFA_NO_MEMORY;
					goto out;
				}
				tx_alloced = true;
			}
			if (!rx)
				rx = core_mmu_add_mapping(mt, rx_pa, sz);

			if (!rx) {
				if (tx_alloced && tx)
					core_mmu_remove_mapping(mt, tx, sz);
				rc = FFA_NO_MEMORY;
				goto out;
			}
		} else {
			rc = map_buf(tx_pa, sz, &tx);
			if (rc)
				goto out;
			rc = map_buf(rx_pa, sz, &rx);
			if (rc) {
				unmap_buf(tx, sz);
				goto out;
			}
		}
		rxtx->tx = tx;
		rxtx->rx = rx;
	} else {
		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}

		if (!virt_to_phys((void *)tx_pa) ||
		    !virt_to_phys((void *)rx_pa)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}

		rxtx->tx = (void *)tx_pa;
		rxtx->rx = (void *)rx_pa;
	}

	rxtx->size = sz;
	rxtx->tx_is_mine = true;
	ret_fid = FFA_SUCCESS_32;
	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
out:
	cpu_spin_unlock(&rxtx->spinlock);
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

void spmc_handle_rxtx_unmap(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	uint32_t ret_fid = FFA_ERROR;
	int rc = FFA_INVALID_PARAMETERS;

	cpu_spin_lock(&rxtx->spinlock);

	if (!rxtx->size)
		goto out;

	/* We don't unmap the SP memory as the SP might still use it */
	if (is_nw_buf(rxtx)) {
		unmap_buf(rxtx->rx, rxtx->size);
		unmap_buf(rxtx->tx, rxtx->size);
	}
	rxtx->size = 0;
	rxtx->rx = NULL;
	rxtx->tx = NULL;
	ret_fid = FFA_SUCCESS_32;
	rc = 0;
out:
	cpu_spin_unlock(&rxtx->spinlock);
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

void spmc_handle_rx_release(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	uint32_t ret_fid = 0;
	int rc = 0;

	cpu_spin_lock(&rxtx->spinlock);
	/* The sender's RX is our TX */
	if (!rxtx->size || rxtx->tx_is_mine) {
		ret_fid = FFA_ERROR;
		rc = FFA_DENIED;
	} else {
		ret_fid = FFA_SUCCESS_32;
		rc = 0;
		rxtx->tx_is_mine = true;
	}
	cpu_spin_unlock(&rxtx->spinlock);

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
{
	return !w0 && !w1 && !w2 && !w3;
}

static bool is_my_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
{
	/*
	 * This depends on which UUID we have been assigned.
	 * TODO add a generic mechanism to obtain our UUID.
437 * 438 * The test below is for the hard coded UUID 439 * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b 440 */ 441 return w0 == my_uuid_words[0] && w1 == my_uuid_words[1] && 442 w2 == my_uuid_words[2] && w3 == my_uuid_words[3]; 443 } 444 445 TEE_Result spmc_fill_partition_entry(uint32_t ffa_vers, void *buf, size_t blen, 446 size_t idx, uint16_t endpoint_id, 447 uint16_t execution_context, 448 uint32_t part_props, 449 const uint32_t uuid_words[4]) 450 { 451 struct ffa_partition_info_x *fpi = NULL; 452 size_t fpi_size = sizeof(*fpi); 453 454 if (ffa_vers >= FFA_VERSION_1_1) 455 fpi_size += FFA_UUID_SIZE; 456 457 if ((idx + 1) * fpi_size > blen) 458 return TEE_ERROR_OUT_OF_MEMORY; 459 460 fpi = (void *)((vaddr_t)buf + idx * fpi_size); 461 fpi->id = endpoint_id; 462 /* Number of execution contexts implemented by this partition */ 463 fpi->execution_context = execution_context; 464 465 fpi->partition_properties = part_props; 466 467 if (ffa_vers >= FFA_VERSION_1_1) { 468 if (uuid_words) 469 memcpy(fpi->uuid, uuid_words, FFA_UUID_SIZE); 470 else 471 memset(fpi->uuid, 0, FFA_UUID_SIZE); 472 } 473 474 return TEE_SUCCESS; 475 } 476 477 static int handle_partition_info_get_all(size_t *elem_count, 478 struct ffa_rxtx *rxtx, bool count_only) 479 { 480 if (!count_only) { 481 /* Add OP-TEE SP */ 482 if (spmc_fill_partition_entry(rxtx->ffa_vers, rxtx->tx, 483 rxtx->size, 0, my_endpoint_id, 484 CFG_TEE_CORE_NB_CORE, 485 my_part_props, my_uuid_words)) 486 return FFA_NO_MEMORY; 487 } 488 *elem_count = 1; 489 490 if (IS_ENABLED(CFG_SECURE_PARTITION)) { 491 if (sp_partition_info_get(rxtx->ffa_vers, rxtx->tx, rxtx->size, 492 NULL, elem_count, count_only)) 493 return FFA_NO_MEMORY; 494 } 495 496 return FFA_OK; 497 } 498 499 void spmc_handle_partition_info_get(struct thread_smc_args *args, 500 struct ffa_rxtx *rxtx) 501 { 502 TEE_Result res = TEE_SUCCESS; 503 uint32_t ret_fid = FFA_ERROR; 504 uint32_t rc = 0; 505 bool count_only = args->a5 & FFA_PARTITION_INFO_GET_COUNT_FLAG; 506 507 if (!count_only) { 508 cpu_spin_lock(&rxtx->spinlock); 509 510 if (!rxtx->size || !rxtx->tx_is_mine) { 511 rc = FFA_BUSY; 512 goto out; 513 } 514 } 515 516 if (is_nil_uuid(args->a1, args->a2, args->a3, args->a4)) { 517 size_t elem_count = 0; 518 519 ret_fid = handle_partition_info_get_all(&elem_count, rxtx, 520 count_only); 521 522 if (ret_fid) { 523 rc = ret_fid; 524 ret_fid = FFA_ERROR; 525 } else { 526 ret_fid = FFA_SUCCESS_32; 527 rc = elem_count; 528 } 529 530 goto out; 531 } 532 533 if (is_my_uuid(args->a1, args->a2, args->a3, args->a4)) { 534 if (!count_only) { 535 res = spmc_fill_partition_entry(rxtx->ffa_vers, 536 rxtx->tx, rxtx->size, 0, 537 my_endpoint_id, 538 CFG_TEE_CORE_NB_CORE, 539 my_part_props, 540 my_uuid_words); 541 if (res) { 542 ret_fid = FFA_ERROR; 543 rc = FFA_INVALID_PARAMETERS; 544 goto out; 545 } 546 } 547 rc = 1; 548 } else if (IS_ENABLED(CFG_SECURE_PARTITION)) { 549 uint32_t uuid_array[4] = { 0 }; 550 TEE_UUID uuid = { }; 551 size_t count = 0; 552 553 uuid_array[0] = args->a1; 554 uuid_array[1] = args->a2; 555 uuid_array[2] = args->a3; 556 uuid_array[3] = args->a4; 557 tee_uuid_from_octets(&uuid, (uint8_t *)uuid_array); 558 559 res = sp_partition_info_get(rxtx->ffa_vers, rxtx->tx, 560 rxtx->size, &uuid, &count, 561 count_only); 562 if (res != TEE_SUCCESS) { 563 ret_fid = FFA_ERROR; 564 rc = FFA_INVALID_PARAMETERS; 565 goto out; 566 } 567 rc = count; 568 } else { 569 ret_fid = FFA_ERROR; 570 rc = FFA_INVALID_PARAMETERS; 571 goto out; 572 } 573 574 ret_fid = FFA_SUCCESS_32; 575 576 out: 577 spmc_set_args(args, 
ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
	if (!count_only) {
		rxtx->tx_is_mine = false;
		cpu_spin_unlock(&rxtx->spinlock);
	}
}

static void spmc_handle_run(struct thread_smc_args *args)
{
	uint16_t endpoint = FFA_TARGET_INFO_GET_SP_ID(args->a1);
	uint16_t thread_id = FFA_TARGET_INFO_GET_VCPU_ID(args->a1);
	uint32_t rc = FFA_OK;

	if (endpoint != my_endpoint_id) {
		/*
		 * The endpoint should be an SP, try to resume the SP from
		 * preempted into busy state.
		 */
		rc = spmc_sp_resume_from_preempted(endpoint);
		if (rc)
			goto out;
	}

	thread_resume_from_rpc(thread_id, 0, 0, 0, 0);

	/* thread_resume_from_rpc() returns only if thread_id is invalid */
	rc = FFA_INVALID_PARAMETERS;

out:
	spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}
#endif /*CFG_CORE_SEL1_SPMC*/

static uint32_t spmc_enable_async_notif(uint32_t bottom_half_value,
					uint16_t vm_id)
{
	uint32_t old_itr_status = 0;

	if (!spmc_notif_is_ready) {
		/*
		 * This should never happen, not if normal world respects the
		 * exchanged capabilities.
		 */
		EMSG("Asynchronous notifications are not ready");
		return TEE_ERROR_NOT_IMPLEMENTED;
	}

	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
	do_bottom_half_value = bottom_half_value;
	if (!IS_ENABLED(CFG_CORE_SEL1_SPMC))
		notif_vm_id = vm_id;
	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);

	notif_deliver_atomic_event(NOTIF_EVENT_STARTED);
	return TEE_SUCCESS;
}

static void handle_yielding_call(struct thread_smc_args *args,
				 uint32_t direct_resp_fid)
{
	TEE_Result res = 0;

	thread_check_canaries();

#ifdef ARM64
	/* Saving this for an eventual RPC */
	thread_get_core_local()->direct_resp_fid = direct_resp_fid;
#endif

	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
		/* Note connection to struct thread_rpc_arg::ret */
		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
				       0);
		res = TEE_ERROR_BAD_PARAMETERS;
	} else {
		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
				     args->a6, args->a7);
		res = TEE_ERROR_BUSY;
	}
	spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1),
		      0, res, 0, 0);
}

static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
{
	uint64_t cookie = reg_pair_to_64(a5, a4);
	uint32_t res = 0;

	res = mobj_ffa_unregister_by_cookie(cookie);
	switch (res) {
	case TEE_SUCCESS:
	case TEE_ERROR_ITEM_NOT_FOUND:
		return 0;
	case TEE_ERROR_BUSY:
		EMSG("res %#"PRIx32, res);
		return FFA_BUSY;
	default:
		EMSG("res %#"PRIx32, res);
		return FFA_INVALID_PARAMETERS;
	}
}

static void handle_blocking_call(struct thread_smc_args *args,
				 uint32_t direct_resp_fid)
{
	uint32_t sec_caps = 0;

	switch (args->a3) {
	case OPTEE_FFA_GET_API_VERSION:
		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
			      0);
		break;
	case OPTEE_FFA_GET_OS_VERSION:
		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
			      CFG_OPTEE_REVISION_MAJOR,
			      CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1);
		break;
	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
		sec_caps = OPTEE_FFA_SEC_CAP_ARG_OFFSET;
		if (spmc_notif_is_ready)
			sec_caps |=
OPTEE_FFA_SEC_CAP_ASYNC_NOTIF; 701 spmc_set_args(args, direct_resp_fid, 702 swap_src_dst(args->a1), 0, 0, 703 THREAD_RPC_MAX_NUM_PARAMS, sec_caps); 704 break; 705 case OPTEE_FFA_UNREGISTER_SHM: 706 spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0, 707 handle_unregister_shm(args->a4, args->a5), 0, 0); 708 break; 709 case OPTEE_FFA_ENABLE_ASYNC_NOTIF: 710 spmc_set_args(args, direct_resp_fid, 711 swap_src_dst(args->a1), 0, 712 spmc_enable_async_notif(args->a4, 713 FFA_SRC(args->a1)), 714 0, 0); 715 break; 716 default: 717 EMSG("Unhandled blocking service ID %#"PRIx32, 718 (uint32_t)args->a3); 719 spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0, 720 TEE_ERROR_BAD_PARAMETERS, 0, 0); 721 } 722 } 723 724 static void handle_framework_direct_request(struct thread_smc_args *args, 725 struct ffa_rxtx *rxtx, 726 uint32_t direct_resp_fid) 727 { 728 uint32_t w0 = FFA_ERROR; 729 uint32_t w1 = FFA_PARAM_MBZ; 730 uint32_t w2 = FFA_NOT_SUPPORTED; 731 uint32_t w3 = FFA_PARAM_MBZ; 732 733 switch (args->a2 & FFA_MSG_TYPE_MASK) { 734 case FFA_MSG_SEND_VM_CREATED: 735 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 736 uint16_t guest_id = args->a5; 737 TEE_Result res = virt_guest_created(guest_id); 738 739 w0 = direct_resp_fid; 740 w1 = swap_src_dst(args->a1); 741 w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_CREATED; 742 if (res == TEE_SUCCESS) 743 w3 = FFA_OK; 744 else if (res == TEE_ERROR_OUT_OF_MEMORY) 745 w3 = FFA_DENIED; 746 else 747 w3 = FFA_INVALID_PARAMETERS; 748 } 749 break; 750 case FFA_MSG_SEND_VM_DESTROYED: 751 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 752 uint16_t guest_id = args->a5; 753 TEE_Result res = virt_guest_destroyed(guest_id); 754 755 w0 = direct_resp_fid; 756 w1 = swap_src_dst(args->a1); 757 w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_DESTROYED; 758 if (res == TEE_SUCCESS) 759 w3 = FFA_OK; 760 else 761 w3 = FFA_INVALID_PARAMETERS; 762 } 763 break; 764 case FFA_MSG_VERSION_REQ: 765 w0 = direct_resp_fid; 766 w1 = swap_src_dst(args->a1); 767 w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_VERSION_RESP; 768 w3 = spmc_exchange_version(args->a3, rxtx); 769 break; 770 default: 771 break; 772 } 773 spmc_set_args(args, w0, w1, w2, w3, FFA_PARAM_MBZ, FFA_PARAM_MBZ); 774 } 775 776 static void handle_direct_request(struct thread_smc_args *args, 777 struct ffa_rxtx *rxtx) 778 { 779 uint32_t direct_resp_fid = 0; 780 781 if (IS_ENABLED(CFG_SECURE_PARTITION) && 782 FFA_DST(args->a1) != my_endpoint_id) { 783 spmc_sp_start_thread(args); 784 return; 785 } 786 787 if (OPTEE_SMC_IS_64(args->a0)) 788 direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_64; 789 else 790 direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_32; 791 792 if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) { 793 handle_framework_direct_request(args, rxtx, direct_resp_fid); 794 return; 795 } 796 797 if (IS_ENABLED(CFG_NS_VIRTUALIZATION) && 798 virt_set_guest(get_sender_id(args->a1))) { 799 spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0, 800 TEE_ERROR_ITEM_NOT_FOUND, 0, 0); 801 return; 802 } 803 804 if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT)) 805 handle_yielding_call(args, direct_resp_fid); 806 else 807 handle_blocking_call(args, direct_resp_fid); 808 809 /* 810 * Note that handle_yielding_call() typically only returns if a 811 * thread cannot be allocated or found. virt_unset_guest() is also 812 * called from thread_state_suspend() and thread_state_free(). 
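	 * Either way, each successful virt_set_guest() above should be
	 * balanced by exactly one virt_unset_guest() call.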
813 */ 814 virt_unset_guest(); 815 } 816 817 int spmc_read_mem_transaction(uint32_t ffa_vers, void *buf, size_t blen, 818 struct ffa_mem_transaction_x *trans) 819 { 820 uint16_t mem_reg_attr = 0; 821 uint32_t flags = 0; 822 uint32_t count = 0; 823 uint32_t offs = 0; 824 uint32_t size = 0; 825 size_t n = 0; 826 827 if (!IS_ALIGNED_WITH_TYPE(buf, uint64_t)) 828 return FFA_INVALID_PARAMETERS; 829 830 if (ffa_vers >= FFA_VERSION_1_1) { 831 struct ffa_mem_transaction_1_1 *descr = NULL; 832 833 if (blen < sizeof(*descr)) 834 return FFA_INVALID_PARAMETERS; 835 836 descr = buf; 837 trans->sender_id = READ_ONCE(descr->sender_id); 838 mem_reg_attr = READ_ONCE(descr->mem_reg_attr); 839 flags = READ_ONCE(descr->flags); 840 trans->global_handle = READ_ONCE(descr->global_handle); 841 trans->tag = READ_ONCE(descr->tag); 842 843 count = READ_ONCE(descr->mem_access_count); 844 size = READ_ONCE(descr->mem_access_size); 845 offs = READ_ONCE(descr->mem_access_offs); 846 } else { 847 struct ffa_mem_transaction_1_0 *descr = NULL; 848 849 if (blen < sizeof(*descr)) 850 return FFA_INVALID_PARAMETERS; 851 852 descr = buf; 853 trans->sender_id = READ_ONCE(descr->sender_id); 854 mem_reg_attr = READ_ONCE(descr->mem_reg_attr); 855 flags = READ_ONCE(descr->flags); 856 trans->global_handle = READ_ONCE(descr->global_handle); 857 trans->tag = READ_ONCE(descr->tag); 858 859 count = READ_ONCE(descr->mem_access_count); 860 size = sizeof(struct ffa_mem_access); 861 offs = offsetof(struct ffa_mem_transaction_1_0, 862 mem_access_array); 863 } 864 865 if (mem_reg_attr > UINT8_MAX || flags > UINT8_MAX || 866 size > UINT8_MAX || count > UINT8_MAX || offs > UINT16_MAX) 867 return FFA_INVALID_PARAMETERS; 868 869 /* Check that the endpoint memory access descriptor array fits */ 870 if (MUL_OVERFLOW(size, count, &n) || ADD_OVERFLOW(offs, n, &n) || 871 n > blen) 872 return FFA_INVALID_PARAMETERS; 873 874 trans->mem_reg_attr = mem_reg_attr; 875 trans->flags = flags; 876 trans->mem_access_size = size; 877 trans->mem_access_count = count; 878 trans->mem_access_offs = offs; 879 return 0; 880 } 881 882 #if defined(CFG_CORE_SEL1_SPMC) 883 static int get_acc_perms(vaddr_t mem_acc_base, unsigned int mem_access_size, 884 unsigned int mem_access_count, uint8_t *acc_perms, 885 unsigned int *region_offs) 886 { 887 struct ffa_mem_access_perm *descr = NULL; 888 struct ffa_mem_access *mem_acc = NULL; 889 unsigned int n = 0; 890 891 for (n = 0; n < mem_access_count; n++) { 892 mem_acc = (void *)(mem_acc_base + mem_access_size * n); 893 descr = &mem_acc->access_perm; 894 if (READ_ONCE(descr->endpoint_id) == my_endpoint_id) { 895 *acc_perms = READ_ONCE(descr->perm); 896 *region_offs = READ_ONCE(mem_acc[n].region_offs); 897 return 0; 898 } 899 } 900 901 return FFA_INVALID_PARAMETERS; 902 } 903 904 static int mem_share_init(struct ffa_mem_transaction_x *mem_trans, void *buf, 905 size_t blen, unsigned int *page_count, 906 unsigned int *region_count, size_t *addr_range_offs) 907 { 908 const uint16_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR; 909 const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW; 910 struct ffa_mem_region *region_descr = NULL; 911 unsigned int region_descr_offs = 0; 912 uint8_t mem_acc_perm = 0; 913 size_t n = 0; 914 915 if (mem_trans->mem_reg_attr != exp_mem_reg_attr) 916 return FFA_INVALID_PARAMETERS; 917 918 /* Check that the access permissions matches what's expected */ 919 if (get_acc_perms((vaddr_t)buf + mem_trans->mem_access_offs, 920 mem_trans->mem_access_size, 921 mem_trans->mem_access_count, 922 &mem_acc_perm, ®ion_descr_offs) || 
923 mem_acc_perm != exp_mem_acc_perm) 924 return FFA_INVALID_PARAMETERS; 925 926 /* Check that the Composite memory region descriptor fits */ 927 if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) || 928 n > blen) 929 return FFA_INVALID_PARAMETERS; 930 931 if (!IS_ALIGNED_WITH_TYPE((vaddr_t)buf + region_descr_offs, 932 struct ffa_mem_region)) 933 return FFA_INVALID_PARAMETERS; 934 935 region_descr = (struct ffa_mem_region *)((vaddr_t)buf + 936 region_descr_offs); 937 *page_count = READ_ONCE(region_descr->total_page_count); 938 *region_count = READ_ONCE(region_descr->address_range_count); 939 *addr_range_offs = n; 940 return 0; 941 } 942 943 static int add_mem_share_helper(struct mem_share_state *s, void *buf, 944 size_t flen) 945 { 946 unsigned int region_count = flen / sizeof(struct ffa_address_range); 947 struct ffa_address_range *arange = NULL; 948 unsigned int n = 0; 949 950 if (region_count > s->region_count) 951 region_count = s->region_count; 952 953 if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range)) 954 return FFA_INVALID_PARAMETERS; 955 arange = buf; 956 957 for (n = 0; n < region_count; n++) { 958 unsigned int page_count = READ_ONCE(arange[n].page_count); 959 uint64_t addr = READ_ONCE(arange[n].address); 960 961 if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx, 962 addr, page_count)) 963 return FFA_INVALID_PARAMETERS; 964 } 965 966 s->region_count -= region_count; 967 if (s->region_count) 968 return region_count * sizeof(*arange); 969 970 if (s->current_page_idx != s->page_count) 971 return FFA_INVALID_PARAMETERS; 972 973 return 0; 974 } 975 976 static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen) 977 { 978 int rc = 0; 979 980 rc = add_mem_share_helper(&s->share, buf, flen); 981 if (rc >= 0) { 982 if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) { 983 /* We're not at the end of the descriptor yet */ 984 if (s->share.region_count) 985 return s->frag_offset; 986 987 /* We're done */ 988 rc = 0; 989 } else { 990 rc = FFA_INVALID_PARAMETERS; 991 } 992 } 993 994 SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link); 995 if (rc < 0) 996 mobj_ffa_sel1_spmc_delete(s->share.mf); 997 else 998 mobj_ffa_push_to_inactive(s->share.mf); 999 free(s); 1000 1001 return rc; 1002 } 1003 1004 static bool is_sp_share(struct ffa_mem_transaction_x *mem_trans, 1005 void *buf) 1006 { 1007 struct ffa_mem_access_perm *perm = NULL; 1008 struct ffa_mem_access *mem_acc = NULL; 1009 1010 if (!IS_ENABLED(CFG_SECURE_PARTITION)) 1011 return false; 1012 1013 if (mem_trans->mem_access_count < 1) 1014 return false; 1015 1016 mem_acc = (void *)((vaddr_t)buf + mem_trans->mem_access_offs); 1017 perm = &mem_acc->access_perm; 1018 1019 /* 1020 * perm->endpoint_id is read here only to check if the endpoint is 1021 * OP-TEE. We do read it later on again, but there are some additional 1022 * checks there to make sure that the data is correct. 
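	 * Normal world may change the buffer between the two reads, but
	 * that should at worst make those later checks reject the request.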
1023 */ 1024 return READ_ONCE(perm->endpoint_id) != my_endpoint_id; 1025 } 1026 1027 static int add_mem_share(struct ffa_mem_transaction_x *mem_trans, 1028 tee_mm_entry_t *mm, void *buf, size_t blen, 1029 size_t flen, uint64_t *global_handle) 1030 { 1031 int rc = 0; 1032 struct mem_share_state share = { }; 1033 size_t addr_range_offs = 0; 1034 uint64_t cookie = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID; 1035 size_t n = 0; 1036 1037 rc = mem_share_init(mem_trans, buf, flen, &share.page_count, 1038 &share.region_count, &addr_range_offs); 1039 if (rc) 1040 return rc; 1041 1042 if (MUL_OVERFLOW(share.region_count, 1043 sizeof(struct ffa_address_range), &n) || 1044 ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen) 1045 return FFA_INVALID_PARAMETERS; 1046 1047 if (mem_trans->global_handle) 1048 cookie = mem_trans->global_handle; 1049 share.mf = mobj_ffa_sel1_spmc_new(cookie, share.page_count); 1050 if (!share.mf) 1051 return FFA_NO_MEMORY; 1052 1053 if (flen != blen) { 1054 struct mem_frag_state *s = calloc(sizeof(*s), 1); 1055 1056 if (!s) { 1057 rc = FFA_NO_MEMORY; 1058 goto err; 1059 } 1060 s->share = share; 1061 s->mm = mm; 1062 s->frag_offset = addr_range_offs; 1063 1064 SLIST_INSERT_HEAD(&frag_state_head, s, link); 1065 rc = add_mem_share_frag(s, (char *)buf + addr_range_offs, 1066 flen - addr_range_offs); 1067 1068 if (rc >= 0) 1069 *global_handle = mobj_ffa_get_cookie(share.mf); 1070 1071 return rc; 1072 } 1073 1074 rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs, 1075 flen - addr_range_offs); 1076 if (rc) { 1077 /* 1078 * Number of consumed bytes may be returned instead of 0 for 1079 * done. 1080 */ 1081 rc = FFA_INVALID_PARAMETERS; 1082 goto err; 1083 } 1084 1085 *global_handle = mobj_ffa_push_to_inactive(share.mf); 1086 1087 return 0; 1088 err: 1089 mobj_ffa_sel1_spmc_delete(share.mf); 1090 return rc; 1091 } 1092 1093 static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen, 1094 unsigned int page_count, 1095 uint64_t *global_handle, struct ffa_rxtx *rxtx) 1096 { 1097 struct ffa_mem_transaction_x mem_trans = { }; 1098 int rc = 0; 1099 size_t len = 0; 1100 void *buf = NULL; 1101 tee_mm_entry_t *mm = NULL; 1102 vaddr_t offs = pbuf & SMALL_PAGE_MASK; 1103 1104 if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len)) 1105 return FFA_INVALID_PARAMETERS; 1106 if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len)) 1107 return FFA_INVALID_PARAMETERS; 1108 1109 /* 1110 * Check that the length reported in flen is covered by len even 1111 * if the offset is taken into account. 
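	 *
	 * For example, with 4 KiB pages, page_count == 2 gives
	 * len == 0x2000; an offs of 0x800 then allows an flen of at most
	 * 0x1800 bytes.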
1112 */ 1113 if (len < flen || len - offs < flen) 1114 return FFA_INVALID_PARAMETERS; 1115 1116 mm = tee_mm_alloc(&tee_mm_shm, len); 1117 if (!mm) 1118 return FFA_NO_MEMORY; 1119 1120 if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf, 1121 page_count, MEM_AREA_NSEC_SHM)) { 1122 rc = FFA_INVALID_PARAMETERS; 1123 goto out; 1124 } 1125 buf = (void *)(tee_mm_get_smem(mm) + offs); 1126 1127 cpu_spin_lock(&rxtx->spinlock); 1128 rc = spmc_read_mem_transaction(rxtx->ffa_vers, buf, flen, &mem_trans); 1129 if (!rc && IS_ENABLED(CFG_NS_VIRTUALIZATION) && 1130 virt_set_guest(mem_trans.sender_id)) 1131 rc = FFA_DENIED; 1132 if (!rc) 1133 rc = add_mem_share(&mem_trans, mm, buf, blen, flen, 1134 global_handle); 1135 virt_unset_guest(); 1136 cpu_spin_unlock(&rxtx->spinlock); 1137 if (rc > 0) 1138 return rc; 1139 1140 core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count); 1141 out: 1142 tee_mm_free(mm); 1143 return rc; 1144 } 1145 1146 static int handle_mem_share_rxbuf(size_t blen, size_t flen, 1147 uint64_t *global_handle, 1148 struct ffa_rxtx *rxtx) 1149 { 1150 struct ffa_mem_transaction_x mem_trans = { }; 1151 int rc = FFA_DENIED; 1152 1153 cpu_spin_lock(&rxtx->spinlock); 1154 1155 if (!rxtx->rx || flen > rxtx->size) 1156 goto out; 1157 1158 rc = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, flen, 1159 &mem_trans); 1160 if (rc) 1161 goto out; 1162 if (is_sp_share(&mem_trans, rxtx->rx)) { 1163 rc = spmc_sp_add_share(&mem_trans, rxtx, blen, 1164 global_handle, NULL); 1165 goto out; 1166 } 1167 1168 if (IS_ENABLED(CFG_NS_VIRTUALIZATION) && 1169 virt_set_guest(mem_trans.sender_id)) 1170 goto out; 1171 1172 rc = add_mem_share(&mem_trans, NULL, rxtx->rx, blen, flen, 1173 global_handle); 1174 1175 virt_unset_guest(); 1176 1177 out: 1178 cpu_spin_unlock(&rxtx->spinlock); 1179 1180 return rc; 1181 } 1182 1183 static void handle_mem_share(struct thread_smc_args *args, 1184 struct ffa_rxtx *rxtx) 1185 { 1186 uint32_t tot_len = args->a1; 1187 uint32_t frag_len = args->a2; 1188 uint64_t addr = args->a3; 1189 uint32_t page_count = args->a4; 1190 uint32_t ret_w1 = 0; 1191 uint32_t ret_w2 = FFA_INVALID_PARAMETERS; 1192 uint32_t ret_w3 = 0; 1193 uint32_t ret_fid = FFA_ERROR; 1194 uint64_t global_handle = 0; 1195 int rc = 0; 1196 1197 /* Check that the MBZs are indeed 0 */ 1198 if (args->a5 || args->a6 || args->a7) 1199 goto out; 1200 1201 /* Check that fragment length doesn't exceed total length */ 1202 if (frag_len > tot_len) 1203 goto out; 1204 1205 /* Check for 32-bit calling convention */ 1206 if (args->a0 == FFA_MEM_SHARE_32) 1207 addr &= UINT32_MAX; 1208 1209 if (!addr) { 1210 /* 1211 * The memory transaction descriptor is passed via our rx 1212 * buffer. 
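		 * A zero address selects this path, in which case
		 * page_count must also be zero, as checked just below.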
1213 */ 1214 if (page_count) 1215 goto out; 1216 rc = handle_mem_share_rxbuf(tot_len, frag_len, &global_handle, 1217 rxtx); 1218 } else { 1219 rc = handle_mem_share_tmem(addr, tot_len, frag_len, page_count, 1220 &global_handle, rxtx); 1221 } 1222 if (rc < 0) { 1223 ret_w2 = rc; 1224 } else if (rc > 0) { 1225 ret_fid = FFA_MEM_FRAG_RX; 1226 ret_w3 = rc; 1227 reg_pair_from_64(global_handle, &ret_w2, &ret_w1); 1228 } else { 1229 ret_fid = FFA_SUCCESS_32; 1230 reg_pair_from_64(global_handle, &ret_w3, &ret_w2); 1231 } 1232 out: 1233 spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0); 1234 } 1235 1236 static struct mem_frag_state *get_frag_state(uint64_t global_handle) 1237 { 1238 struct mem_frag_state *s = NULL; 1239 1240 SLIST_FOREACH(s, &frag_state_head, link) 1241 if (mobj_ffa_get_cookie(s->share.mf) == global_handle) 1242 return s; 1243 1244 return NULL; 1245 } 1246 1247 static void handle_mem_frag_tx(struct thread_smc_args *args, 1248 struct ffa_rxtx *rxtx) 1249 { 1250 uint64_t global_handle = reg_pair_to_64(args->a2, args->a1); 1251 size_t flen = args->a3; 1252 uint32_t endpoint_id = args->a4; 1253 struct mem_frag_state *s = NULL; 1254 tee_mm_entry_t *mm = NULL; 1255 unsigned int page_count = 0; 1256 void *buf = NULL; 1257 uint32_t ret_w1 = 0; 1258 uint32_t ret_w2 = 0; 1259 uint32_t ret_w3 = 0; 1260 uint32_t ret_fid = 0; 1261 int rc = 0; 1262 1263 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 1264 uint16_t guest_id = endpoint_id >> 16; 1265 1266 if (!guest_id || virt_set_guest(guest_id)) { 1267 rc = FFA_INVALID_PARAMETERS; 1268 goto out_set_rc; 1269 } 1270 } 1271 1272 /* 1273 * Currently we're only doing this for fragmented FFA_MEM_SHARE_* 1274 * requests. 1275 */ 1276 1277 cpu_spin_lock(&rxtx->spinlock); 1278 1279 s = get_frag_state(global_handle); 1280 if (!s) { 1281 rc = FFA_INVALID_PARAMETERS; 1282 goto out; 1283 } 1284 1285 mm = s->mm; 1286 if (mm) { 1287 if (flen > tee_mm_get_bytes(mm)) { 1288 rc = FFA_INVALID_PARAMETERS; 1289 goto out; 1290 } 1291 page_count = s->share.page_count; 1292 buf = (void *)tee_mm_get_smem(mm); 1293 } else { 1294 if (flen > rxtx->size) { 1295 rc = FFA_INVALID_PARAMETERS; 1296 goto out; 1297 } 1298 buf = rxtx->rx; 1299 } 1300 1301 rc = add_mem_share_frag(s, buf, flen); 1302 out: 1303 virt_unset_guest(); 1304 cpu_spin_unlock(&rxtx->spinlock); 1305 1306 if (rc <= 0 && mm) { 1307 core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count); 1308 tee_mm_free(mm); 1309 } 1310 1311 out_set_rc: 1312 if (rc < 0) { 1313 ret_fid = FFA_ERROR; 1314 ret_w2 = rc; 1315 } else if (rc > 0) { 1316 ret_fid = FFA_MEM_FRAG_RX; 1317 ret_w3 = rc; 1318 reg_pair_from_64(global_handle, &ret_w2, &ret_w1); 1319 } else { 1320 ret_fid = FFA_SUCCESS_32; 1321 reg_pair_from_64(global_handle, &ret_w3, &ret_w2); 1322 } 1323 1324 spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0); 1325 } 1326 1327 static void handle_mem_reclaim(struct thread_smc_args *args) 1328 { 1329 uint32_t ret_val = FFA_INVALID_PARAMETERS; 1330 uint32_t ret_fid = FFA_ERROR; 1331 uint64_t cookie = 0; 1332 1333 if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7) 1334 goto out; 1335 1336 cookie = reg_pair_to_64(args->a2, args->a1); 1337 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 1338 uint16_t guest_id = 0; 1339 1340 if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) { 1341 guest_id = virt_find_guest_by_cookie(cookie); 1342 } else { 1343 guest_id = (cookie >> FFA_MEMORY_HANDLE_PRTN_SHIFT) & 1344 FFA_MEMORY_HANDLE_PRTN_MASK; 1345 } 1346 if (!guest_id || virt_set_guest(guest_id)) 1347 goto out; 1348 } 1349 1350 switch 
(mobj_ffa_sel1_spmc_reclaim(cookie)) { 1351 case TEE_SUCCESS: 1352 ret_fid = FFA_SUCCESS_32; 1353 ret_val = 0; 1354 break; 1355 case TEE_ERROR_ITEM_NOT_FOUND: 1356 DMSG("cookie %#"PRIx64" not found", cookie); 1357 ret_val = FFA_INVALID_PARAMETERS; 1358 break; 1359 default: 1360 DMSG("cookie %#"PRIx64" busy", cookie); 1361 ret_val = FFA_DENIED; 1362 break; 1363 } 1364 1365 virt_unset_guest(); 1366 1367 out: 1368 spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0); 1369 } 1370 1371 static void handle_notification_bitmap_create(struct thread_smc_args *args) 1372 { 1373 uint32_t ret_val = FFA_INVALID_PARAMETERS; 1374 uint32_t ret_fid = FFA_ERROR; 1375 uint32_t old_itr_status = 0; 1376 1377 if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 && 1378 !args->a5 && !args->a6 && !args->a7) { 1379 uint16_t vm_id = args->a1; 1380 1381 old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock); 1382 1383 if (notif_vm_id_valid) { 1384 if (vm_id == notif_vm_id) 1385 ret_val = FFA_DENIED; 1386 else 1387 ret_val = FFA_NO_MEMORY; 1388 } else { 1389 notif_vm_id = vm_id; 1390 notif_vm_id_valid = true; 1391 ret_val = FFA_OK; 1392 ret_fid = FFA_SUCCESS_32; 1393 } 1394 1395 cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status); 1396 } 1397 1398 spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0); 1399 } 1400 1401 static void handle_notification_bitmap_destroy(struct thread_smc_args *args) 1402 { 1403 uint32_t ret_val = FFA_INVALID_PARAMETERS; 1404 uint32_t ret_fid = FFA_ERROR; 1405 uint32_t old_itr_status = 0; 1406 1407 if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 && 1408 !args->a5 && !args->a6 && !args->a7) { 1409 uint16_t vm_id = args->a1; 1410 1411 old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock); 1412 1413 if (notif_vm_id_valid && vm_id == notif_vm_id) { 1414 if (notif_pending_bitmap || notif_bound_bitmap) { 1415 ret_val = FFA_DENIED; 1416 } else { 1417 notif_vm_id_valid = false; 1418 ret_val = FFA_OK; 1419 ret_fid = FFA_SUCCESS_32; 1420 } 1421 } 1422 1423 cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status); 1424 } 1425 1426 spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0); 1427 } 1428 1429 static void handle_notification_bind(struct thread_smc_args *args) 1430 { 1431 uint32_t ret_val = FFA_INVALID_PARAMETERS; 1432 uint32_t ret_fid = FFA_ERROR; 1433 uint32_t old_itr_status = 0; 1434 uint64_t bitmap = 0; 1435 uint16_t vm_id = 0; 1436 1437 if (args->a5 || args->a6 || args->a7) 1438 goto out; 1439 if (args->a2) { 1440 /* We only deal with global notifications for now */ 1441 ret_val = FFA_NOT_SUPPORTED; 1442 goto out; 1443 } 1444 1445 /* The destination of the eventual notification */ 1446 vm_id = FFA_DST(args->a1); 1447 bitmap = reg_pair_to_64(args->a4, args->a3); 1448 1449 old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock); 1450 1451 if (notif_vm_id_valid && vm_id == notif_vm_id) { 1452 if (bitmap & notif_bound_bitmap) { 1453 ret_val = FFA_DENIED; 1454 } else { 1455 notif_bound_bitmap |= bitmap; 1456 ret_val = FFA_OK; 1457 ret_fid = FFA_SUCCESS_32; 1458 } 1459 } 1460 1461 cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status); 1462 out: 1463 spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0); 1464 } 1465 1466 static void handle_notification_unbind(struct thread_smc_args *args) 1467 { 1468 uint32_t ret_val = FFA_INVALID_PARAMETERS; 1469 uint32_t ret_fid = FFA_ERROR; 1470 uint32_t old_itr_status = 0; 1471 uint64_t bitmap = 0; 1472 uint16_t vm_id = 0; 1473 1474 if (args->a2 || args->a5 || args->a6 || args->a7) 1475 goto out; 1476 1477 /* The destination of 
the eventual notification */ 1478 vm_id = FFA_DST(args->a1); 1479 bitmap = reg_pair_to_64(args->a4, args->a3); 1480 1481 old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock); 1482 1483 if (notif_vm_id_valid && vm_id == notif_vm_id) { 1484 /* 1485 * Spec says: 1486 * At least one notification is bound to another Sender or 1487 * is currently pending. 1488 * 1489 * Not sure what the intention is. 1490 */ 1491 if (bitmap & notif_pending_bitmap) { 1492 ret_val = FFA_DENIED; 1493 } else { 1494 notif_bound_bitmap &= ~bitmap; 1495 ret_val = FFA_OK; 1496 ret_fid = FFA_SUCCESS_32; 1497 } 1498 } 1499 1500 cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status); 1501 out: 1502 spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0); 1503 } 1504 1505 static void handle_notification_get(struct thread_smc_args *args) 1506 { 1507 uint32_t w2 = FFA_INVALID_PARAMETERS; 1508 uint32_t ret_fid = FFA_ERROR; 1509 uint32_t old_itr_status = 0; 1510 uint16_t vm_id = 0; 1511 uint32_t w3 = 0; 1512 1513 if (args->a5 || args->a6 || args->a7) 1514 goto out; 1515 if (!(args->a2 & 0x1)) { 1516 ret_fid = FFA_SUCCESS_32; 1517 w2 = 0; 1518 goto out; 1519 } 1520 vm_id = FFA_DST(args->a1); 1521 1522 old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock); 1523 1524 if (notif_vm_id_valid && vm_id == notif_vm_id) { 1525 reg_pair_from_64(notif_pending_bitmap, &w3, &w2); 1526 notif_pending_bitmap = 0; 1527 ret_fid = FFA_SUCCESS_32; 1528 } 1529 1530 cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status); 1531 out: 1532 spmc_set_args(args, ret_fid, 0, w2, w3, 0, 0); 1533 } 1534 1535 static void handle_notification_info_get(struct thread_smc_args *args) 1536 { 1537 uint32_t w2 = FFA_INVALID_PARAMETERS; 1538 uint32_t ret_fid = FFA_ERROR; 1539 1540 if (args->a1 || args->a2 || args->a3 || args->a4 || args->a5 || 1541 args->a6 || args->a7) 1542 goto out; 1543 1544 if (OPTEE_SMC_IS_64(args->a0)) 1545 ret_fid = FFA_SUCCESS_64; 1546 else 1547 ret_fid = FFA_SUCCESS_32; 1548 1549 /* 1550 * Note, we're only supporting physical OS kernel in normal world 1551 * with Global Notifications. 1552 * So one list of ID list registers (BIT[11:7]) 1553 * and one count of IDs (BIT[13:12] + 1) 1554 * and the VM is always 0. 
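	 *
	 * Hence w2 below is SHIFT_U32(1, 7): one ID list whose count
	 * field is zero, that is, a single ID, the VM ID 0.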
1555 */ 1556 w2 = SHIFT_U32(1, 7); 1557 out: 1558 spmc_set_args(args, ret_fid, 0, w2, 0, 0, 0); 1559 } 1560 1561 void thread_spmc_set_async_notif_intid(int intid) 1562 { 1563 assert(interrupt_can_raise_sgi(interrupt_get_main_chip())); 1564 notif_intid = intid; 1565 spmc_notif_is_ready = true; 1566 DMSG("Asynchronous notifications are ready"); 1567 } 1568 1569 void notif_send_async(uint32_t value) 1570 { 1571 uint32_t old_itr_status = 0; 1572 1573 old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock); 1574 assert(value == NOTIF_VALUE_DO_BOTTOM_HALF && spmc_notif_is_ready && 1575 do_bottom_half_value >= 0 && notif_intid >= 0); 1576 notif_pending_bitmap |= BIT64(do_bottom_half_value); 1577 interrupt_raise_sgi(interrupt_get_main_chip(), notif_intid, 1578 ITR_CPU_MASK_TO_THIS_CPU); 1579 cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status); 1580 } 1581 #else 1582 void __noreturn notif_send_async(uint32_t value __unused) 1583 { 1584 panic(); 1585 } 1586 #endif 1587 1588 /* Only called from assembly */ 1589 void thread_spmc_msg_recv(struct thread_smc_args *args); 1590 void thread_spmc_msg_recv(struct thread_smc_args *args) 1591 { 1592 assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL); 1593 switch (args->a0) { 1594 #if defined(CFG_CORE_SEL1_SPMC) 1595 case FFA_FEATURES: 1596 handle_features(args); 1597 break; 1598 case FFA_SPM_ID_GET: 1599 handle_spm_id_get(args); 1600 break; 1601 #ifdef ARM64 1602 case FFA_RXTX_MAP_64: 1603 #endif 1604 case FFA_RXTX_MAP_32: 1605 spmc_handle_rxtx_map(args, &my_rxtx); 1606 break; 1607 case FFA_RXTX_UNMAP: 1608 spmc_handle_rxtx_unmap(args, &my_rxtx); 1609 break; 1610 case FFA_RX_RELEASE: 1611 spmc_handle_rx_release(args, &my_rxtx); 1612 break; 1613 case FFA_PARTITION_INFO_GET: 1614 spmc_handle_partition_info_get(args, &my_rxtx); 1615 break; 1616 case FFA_RUN: 1617 spmc_handle_run(args); 1618 break; 1619 #endif /*CFG_CORE_SEL1_SPMC*/ 1620 case FFA_INTERRUPT: 1621 interrupt_main_handler(); 1622 if (IS_ENABLED(CFG_CORE_SEL1_SPMC)) 1623 spmc_set_args(args, FFA_NORMAL_WORLD_RESUME, 0, 0, 0, 1624 0, 0); 1625 else 1626 spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0); 1627 break; 1628 #ifdef ARM64 1629 case FFA_MSG_SEND_DIRECT_REQ_64: 1630 #endif 1631 case FFA_MSG_SEND_DIRECT_REQ_32: 1632 handle_direct_request(args, &my_rxtx); 1633 break; 1634 #if defined(CFG_CORE_SEL1_SPMC) 1635 #ifdef ARM64 1636 case FFA_MEM_SHARE_64: 1637 #endif 1638 case FFA_MEM_SHARE_32: 1639 handle_mem_share(args, &my_rxtx); 1640 break; 1641 case FFA_MEM_RECLAIM: 1642 if (!IS_ENABLED(CFG_SECURE_PARTITION) || 1643 !ffa_mem_reclaim(args, NULL)) 1644 handle_mem_reclaim(args); 1645 break; 1646 case FFA_MEM_FRAG_TX: 1647 handle_mem_frag_tx(args, &my_rxtx); 1648 break; 1649 case FFA_NOTIFICATION_BITMAP_CREATE: 1650 handle_notification_bitmap_create(args); 1651 break; 1652 case FFA_NOTIFICATION_BITMAP_DESTROY: 1653 handle_notification_bitmap_destroy(args); 1654 break; 1655 case FFA_NOTIFICATION_BIND: 1656 handle_notification_bind(args); 1657 break; 1658 case FFA_NOTIFICATION_UNBIND: 1659 handle_notification_unbind(args); 1660 break; 1661 case FFA_NOTIFICATION_GET: 1662 handle_notification_get(args); 1663 break; 1664 #ifdef ARM64 1665 case FFA_NOTIFICATION_INFO_GET_64: 1666 #endif 1667 case FFA_NOTIFICATION_INFO_GET_32: 1668 handle_notification_info_get(args); 1669 break; 1670 #endif /*CFG_CORE_SEL1_SPMC*/ 1671 case FFA_ERROR: 1672 EMSG("Cannot handle FFA_ERROR(%d)", (int)args->a2); 1673 if (!IS_ENABLED(CFG_CORE_SEL1_SPMC)) { 1674 /* 1675 * The SPMC will return an FFA_ERROR back so 
better 1676 * panic() now than flooding the log. 1677 */ 1678 panic("FFA_ERROR from SPMC is fatal"); 1679 } 1680 spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED, 1681 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ); 1682 break; 1683 default: 1684 EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0); 1685 spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED, 1686 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ); 1687 } 1688 } 1689 1690 static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset) 1691 { 1692 size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS); 1693 struct thread_ctx *thr = threads + thread_get_id(); 1694 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1695 struct optee_msg_arg *arg = NULL; 1696 struct mobj *mobj = NULL; 1697 uint32_t num_params = 0; 1698 size_t sz = 0; 1699 1700 mobj = mobj_ffa_get_by_cookie(cookie, 0); 1701 if (!mobj) { 1702 EMSG("Can't find cookie %#"PRIx64, cookie); 1703 return TEE_ERROR_BAD_PARAMETERS; 1704 } 1705 1706 res = mobj_inc_map(mobj); 1707 if (res) 1708 goto out_put_mobj; 1709 1710 res = TEE_ERROR_BAD_PARAMETERS; 1711 arg = mobj_get_va(mobj, offset, sizeof(*arg)); 1712 if (!arg) 1713 goto out_dec_map; 1714 1715 num_params = READ_ONCE(arg->num_params); 1716 if (num_params > OPTEE_MSG_MAX_NUM_PARAMS) 1717 goto out_dec_map; 1718 1719 sz = OPTEE_MSG_GET_ARG_SIZE(num_params); 1720 1721 thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc); 1722 if (!thr->rpc_arg) 1723 goto out_dec_map; 1724 1725 virt_on_stdcall(); 1726 res = tee_entry_std(arg, num_params); 1727 1728 thread_rpc_shm_cache_clear(&thr->shm_cache); 1729 thr->rpc_arg = NULL; 1730 1731 out_dec_map: 1732 mobj_dec_map(mobj); 1733 out_put_mobj: 1734 mobj_put(mobj); 1735 return res; 1736 } 1737 1738 /* 1739 * Helper routine for the assembly function thread_std_smc_entry() 1740 * 1741 * Note: this function is weak just to make link_dummies_paged.c happy. 1742 */ 1743 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1, 1744 uint32_t a2, uint32_t a3, 1745 uint32_t a4, uint32_t a5 __unused) 1746 { 1747 /* 1748 * Arguments are supplied from handle_yielding_call() as: 1749 * a0 <- w1 1750 * a1 <- w3 1751 * a2 <- w4 1752 * a3 <- w5 1753 * a4 <- w6 1754 * a5 <- w7 1755 */ 1756 thread_get_tsd()->rpc_target_info = swap_src_dst(a0); 1757 if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG) 1758 return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4); 1759 return FFA_DENIED; 1760 } 1761 1762 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm) 1763 { 1764 uint64_t offs = tpm->u.memref.offs; 1765 1766 param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN + 1767 OPTEE_MSG_ATTR_TYPE_FMEM_INPUT; 1768 1769 param->u.fmem.offs_low = offs; 1770 param->u.fmem.offs_high = offs >> 32; 1771 if (param->u.fmem.offs_high != offs >> 32) 1772 return false; 1773 1774 param->u.fmem.size = tpm->u.memref.size; 1775 if (tpm->u.memref.mobj) { 1776 uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj); 1777 1778 /* If a mobj is passed it better be one with a valid cookie. 
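		 * OPTEE_MSG_FMEM_INVALID_GLOBAL_ID is reserved for the
		 * no-mobj case in the else branch below.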
*/ 1779 if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID) 1780 return false; 1781 param->u.fmem.global_id = cookie; 1782 } else { 1783 param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID; 1784 } 1785 1786 return true; 1787 } 1788 1789 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params, 1790 struct thread_param *params, 1791 struct optee_msg_arg **arg_ret) 1792 { 1793 size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS); 1794 struct thread_ctx *thr = threads + thread_get_id(); 1795 struct optee_msg_arg *arg = thr->rpc_arg; 1796 1797 if (num_params > THREAD_RPC_MAX_NUM_PARAMS) 1798 return TEE_ERROR_BAD_PARAMETERS; 1799 1800 if (!arg) { 1801 EMSG("rpc_arg not set"); 1802 return TEE_ERROR_GENERIC; 1803 } 1804 1805 memset(arg, 0, sz); 1806 arg->cmd = cmd; 1807 arg->num_params = num_params; 1808 arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */ 1809 1810 for (size_t n = 0; n < num_params; n++) { 1811 switch (params[n].attr) { 1812 case THREAD_PARAM_ATTR_NONE: 1813 arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE; 1814 break; 1815 case THREAD_PARAM_ATTR_VALUE_IN: 1816 case THREAD_PARAM_ATTR_VALUE_OUT: 1817 case THREAD_PARAM_ATTR_VALUE_INOUT: 1818 arg->params[n].attr = params[n].attr - 1819 THREAD_PARAM_ATTR_VALUE_IN + 1820 OPTEE_MSG_ATTR_TYPE_VALUE_INPUT; 1821 arg->params[n].u.value.a = params[n].u.value.a; 1822 arg->params[n].u.value.b = params[n].u.value.b; 1823 arg->params[n].u.value.c = params[n].u.value.c; 1824 break; 1825 case THREAD_PARAM_ATTR_MEMREF_IN: 1826 case THREAD_PARAM_ATTR_MEMREF_OUT: 1827 case THREAD_PARAM_ATTR_MEMREF_INOUT: 1828 if (!set_fmem(arg->params + n, params + n)) 1829 return TEE_ERROR_BAD_PARAMETERS; 1830 break; 1831 default: 1832 return TEE_ERROR_BAD_PARAMETERS; 1833 } 1834 } 1835 1836 if (arg_ret) 1837 *arg_ret = arg; 1838 1839 return TEE_SUCCESS; 1840 } 1841 1842 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params, 1843 struct thread_param *params) 1844 { 1845 for (size_t n = 0; n < num_params; n++) { 1846 switch (params[n].attr) { 1847 case THREAD_PARAM_ATTR_VALUE_OUT: 1848 case THREAD_PARAM_ATTR_VALUE_INOUT: 1849 params[n].u.value.a = arg->params[n].u.value.a; 1850 params[n].u.value.b = arg->params[n].u.value.b; 1851 params[n].u.value.c = arg->params[n].u.value.c; 1852 break; 1853 case THREAD_PARAM_ATTR_MEMREF_OUT: 1854 case THREAD_PARAM_ATTR_MEMREF_INOUT: 1855 params[n].u.memref.size = arg->params[n].u.fmem.size; 1856 break; 1857 default: 1858 break; 1859 } 1860 } 1861 1862 return arg->ret; 1863 } 1864 1865 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params, 1866 struct thread_param *params) 1867 { 1868 struct thread_rpc_arg rpc_arg = { .call = { 1869 .w1 = thread_get_tsd()->rpc_target_info, 1870 .w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD, 1871 }, 1872 }; 1873 struct optee_msg_arg *arg = NULL; 1874 uint32_t ret = 0; 1875 1876 ret = get_rpc_arg(cmd, num_params, params, &arg); 1877 if (ret) 1878 return ret; 1879 1880 thread_rpc(&rpc_arg); 1881 1882 return get_rpc_arg_res(arg, num_params, params); 1883 } 1884 1885 static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj) 1886 { 1887 struct thread_rpc_arg rpc_arg = { .call = { 1888 .w1 = thread_get_tsd()->rpc_target_info, 1889 .w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD, 1890 }, 1891 }; 1892 struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0); 1893 uint32_t res2 = 0; 1894 uint32_t res = 0; 1895 1896 DMSG("freeing cookie %#"PRIx64, cookie); 1897 1898 res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, ¶m, NULL); 1899 1900 
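	/*
	 * Free the mobj and unregister the cookie even if building the
	 * RPC argument failed above; only the notification of normal
	 * world below is skipped in that case.
	 */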
mobj_put(mobj); 1901 res2 = mobj_ffa_unregister_by_cookie(cookie); 1902 if (res2) 1903 DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32, 1904 cookie, res2); 1905 if (!res) 1906 thread_rpc(&rpc_arg); 1907 } 1908 1909 static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt) 1910 { 1911 struct thread_rpc_arg rpc_arg = { .call = { 1912 .w1 = thread_get_tsd()->rpc_target_info, 1913 .w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD, 1914 }, 1915 }; 1916 struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align); 1917 struct optee_msg_arg *arg = NULL; 1918 unsigned int internal_offset = 0; 1919 struct mobj *mobj = NULL; 1920 uint64_t cookie = 0; 1921 1922 if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, ¶m, &arg)) 1923 return NULL; 1924 1925 thread_rpc(&rpc_arg); 1926 1927 if (arg->num_params != 1 || 1928 arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT) 1929 return NULL; 1930 1931 internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs); 1932 cookie = READ_ONCE(arg->params->u.fmem.global_id); 1933 mobj = mobj_ffa_get_by_cookie(cookie, internal_offset); 1934 if (!mobj) { 1935 DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed", 1936 cookie, internal_offset); 1937 return NULL; 1938 } 1939 1940 assert(mobj_is_nonsec(mobj)); 1941 1942 if (mobj->size < size) { 1943 DMSG("Mobj %#"PRIx64": wrong size", cookie); 1944 mobj_put(mobj); 1945 return NULL; 1946 } 1947 1948 if (mobj_inc_map(mobj)) { 1949 DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie); 1950 mobj_put(mobj); 1951 return NULL; 1952 } 1953 1954 return mobj; 1955 } 1956 1957 struct mobj *thread_rpc_alloc_payload(size_t size) 1958 { 1959 return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL); 1960 } 1961 1962 struct mobj *thread_rpc_alloc_kernel_payload(size_t size) 1963 { 1964 return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL); 1965 } 1966 1967 void thread_rpc_free_kernel_payload(struct mobj *mobj) 1968 { 1969 if (mobj) 1970 thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL, 1971 mobj_get_cookie(mobj), mobj); 1972 } 1973 1974 void thread_rpc_free_payload(struct mobj *mobj) 1975 { 1976 if (mobj) 1977 thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj), 1978 mobj); 1979 } 1980 1981 struct mobj *thread_rpc_alloc_global_payload(size_t size) 1982 { 1983 return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL); 1984 } 1985 1986 void thread_rpc_free_global_payload(struct mobj *mobj) 1987 { 1988 if (mobj) 1989 thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL, 1990 mobj_get_cookie(mobj), mobj); 1991 } 1992 1993 void thread_spmc_register_secondary_ep(vaddr_t ep) 1994 { 1995 unsigned long ret = 0; 1996 1997 /* Let the SPM know the entry point for secondary CPUs */ 1998 ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0); 1999 2000 if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64) 2001 EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret); 2002 } 2003 2004 #if defined(CFG_CORE_SEL1_SPMC) 2005 static TEE_Result spmc_init(void) 2006 { 2007 my_endpoint_id = SPMC_ENDPOINT_ID; 2008 DMSG("My endpoint ID %#x", my_endpoint_id); 2009 2010 /* 2011 * If SPMD think we are version 1.0 it will report version 1.0 to 2012 * normal world regardless of what version we query the SPM with. 2013 * However, if SPMD think we are version 1.1 it will forward 2014 * queries from normal world to let us negotiate version. So by 2015 * setting version 1.0 here we should be compatible. 2016 * 2017 * Note that disagreement on negotiated version means that we'll 2018 * have communication problems with normal world. 
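	 *
	 * The version used with the normal world driver can still be
	 * renegotiated later via an FFA_MSG_VERSION_REQ framework message,
	 * handled in spmc_exchange_version().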
2019 */ 2020 my_rxtx.ffa_vers = FFA_VERSION_1_0; 2021 2022 return TEE_SUCCESS; 2023 } 2024 #else /* !defined(CFG_CORE_SEL1_SPMC) */ 2025 static bool is_ffa_success(uint32_t fid) 2026 { 2027 #ifdef ARM64 2028 if (fid == FFA_SUCCESS_64) 2029 return true; 2030 #endif 2031 return fid == FFA_SUCCESS_32; 2032 } 2033 2034 static void spmc_rxtx_map(struct ffa_rxtx *rxtx) 2035 { 2036 struct thread_smc_args args = { 2037 #ifdef ARM64 2038 .a0 = FFA_RXTX_MAP_64, 2039 #else 2040 .a0 = FFA_RXTX_MAP_32, 2041 #endif 2042 .a1 = virt_to_phys(rxtx->tx), 2043 .a2 = virt_to_phys(rxtx->rx), 2044 .a3 = 1, 2045 }; 2046 2047 thread_smccc(&args); 2048 if (!is_ffa_success(args.a0)) { 2049 if (args.a0 == FFA_ERROR) 2050 EMSG("rxtx map failed with error %ld", args.a2); 2051 else 2052 EMSG("rxtx map failed"); 2053 panic(); 2054 } 2055 } 2056 2057 static uint16_t get_my_id(void) 2058 { 2059 struct thread_smc_args args = { 2060 .a0 = FFA_ID_GET, 2061 }; 2062 2063 thread_smccc(&args); 2064 if (!is_ffa_success(args.a0)) { 2065 if (args.a0 == FFA_ERROR) 2066 EMSG("Get id failed with error %ld", args.a2); 2067 else 2068 EMSG("Get id failed"); 2069 panic(); 2070 } 2071 2072 return args.a2; 2073 } 2074 2075 static uint32_t get_ffa_version(uint32_t my_version) 2076 { 2077 struct thread_smc_args args = { 2078 .a0 = FFA_VERSION, 2079 .a1 = my_version, 2080 }; 2081 2082 thread_smccc(&args); 2083 if (args.a0 & BIT(31)) { 2084 EMSG("FF-A version failed with error %ld", args.a0); 2085 panic(); 2086 } 2087 2088 return args.a0; 2089 } 2090 2091 static void *spmc_retrieve_req(uint64_t cookie, 2092 struct ffa_mem_transaction_x *trans) 2093 { 2094 struct ffa_mem_access *acc_descr_array = NULL; 2095 struct ffa_mem_access_perm *perm_descr = NULL; 2096 struct thread_smc_args args = { 2097 .a0 = FFA_MEM_RETRIEVE_REQ_32, 2098 .a3 = 0, /* Address, Using TX -> MBZ */ 2099 .a4 = 0, /* Using TX -> MBZ */ 2100 }; 2101 size_t size = 0; 2102 int rc = 0; 2103 2104 if (my_rxtx.ffa_vers == FFA_VERSION_1_0) { 2105 struct ffa_mem_transaction_1_0 *trans_descr = my_rxtx.tx; 2106 2107 size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access); 2108 memset(trans_descr, 0, size); 2109 trans_descr->sender_id = thread_get_tsd()->rpc_target_info; 2110 trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR; 2111 trans_descr->global_handle = cookie; 2112 trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE | 2113 FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT; 2114 trans_descr->mem_access_count = 1; 2115 acc_descr_array = trans_descr->mem_access_array; 2116 } else { 2117 struct ffa_mem_transaction_1_1 *trans_descr = my_rxtx.tx; 2118 2119 size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access); 2120 memset(trans_descr, 0, size); 2121 trans_descr->sender_id = thread_get_tsd()->rpc_target_info; 2122 trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR; 2123 trans_descr->global_handle = cookie; 2124 trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE | 2125 FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT; 2126 trans_descr->mem_access_count = 1; 2127 trans_descr->mem_access_offs = sizeof(*trans_descr); 2128 trans_descr->mem_access_size = sizeof(struct ffa_mem_access); 2129 acc_descr_array = (void *)((vaddr_t)my_rxtx.tx + 2130 sizeof(*trans_descr)); 2131 } 2132 acc_descr_array->region_offs = 0; 2133 acc_descr_array->reserved = 0; 2134 perm_descr = &acc_descr_array->access_perm; 2135 perm_descr->endpoint_id = my_endpoint_id; 2136 perm_descr->perm = FFA_MEM_ACC_RW; 2137 perm_descr->flags = 0; 2138 2139 args.a1 = size; /* Total Length */ 2140 args.a2 = size; /* Frag 
	thread_smccc(&args);
	if (args.a0 != FFA_MEM_RETRIEVE_RESP) {
		if (args.a0 == FFA_ERROR)
			EMSG("Failed to fetch cookie %#"PRIx64" error code %d",
			     cookie, (int)args.a2);
		else
			EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64,
			     cookie, args.a0);
		return NULL;
	}
	rc = spmc_read_mem_transaction(my_rxtx.ffa_vers, my_rxtx.rx,
				       my_rxtx.size, trans);
	if (rc) {
		EMSG("Memory transaction failure for cookie %#"PRIx64" rc %d",
		     cookie, rc);
		return NULL;
	}

	return my_rxtx.rx;
}

void thread_spmc_relinquish(uint64_t cookie)
{
	struct ffa_mem_relinquish *relinquish_desc = my_rxtx.tx;
	struct thread_smc_args args = {
		.a0 = FFA_MEM_RELINQUISH,
	};

	memset(relinquish_desc, 0, sizeof(*relinquish_desc));
	relinquish_desc->handle = cookie;
	relinquish_desc->flags = 0;
	relinquish_desc->endpoint_count = 1;
	relinquish_desc->endpoint_id_array[0] = my_endpoint_id;
	thread_smccc(&args);
	if (!is_ffa_success(args.a0))
		EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
}

static int set_pages(struct ffa_address_range *regions,
		     unsigned int num_regions, unsigned int num_pages,
		     struct mobj_ffa *mf)
{
	unsigned int n = 0;
	unsigned int idx = 0;

	for (n = 0; n < num_regions; n++) {
		unsigned int page_count = READ_ONCE(regions[n].page_count);
		uint64_t addr = READ_ONCE(regions[n].address);

		if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count))
			return FFA_INVALID_PARAMETERS;
	}

	if (idx != num_pages)
		return FFA_INVALID_PARAMETERS;

	return 0;
}

struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie)
{
	struct mobj_ffa *ret = NULL;
	struct ffa_mem_transaction_x retrieve_desc = { };
	struct ffa_mem_access *descr_array = NULL;
	struct ffa_mem_region *descr = NULL;
	struct mobj_ffa *mf = NULL;
	unsigned int num_pages = 0;
	unsigned int offs = 0;
	void *buf = NULL;
	struct thread_smc_args ffa_rx_release_args = {
		.a0 = FFA_RX_RELEASE
	};

	/*
	 * OP-TEE only supports a single memory region while the
	 * specification allows for more than one.
	 */
	buf = spmc_retrieve_req(cookie, &retrieve_desc);
	if (!buf) {
		EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
		     cookie);
		return NULL;
	}

	descr_array = (void *)((vaddr_t)buf + retrieve_desc.mem_access_offs);
	offs = READ_ONCE(descr_array->region_offs);
	descr = (struct ffa_mem_region *)((vaddr_t)buf + offs);

	num_pages = READ_ONCE(descr->total_page_count);
	mf = mobj_ffa_spmc_new(cookie, num_pages);
	if (!mf)
		goto out;

	if (set_pages(descr->address_range_array,
		      READ_ONCE(descr->address_range_count), num_pages, mf)) {
		mobj_ffa_spmc_delete(mf);
		goto out;
	}

	ret = mf;

out:
	/*
	 * Release the RX buffer after the memory retrieve request.
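	 * Ownership of the RX buffer was transferred to this endpoint by
	 * FFA_MEM_RETRIEVE_RESP; FFA_RX_RELEASE hands it back to the SPMC
	 * so it can be reused for the next response.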
	 */
	thread_smccc(&ffa_rx_release_args);

	return ret;
}

static TEE_Result spmc_init(void)
{
	unsigned int major = 0;
	unsigned int minor __maybe_unused = 0;
	uint32_t my_vers = 0;
	uint32_t vers = 0;

	my_vers = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR);
	vers = get_ffa_version(my_vers);
	major = (vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK;
	minor = (vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK;
	DMSG("SPMC reported version %u.%u", major, minor);
	if (major != FFA_VERSION_MAJOR) {
		EMSG("Incompatible major version %u, expected %u",
		     major, FFA_VERSION_MAJOR);
		panic();
	}
	/* Negotiate down to the lower of the two versions */
	if (vers < my_vers)
		my_vers = vers;
	DMSG("Using version %u.%u",
	     (my_vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK,
	     (my_vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK);
	my_rxtx.ffa_vers = my_vers;

	spmc_rxtx_map(&my_rxtx);
	my_endpoint_id = get_my_id();
	DMSG("My endpoint ID %#x", my_endpoint_id);

	return TEE_SUCCESS;
}
#endif /* !defined(CFG_CORE_SEL1_SPMC) */

/*
 * boot_final() is always done before exiting at the end of boot
 * initialization. With virtualization the init-calls are only done once
 * an OP-TEE partition has been created. So with virtualization we have
 * to initialize via boot_final() to make sure a value is assigned before
 * it's used the first time.
 */
#ifdef CFG_NS_VIRTUALIZATION
boot_final(spmc_init);
#else
service_init(spmc_init);
#endif