// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2020-2021, Linaro Limited.
 * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
 */

#include <assert.h>
#include <ffa.h>
#include <initcall.h>
#include <io.h>
#include <kernel/interrupt.h>
#include <kernel/panic.h>
#include <kernel/secure_partition.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/thread_spmc.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <optee_ffa.h>
#include <optee_msg.h>
#include <optee_rpc_cmd.h>
#include <string.h>
#include <sys/queue.h>
#include <tee/entry_std.h>
#include <tee/uuid.h>
#include <util.h>

#if defined(CFG_CORE_SEL1_SPMC)
struct mem_share_state {
	struct mobj_ffa *mf;
	unsigned int page_count;
	unsigned int region_count;
	unsigned int current_page_idx;
};

struct mem_frag_state {
	struct mem_share_state share;
	tee_mm_entry_t *mm;
	unsigned int frag_offset;
	SLIST_ENTRY(mem_frag_state) link;
};
#endif

/* Initialized in spmc_init() below */
static uint16_t my_endpoint_id;

/*
 * If struct ffa_rxtx::size is 0 RX/TX buffers are not mapped or initialized.
 *
 * struct ffa_rxtx::spinlock protects the variables below from concurrent
 * access; this includes the use of the content of struct ffa_rxtx::rx and
 * @frag_state_head.
 *
 * struct ffa_rxtx::tx_is_mine is true when we may write to struct
 * ffa_rxtx::tx and false when it is owned by normal world.
 *
 * Note that we can't prevent normal world from updating the content of
 * these buffers so we must always be careful when reading, even while we
 * hold the lock.
 */

#ifdef CFG_CORE_SEL1_SPMC
static struct ffa_rxtx nw_rxtx;

static bool is_nw_buf(struct ffa_rxtx *rxtx)
{
	return rxtx == &nw_rxtx;
}

static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
	SLIST_HEAD_INITIALIZER(&frag_state_head);
#else
static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
static struct ffa_rxtx nw_rxtx = { .rx = __rx_buf, .tx = __tx_buf };
#endif

static uint32_t swap_src_dst(uint32_t src_dst)
{
	return (src_dst >> 16) | (src_dst << 16);
}

void spmc_set_args(struct thread_smc_args *args, uint32_t fid, uint32_t src_dst,
		   uint32_t w2, uint32_t w3, uint32_t w4, uint32_t w5)
{
	*args = (struct thread_smc_args){ .a0 = fid,
					  .a1 = src_dst,
					  .a2 = w2,
					  .a3 = w3,
					  .a4 = w4,
					  .a5 = w5, };
}

#if defined(CFG_CORE_SEL1_SPMC)
void spmc_handle_version(struct thread_smc_args *args)
{
	/*
	 * We currently only support one version, 1.0, so let's keep it
	 * simple.
	 */
	spmc_set_args(args,
		      MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR),
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static void handle_features(struct thread_smc_args *args)
{
	uint32_t ret_fid = 0;
	uint32_t ret_w2 = FFA_PARAM_MBZ;

	switch (args->a1) {
#ifdef ARM64
	case FFA_RXTX_MAP_64:
#endif
	case FFA_RXTX_MAP_32:
		ret_fid = FFA_SUCCESS_32;
		ret_w2 = 0; /* 4kB Minimum buffer size and alignment boundary */
		break;
#ifdef ARM64
	case FFA_MEM_SHARE_64:
#endif
	case FFA_MEM_SHARE_32:
		ret_fid = FFA_SUCCESS_32;
		/*
		 * Partition manager supports transmission of a memory
		 * transaction descriptor in a buffer dynamically allocated
		 * by the endpoint.
		 */
		ret_w2 = BIT(0);
		break;

	case FFA_ERROR:
	case FFA_VERSION:
	case FFA_SUCCESS_32:
#ifdef ARM64
	case FFA_SUCCESS_64:
#endif
	case FFA_MEM_FRAG_TX:
	case FFA_MEM_RECLAIM:
	case FFA_MSG_SEND_DIRECT_REQ_32:
	case FFA_INTERRUPT:
	case FFA_PARTITION_INFO_GET:
	case FFA_RX_RELEASE:
		ret_fid = FFA_SUCCESS_32;
		break;
	default:
		ret_fid = FFA_ERROR;
		ret_w2 = FFA_NOT_SUPPORTED;
		break;
	}

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
{
	tee_mm_entry_t *mm = NULL;

	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
		return FFA_INVALID_PARAMETERS;

	mm = tee_mm_alloc(&tee_mm_shm, sz);
	if (!mm)
		return FFA_NO_MEMORY;

	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
					  sz / SMALL_PAGE_SIZE,
					  MEM_AREA_NSEC_SHM)) {
		tee_mm_free(mm);
		return FFA_INVALID_PARAMETERS;
	}

	*va_ret = (void *)tee_mm_get_smem(mm);
	return 0;
}

static void unmap_buf(void *va, size_t sz)
{
	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va);

	assert(mm);
	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
	tee_mm_free(mm);
}

void spmc_handle_rxtx_map(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	int rc = 0;
	uint32_t ret_fid = FFA_ERROR;
	unsigned int sz = 0;
	paddr_t rx_pa = 0;
	paddr_t tx_pa = 0;
	void *rx = NULL;
	void *tx = NULL;

	cpu_spin_lock(&rxtx->spinlock);

	if (args->a3 & GENMASK_64(63, 6)) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	sz = args->a3 * SMALL_PAGE_SIZE;
	if (!sz) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}
	/* TX/RX are swapped compared to the caller */
	tx_pa = args->a2;
	rx_pa = args->a1;

	if (rxtx->size) {
		rc = FFA_DENIED;
		goto out;
	}

	/*
	 * If the buffer comes from an SP the address is virtual and already
	 * mapped.
	 */
	if (is_nw_buf(rxtx)) {
		rc = map_buf(tx_pa, sz, &tx);
		if (rc)
			goto out;
		rc = map_buf(rx_pa, sz, &rx);
		if (rc) {
			unmap_buf(tx, sz);
			goto out;
		}
		rxtx->tx = tx;
		rxtx->rx = rx;
	} else {
		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}

		if (!virt_to_phys((void *)tx_pa) ||
		    !virt_to_phys((void *)rx_pa)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}

		rxtx->tx = (void *)tx_pa;
		rxtx->rx = (void *)rx_pa;
	}

	rxtx->size = sz;
	rxtx->tx_is_mine = true;
	ret_fid = FFA_SUCCESS_32;
	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
out:
	cpu_spin_unlock(&rxtx->spinlock);
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

void spmc_handle_rxtx_unmap(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	uint32_t ret_fid = FFA_ERROR;
	int rc = FFA_INVALID_PARAMETERS;

	cpu_spin_lock(&rxtx->spinlock);

	if (!rxtx->size)
		goto out;

	/* We don't unmap the SP memory as the SP might still use it */
	if (is_nw_buf(rxtx)) {
		unmap_buf(rxtx->rx, rxtx->size);
		unmap_buf(rxtx->tx, rxtx->size);
	}
	rxtx->size = 0;
	rxtx->rx = NULL;
	rxtx->tx = NULL;
	ret_fid = FFA_SUCCESS_32;
	rc = 0;
out:
	cpu_spin_unlock(&rxtx->spinlock);
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

void spmc_handle_rx_release(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	uint32_t ret_fid = 0;
	int rc = 0;

	cpu_spin_lock(&rxtx->spinlock);
	/* The sender's RX is our TX */
	if (!rxtx->size || rxtx->tx_is_mine) {
		ret_fid = FFA_ERROR;
		rc = FFA_DENIED;
	} else {
		ret_fid = FFA_SUCCESS_32;
		rc = 0;
		rxtx->tx_is_mine = true;
	}
	cpu_spin_unlock(&rxtx->spinlock);

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
{
	return !w0 && !w1 && !w2 && !w3;
}

static bool is_my_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
{
	/*
	 * This depends on which UUID we have been assigned.
	 * TODO add a generic mechanism to obtain our UUID.
	 *
	 * The test below is for the hard coded UUID
	 * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
	 */
	return w0 == 0xe0786148 && w1 == 0xe311f8e7 &&
	       w2 == 0x02005ebc && w3 == 0x1bc5d5a5;
}

void spmc_fill_partition_entry(struct ffa_partition_info *fpi,
			       uint16_t endpoint_id, uint16_t execution_context)
{
	fpi->id = endpoint_id;
	/* Number of execution contexts implemented by this partition */
	fpi->execution_context = execution_context;

	fpi->partition_properties = FFA_PARTITION_DIRECT_REQ_RECV_SUPPORT |
				    FFA_PARTITION_DIRECT_REQ_SEND_SUPPORT;
}

static uint32_t handle_partition_info_get_all(size_t *elem_count,
					      struct ffa_rxtx *rxtx)
{
	struct ffa_partition_info *fpi = rxtx->tx;

	/* Add OP-TEE SP */
	spmc_fill_partition_entry(fpi, my_endpoint_id, CFG_TEE_CORE_NB_CORE);
	rxtx->tx_is_mine = false;
	*elem_count = 1;
	fpi++;

	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
		size_t count = (rxtx->size / sizeof(*fpi)) - 1;

		if (sp_partition_info_get(fpi, NULL, &count))
			return FFA_NO_MEMORY;
		*elem_count += count;
	}

	return FFA_OK;
}

void spmc_handle_partition_info_get(struct thread_smc_args *args,
				    struct ffa_rxtx *rxtx)
{
	uint32_t ret_fid = FFA_ERROR;
	uint32_t rc = 0;
	uint32_t endpoint_id = my_endpoint_id;
	struct ffa_partition_info *fpi = NULL;

	cpu_spin_lock(&rxtx->spinlock);

	if (!rxtx->size || !rxtx->tx_is_mine) {
		if (rxtx->size)
			rc = FFA_BUSY;
		else
			rc = FFA_DENIED; /* TX buffer not setup yet */
		goto out;
	}

	fpi = rxtx->tx;

	if (rxtx->size < sizeof(*fpi)) {
		ret_fid = FFA_ERROR;
		rc = FFA_NO_MEMORY;
		goto out;
	}

	if (is_nil_uuid(args->a1, args->a2, args->a3, args->a4)) {
		size_t elem_count = 0;

		ret_fid = handle_partition_info_get_all(&elem_count, rxtx);

		if (ret_fid) {
			rc = ret_fid;
			ret_fid = FFA_ERROR;
		} else {
			ret_fid = FFA_SUCCESS_32;
			rc = elem_count;
		}

		goto out;
	}

	if (is_my_uuid(args->a1, args->a2, args->a3, args->a4)) {
		spmc_fill_partition_entry(fpi, endpoint_id,
					  CFG_TEE_CORE_NB_CORE);
		rc = 1;
	} else if (IS_ENABLED(CFG_SECURE_PARTITION)) {
		uint32_t uuid_array[4] = { 0 };
		TEE_UUID uuid = { };
		TEE_Result res = TEE_SUCCESS;
		size_t count = (rxtx->size / sizeof(*fpi));

		uuid_array[0] = args->a1;
		uuid_array[1] = args->a2;
		uuid_array[2] = args->a3;
		uuid_array[3] = args->a4;
		tee_uuid_from_octets(&uuid, (uint8_t *)uuid_array);

		res = sp_partition_info_get(fpi, &uuid, &count);
		if (res != TEE_SUCCESS) {
			ret_fid = FFA_ERROR;
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		rc = count;
	} else {
		ret_fid = FFA_ERROR;
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	ret_fid = FFA_SUCCESS_32;
	rxtx->tx_is_mine = false;

out:
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
	cpu_spin_unlock(&rxtx->spinlock);
}
#endif /*CFG_CORE_SEL1_SPMC*/

static void handle_yielding_call(struct thread_smc_args *args)
{
	TEE_Result res = 0;

	thread_check_canaries();

	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
		/* Note connection to struct thread_rpc_arg::ret */
		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
				       0);
		res = TEE_ERROR_BAD_PARAMETERS;
	} else {
		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
				     args->a6, args->a7);
		res = TEE_ERROR_BUSY;
	}
	spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
		      swap_src_dst(args->a1), 0, res, 0, 0);
}

static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
{
	uint64_t cookie = reg_pair_to_64(a5, a4);
	uint32_t res = 0;

	res = mobj_ffa_unregister_by_cookie(cookie);
	switch (res) {
	case TEE_SUCCESS:
	case TEE_ERROR_ITEM_NOT_FOUND:
		return 0;
	case TEE_ERROR_BUSY:
		EMSG("res %#"PRIx32, res);
		return FFA_BUSY;
	default:
		EMSG("res %#"PRIx32, res);
		return FFA_INVALID_PARAMETERS;
	}
}

static void handle_blocking_call(struct thread_smc_args *args)
{
	switch (args->a3) {
	case OPTEE_FFA_GET_API_VERSION:
		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
			      swap_src_dst(args->a1), 0,
			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
			      0);
		break;
	case OPTEE_FFA_GET_OS_VERSION:
		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
			      swap_src_dst(args->a1), 0,
			      CFG_OPTEE_REVISION_MAJOR,
			      CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1);
		break;
	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
			      swap_src_dst(args->a1), 0, 0,
			      THREAD_RPC_MAX_NUM_PARAMS,
			      OPTEE_FFA_SEC_CAP_ARG_OFFSET);
		break;
	case OPTEE_FFA_UNREGISTER_SHM:
		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
			      swap_src_dst(args->a1), 0,
			      handle_unregister_shm(args->a4, args->a5), 0, 0);
		break;
	default:
		EMSG("Unhandled blocking service ID %#"PRIx32,
		     (uint32_t)args->a3);
		panic();
	}
}

#if defined(CFG_CORE_SEL1_SPMC)
static int get_acc_perms(struct ffa_mem_access *mem_acc,
			 unsigned int num_mem_accs, uint8_t *acc_perms,
			 unsigned int *region_offs)
{
	unsigned int n = 0;

	for (n = 0; n < num_mem_accs; n++) {
		struct ffa_mem_access_perm *descr = &mem_acc[n].access_perm;

		if (READ_ONCE(descr->endpoint_id) == my_endpoint_id) {
			*acc_perms = READ_ONCE(descr->perm);
			*region_offs = READ_ONCE(mem_acc[n].region_offs);
			return 0;
		}
	}

	return FFA_INVALID_PARAMETERS;
}

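/*
 * Rough layout of the memory transaction descriptor parsed by
 * mem_share_init() below (offsets relative to the start of the buffer):
 *
 *   struct ffa_mem_transaction    mem_reg_attr, mem_access_count, ...
 *   struct ffa_mem_access[N]      one entry per receiver endpoint; the
 *                                 entry matching my_endpoint_id supplies
 *                                 the access permission and region_offs
 *   struct ffa_mem_region         composite descriptor at region_offs,
 *                                 carries total_page_count and
 *                                 address_range_count
 *   struct ffa_address_range[M]   address ranges consumed by
 *                                 add_mem_share_helper(), possibly spread
 *                                 over several fragments
 */
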
static int mem_share_init(void *buf, size_t blen, unsigned int *page_count,
			  unsigned int *region_count, size_t *addr_range_offs)
{
	const uint8_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
	struct ffa_mem_region *region_descr = NULL;
	struct ffa_mem_transaction *descr = NULL;
	unsigned int num_mem_accs = 0;
	uint8_t mem_acc_perm = 0;
	unsigned int region_descr_offs = 0;
	size_t n = 0;

	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_mem_transaction) ||
	    blen < sizeof(struct ffa_mem_transaction))
		return FFA_INVALID_PARAMETERS;

	descr = buf;

	/* Check that the endpoint memory access descriptor array fits */
	num_mem_accs = READ_ONCE(descr->mem_access_count);
	if (MUL_OVERFLOW(sizeof(struct ffa_mem_access), num_mem_accs, &n) ||
	    ADD_OVERFLOW(sizeof(*descr), n, &n) || n > blen)
		return FFA_INVALID_PARAMETERS;

	if (READ_ONCE(descr->mem_reg_attr) != exp_mem_reg_attr)
		return FFA_INVALID_PARAMETERS;

	/* Check that the access permissions match what's expected */
	if (get_acc_perms(descr->mem_access_array,
			  num_mem_accs, &mem_acc_perm, &region_descr_offs) ||
	    mem_acc_perm != exp_mem_acc_perm)
		return FFA_INVALID_PARAMETERS;

	/* Check that the Composite memory region descriptor fits */
	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
	    n > blen)
		return FFA_INVALID_PARAMETERS;

	if (!IS_ALIGNED_WITH_TYPE((vaddr_t)descr + region_descr_offs,
				  struct ffa_mem_region))
		return FFA_INVALID_PARAMETERS;

	region_descr = (struct ffa_mem_region *)((vaddr_t)descr +
						 region_descr_offs);
	*page_count = READ_ONCE(region_descr->total_page_count);
	*region_count = READ_ONCE(region_descr->address_range_count);
	*addr_range_offs = n;
	return 0;
}

static int add_mem_share_helper(struct mem_share_state *s, void *buf,
				size_t flen)
{
	unsigned int region_count = flen / sizeof(struct ffa_address_range);
	struct ffa_address_range *arange = NULL;
	unsigned int n = 0;

	if (region_count > s->region_count)
		region_count = s->region_count;

	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
		return FFA_INVALID_PARAMETERS;
	arange = buf;

	for (n = 0; n < region_count; n++) {
		unsigned int page_count = READ_ONCE(arange[n].page_count);
		uint64_t addr = READ_ONCE(arange[n].address);

		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
					  addr, page_count))
			return FFA_INVALID_PARAMETERS;
	}

	s->region_count -= region_count;
	if (s->region_count)
		return region_count * sizeof(*arange);

	if (s->current_page_idx != s->page_count)
		return FFA_INVALID_PARAMETERS;

	return 0;
}

static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
{
	int rc = 0;

	rc = add_mem_share_helper(&s->share, buf, flen);
	if (rc >= 0) {
		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
			/* We're not at the end of the descriptor yet */
			if (s->share.region_count)
				return s->frag_offset;

			/* We're done */
			rc = 0;
		} else {
			rc = FFA_INVALID_PARAMETERS;
		}
	}

	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
	if (rc < 0)
		mobj_ffa_sel1_spmc_delete(s->share.mf);
	else
		mobj_ffa_push_to_inactive(s->share.mf);
	free(s);

	return rc;
}

static bool is_sp_share(void *buf)
{
	struct ffa_mem_transaction *input_descr = NULL;
	struct ffa_mem_access_perm *perm = NULL;

	if (!IS_ENABLED(CFG_SECURE_PARTITION))
		return false;

	input_descr = buf;
	perm = &input_descr->mem_access_array[0].access_perm;

	/*
	 * perm->endpoint_id is read here only to check if the endpoint is
	 * OP-TEE. We do read it later on again, but there are some additional
	 * checks there to make sure that the data is correct.
	 */
	return READ_ONCE(perm->endpoint_id) != my_endpoint_id;
}

static int add_mem_share(tee_mm_entry_t *mm, void *buf, size_t blen,
			 size_t flen, uint64_t *global_handle)
{
	int rc = 0;
	struct mem_share_state share = { };
	size_t addr_range_offs = 0;
	size_t n = 0;

	rc = mem_share_init(buf, flen, &share.page_count, &share.region_count,
			    &addr_range_offs);
	if (rc)
		return rc;

	if (MUL_OVERFLOW(share.region_count,
			 sizeof(struct ffa_address_range), &n) ||
	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
		return FFA_INVALID_PARAMETERS;

	share.mf = mobj_ffa_sel1_spmc_new(share.page_count);
	if (!share.mf)
		return FFA_NO_MEMORY;

	if (flen != blen) {
		struct mem_frag_state *s = calloc(sizeof(*s), 1);

		if (!s) {
			rc = FFA_NO_MEMORY;
			goto err;
		}
		s->share = share;
		s->mm = mm;
		s->frag_offset = addr_range_offs;

		SLIST_INSERT_HEAD(&frag_state_head, s, link);
		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
					flen - addr_range_offs);

		if (rc >= 0)
			*global_handle = mobj_ffa_get_cookie(share.mf);

		return rc;
	}

	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
				  flen - addr_range_offs);
	if (rc) {
		/*
		 * Number of consumed bytes may be returned instead of 0 for
		 * done.
		 */
		rc = FFA_INVALID_PARAMETERS;
		goto err;
	}

	*global_handle = mobj_ffa_push_to_inactive(share.mf);

	return 0;
err:
	mobj_ffa_sel1_spmc_delete(share.mf);
	return rc;
}

static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
				 unsigned int page_count,
				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
{
	int rc = 0;
	size_t len = 0;
	tee_mm_entry_t *mm = NULL;
	vaddr_t offs = pbuf & SMALL_PAGE_MASK;

	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
		return FFA_INVALID_PARAMETERS;
	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
		return FFA_INVALID_PARAMETERS;

	/*
	 * Check that the length reported in flen is covered by len even
	 * if the offset is taken into account.
	 */
	if (len < flen || len - offs < flen)
		return FFA_INVALID_PARAMETERS;

	mm = tee_mm_alloc(&tee_mm_shm, len);
	if (!mm)
		return FFA_NO_MEMORY;

	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
					  page_count, MEM_AREA_NSEC_SHM)) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	cpu_spin_lock(&rxtx->spinlock);
	rc = add_mem_share(mm, (void *)(tee_mm_get_smem(mm) + offs), blen, flen,
			   global_handle);
	cpu_spin_unlock(&rxtx->spinlock);
	if (rc > 0)
		return rc;

	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
out:
	tee_mm_free(mm);
	return rc;
}

static int handle_mem_share_rxbuf(size_t blen, size_t flen,
				  uint64_t *global_handle,
				  struct ffa_rxtx *rxtx)
{
	int rc = FFA_DENIED;

	cpu_spin_lock(&rxtx->spinlock);

	if (rxtx->rx && flen <= rxtx->size) {
		if (is_sp_share(rxtx->rx)) {
			rc = spmc_sp_add_share(rxtx, blen,
					       global_handle, NULL);
		} else {
			rc = add_mem_share(NULL, rxtx->rx, blen, flen,
					   global_handle);
		}
	}

	cpu_spin_unlock(&rxtx->spinlock);

	return rc;
}

static void handle_mem_share(struct thread_smc_args *args,
			     struct ffa_rxtx *rxtx)
{
	uint32_t tot_len = args->a1;
	uint32_t frag_len = args->a2;
	uint64_t addr = args->a3;
	uint32_t page_count = args->a4;
	uint32_t ret_w1 = 0;
	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
	uint32_t ret_w3 = 0;
	uint32_t ret_fid = FFA_ERROR;
	uint64_t global_handle = 0;
	int rc = 0;

	/* Check that the MBZs are indeed 0 */
	if (args->a5 || args->a6 || args->a7)
		goto out;

	/* Check that fragment length doesn't exceed total length */
	if (frag_len > tot_len)
		goto out;

	/* Check for 32-bit calling convention */
	if (args->a0 == FFA_MEM_SHARE_32)
		addr &= UINT32_MAX;

	if (!addr) {
		/*
		 * The memory transaction descriptor is passed via our rx
		 * buffer.
		 */
		if (page_count)
			goto out;
		rc = handle_mem_share_rxbuf(tot_len, frag_len, &global_handle,
					    rxtx);
	} else {
		rc = handle_mem_share_tmem(addr, tot_len, frag_len, page_count,
					   &global_handle, rxtx);
	}
	if (rc < 0) {
		ret_w2 = rc;
	} else if (rc > 0) {
		ret_fid = FFA_MEM_FRAG_RX;
		ret_w3 = rc;
		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
	} else {
		ret_fid = FFA_SUCCESS_32;
		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
	}
out:
	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
}

static struct mem_frag_state *get_frag_state(uint64_t global_handle)
{
	struct mem_frag_state *s = NULL;

	SLIST_FOREACH(s, &frag_state_head, link)
		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
			return s;

	return NULL;
}

static void handle_mem_frag_tx(struct thread_smc_args *args,
			       struct ffa_rxtx *rxtx)
{
	int rc = 0;
	uint64_t global_handle = reg_pair_to_64(READ_ONCE(args->a2),
						READ_ONCE(args->a1));
	size_t flen = READ_ONCE(args->a3);
	struct mem_frag_state *s = NULL;
	tee_mm_entry_t *mm = NULL;
	unsigned int page_count = 0;
	void *buf = NULL;
	uint32_t ret_w1 = 0;
	uint32_t ret_w2 = 0;
	uint32_t ret_w3 = 0;
	uint32_t ret_fid = 0;

	/*
	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
	 * requests.
	 */

	cpu_spin_lock(&rxtx->spinlock);

	s = get_frag_state(global_handle);
	if (!s) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	mm = s->mm;
	if (mm) {
		if (flen > tee_mm_get_bytes(mm)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		page_count = s->share.page_count;
		buf = (void *)tee_mm_get_smem(mm);
	} else {
		if (flen > rxtx->size) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		buf = rxtx->rx;
	}

	rc = add_mem_share_frag(s, buf, flen);
out:
	cpu_spin_unlock(&rxtx->spinlock);

	if (rc <= 0 && mm) {
		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
		tee_mm_free(mm);
	}

	if (rc < 0) {
		ret_fid = FFA_ERROR;
		ret_w2 = rc;
	} else if (rc > 0) {
		ret_fid = FFA_MEM_FRAG_RX;
		ret_w3 = rc;
		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
	} else {
		ret_fid = FFA_SUCCESS_32;
		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
	}

	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
}

static void handle_mem_reclaim(struct thread_smc_args *args)
{
	uint32_t ret_val = FFA_INVALID_PARAMETERS;
	uint32_t ret_fid = FFA_ERROR;
	uint64_t cookie = 0;

	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
		goto out;

	cookie = reg_pair_to_64(args->a2, args->a1);
	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
	case TEE_SUCCESS:
		ret_fid = FFA_SUCCESS_32;
		ret_val = 0;
		break;
	case TEE_ERROR_ITEM_NOT_FOUND:
		DMSG("cookie %#"PRIx64" not found", cookie);
		ret_val = FFA_INVALID_PARAMETERS;
		break;
	default:
		DMSG("cookie %#"PRIx64" busy", cookie);
		ret_val = FFA_DENIED;
		break;
	}
out:
	spmc_set_args(args, ret_fid, ret_val, 0, 0, 0, 0);
}
#endif

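/*
 * Dispatch overview for thread_spmc_msg_recv() below: with
 * CFG_CORE_SEL1_SPMC, OP-TEE itself acts as SPMC and handles the setup
 * ABIs (FFA_VERSION, FFA_FEATURES, FFA_RXTX_MAP/UNMAP, FFA_RX_RELEASE,
 * FFA_PARTITION_INFO_GET) and the memory sharing ABIs (FFA_MEM_SHARE,
 * FFA_MEM_FRAG_TX, FFA_MEM_RECLAIM). Without it only FFA_INTERRUPT and
 * FFA_MSG_SEND_DIRECT_REQ_* are handled here and everything else is
 * answered with FFA_ERROR/FFA_NOT_SUPPORTED. For direct requests, w3
 * carries the OP-TEE function ID and OPTEE_FFA_YIELDING_CALL_BIT selects
 * between handle_yielding_call() and handle_blocking_call().
 */
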
/* Only called from assembly */
void thread_spmc_msg_recv(struct thread_smc_args *args);
void thread_spmc_msg_recv(struct thread_smc_args *args)
{
	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
	switch (args->a0) {
#if defined(CFG_CORE_SEL1_SPMC)
	case FFA_VERSION:
		spmc_handle_version(args);
		break;
	case FFA_FEATURES:
		handle_features(args);
		break;
#ifdef ARM64
	case FFA_RXTX_MAP_64:
#endif
	case FFA_RXTX_MAP_32:
		spmc_handle_rxtx_map(args, &nw_rxtx);
		break;
	case FFA_RXTX_UNMAP:
		spmc_handle_rxtx_unmap(args, &nw_rxtx);
		break;
	case FFA_RX_RELEASE:
		spmc_handle_rx_release(args, &nw_rxtx);
		break;
	case FFA_PARTITION_INFO_GET:
		spmc_handle_partition_info_get(args, &nw_rxtx);
		break;
#endif /*CFG_CORE_SEL1_SPMC*/
	case FFA_INTERRUPT:
		interrupt_main_handler();
		spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0);
		break;
#ifdef ARM64
	case FFA_MSG_SEND_DIRECT_REQ_64:
#endif
	case FFA_MSG_SEND_DIRECT_REQ_32:
		if (IS_ENABLED(CFG_SECURE_PARTITION) &&
		    FFA_DST(args->a1) != my_endpoint_id) {
			spmc_sp_start_thread(args);
			break;
		}

		if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
			handle_yielding_call(args);
		else
			handle_blocking_call(args);
		break;
#if defined(CFG_CORE_SEL1_SPMC)
#ifdef ARM64
	case FFA_MEM_SHARE_64:
#endif
	case FFA_MEM_SHARE_32:
		handle_mem_share(args, &nw_rxtx);
		break;
	case FFA_MEM_RECLAIM:
		if (!IS_ENABLED(CFG_SECURE_PARTITION) ||
		    !ffa_mem_reclaim(args, NULL))
			handle_mem_reclaim(args);
		break;
	case FFA_MEM_FRAG_TX:
		handle_mem_frag_tx(args, &nw_rxtx);
		break;
#endif /*CFG_CORE_SEL1_SPMC*/
	default:
		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
	}
}

static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset)
{
	size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
	struct thread_ctx *thr = threads + thread_get_id();
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	struct optee_msg_arg *arg = NULL;
	struct mobj *mobj = NULL;
	uint32_t num_params = 0;
	size_t sz = 0;

	mobj = mobj_ffa_get_by_cookie(cookie, 0);
	if (!mobj) {
		EMSG("Can't find cookie %#"PRIx64, cookie);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	res = mobj_inc_map(mobj);
	if (res)
		goto out_put_mobj;

	res = TEE_ERROR_BAD_PARAMETERS;
	arg = mobj_get_va(mobj, offset, sizeof(*arg));
	if (!arg)
		goto out_dec_map;

	num_params = READ_ONCE(arg->num_params);
	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
		goto out_dec_map;

	sz = OPTEE_MSG_GET_ARG_SIZE(num_params);

	thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc);
	if (!thr->rpc_arg)
		goto out_dec_map;

	res = tee_entry_std(arg, num_params);

	thread_rpc_shm_cache_clear(&thr->shm_cache);
	thr->rpc_arg = NULL;

out_dec_map:
	mobj_dec_map(mobj);
out_put_mobj:
	mobj_put(mobj);
	return res;
}

/*
 * Helper routine for the assembly function thread_std_smc_entry()
 *
 * Note: this function is weak just to make link_dummies_paged.c happy.
 */
uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
				       uint32_t a2, uint32_t a3,
				       uint32_t a4, uint32_t a5 __unused)
{
	/*
	 * Arguments are supplied from handle_yielding_call() as:
	 * a0 <- w1
	 * a1 <- w3
	 * a2 <- w4
	 * a3 <- w5
	 * a4 <- w6
	 * a5 <- w7
	 */
	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
	if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
		return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
	return FFA_DENIED;
}

static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
{
	uint64_t offs = tpm->u.memref.offs;

	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;

	param->u.fmem.offs_low = offs;
	param->u.fmem.offs_high = offs >> 32;
	if (param->u.fmem.offs_high != offs >> 32)
		return false;

	param->u.fmem.size = tpm->u.memref.size;
	if (tpm->u.memref.mobj) {
		uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);

		/* If a mobj is passed it must have a valid cookie. */
		if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
			return false;
		param->u.fmem.global_id = cookie;
	} else {
		param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
	}

	return true;
}

static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
			    struct thread_param *params,
			    struct optee_msg_arg **arg_ret)
{
	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
	struct thread_ctx *thr = threads + thread_get_id();
	struct optee_msg_arg *arg = thr->rpc_arg;

	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!arg) {
		EMSG("rpc_arg not set");
		return TEE_ERROR_GENERIC;
	}

	memset(arg, 0, sz);
	arg->cmd = cmd;
	arg->num_params = num_params;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */

	for (size_t n = 0; n < num_params; n++) {
		switch (params[n].attr) {
		case THREAD_PARAM_ATTR_NONE:
			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
			break;
		case THREAD_PARAM_ATTR_VALUE_IN:
		case THREAD_PARAM_ATTR_VALUE_OUT:
		case THREAD_PARAM_ATTR_VALUE_INOUT:
			arg->params[n].attr = params[n].attr -
					      THREAD_PARAM_ATTR_VALUE_IN +
					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
			arg->params[n].u.value.a = params[n].u.value.a;
			arg->params[n].u.value.b = params[n].u.value.b;
			arg->params[n].u.value.c = params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_IN:
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			if (!set_fmem(arg->params + n, params + n))
				return TEE_ERROR_BAD_PARAMETERS;
			break;
		default:
			return TEE_ERROR_BAD_PARAMETERS;
		}
	}

	if (arg_ret)
		*arg_ret = arg;

	return TEE_SUCCESS;
}

static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
				struct thread_param *params)
{
	for (size_t n = 0; n < num_params; n++) {
		switch (params[n].attr) {
		case THREAD_PARAM_ATTR_VALUE_OUT:
		case THREAD_PARAM_ATTR_VALUE_INOUT:
			params[n].u.value.a = arg->params[n].u.value.a;
			params[n].u.value.b = arg->params[n].u.value.b;
			params[n].u.value.c = arg->params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			params[n].u.memref.size = arg->params[n].u.fmem.size;
			break;
		default:
			break;
		}
	}

	return arg->ret;
}

uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct thread_param *params)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct optee_msg_arg *arg = NULL;
	uint32_t ret = 0;

	ret = get_rpc_arg(cmd, num_params, params, &arg);
	if (ret)
		return ret;

	thread_rpc(&rpc_arg);

	return get_rpc_arg_res(arg, num_params, params);
}

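/*
 * Illustrative use of thread_rpc_cmd() (callers live elsewhere in the
 * kernel); assuming a service returning a single value parameter, e.g.:
 *
 *	struct thread_param p = THREAD_PARAM_VALUE(OUT, 0, 0, 0);
 *
 *	if (thread_rpc_cmd(OPTEE_RPC_CMD_GET_TIME, 1, &p) == TEE_SUCCESS)
 *		consume(p.u.value.a, p.u.value.b);
 *
 * get_rpc_arg() serializes the parameters into the struct optee_msg_arg
 * shared with normal world and get_rpc_arg_res() copies the results back
 * once thread_rpc() returns.
 */
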
static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
	uint32_t res2 = 0;
	uint32_t res = 0;

	DMSG("freeing cookie %#"PRIx64, cookie);

	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);

	mobj_put(mobj);
	res2 = mobj_ffa_unregister_by_cookie(cookie);
	if (res2)
		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
		     cookie, res2);
	if (!res)
		thread_rpc(&rpc_arg);
}

static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
	struct optee_msg_arg *arg = NULL;
	unsigned int internal_offset = 0;
	struct mobj *mobj = NULL;
	uint64_t cookie = 0;

	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
		return NULL;

	thread_rpc(&rpc_arg);

	if (arg->num_params != 1 ||
	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
		return NULL;

	internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
	cookie = READ_ONCE(arg->params->u.fmem.global_id);
	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
	if (!mobj) {
		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
		     cookie, internal_offset);
		return NULL;
	}

	assert(mobj_is_nonsec(mobj));

	if (mobj->size < size) {
		DMSG("Mobj %#"PRIx64": wrong size", cookie);
		mobj_put(mobj);
		return NULL;
	}

	if (mobj_inc_map(mobj)) {
		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
		mobj_put(mobj);
		return NULL;
	}

	return mobj;
}

struct mobj *thread_rpc_alloc_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
}

struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
}

void thread_rpc_free_kernel_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL, mobj_get_cookie(mobj), mobj);
}

void thread_rpc_free_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
			mobj);
}

struct mobj *thread_rpc_alloc_global_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
}

void thread_rpc_free_global_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL, mobj_get_cookie(mobj),
			mobj);
}

void thread_spmc_register_secondary_ep(vaddr_t ep)
{
	unsigned long ret = 0;

	/* Let the SPM know the entry point for secondary CPUs */
	ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0);

	if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64)
		EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret);
}

#if defined(CFG_CORE_SEL1_SPMC)
static TEE_Result spmc_init(void)
{
	my_endpoint_id = SPMC_ENDPOINT_ID;
	DMSG("My endpoint ID %#x", my_endpoint_id);

	return TEE_SUCCESS;
}
#else /* !defined(CFG_CORE_SEL1_SPMC) */
static bool is_ffa_success(uint32_t fid)
{
#ifdef ARM64
	if (fid == FFA_SUCCESS_64)
		return true;
#endif
	return fid == FFA_SUCCESS_32;
}

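/*
 * When OP-TEE is not the SPMC (the SPMC runs at a higher exception
 * level), OP-TEE acts as a regular FF-A endpoint: spmc_init() below
 * registers the statically allocated __rx_buf/__tx_buf pair with
 * FFA_RXTX_MAP and retrieves its own endpoint ID with FFA_ID_GET.
 */
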
static void spmc_rxtx_map(struct ffa_rxtx *rxtx)
{
	struct thread_smc_args args = {
#ifdef ARM64
		.a0 = FFA_RXTX_MAP_64,
#else
		.a0 = FFA_RXTX_MAP_32,
#endif
		.a1 = virt_to_phys(rxtx->tx),
		.a2 = virt_to_phys(rxtx->rx),
		.a3 = 1,
	};

	thread_smccc(&args);
	if (!is_ffa_success(args.a0)) {
		if (args.a0 == FFA_ERROR)
			EMSG("rxtx map failed with error %ld", args.a2);
		else
			EMSG("rxtx map failed");
		panic();
	}
}

static uint16_t spmc_get_id(void)
{
	struct thread_smc_args args = {
		.a0 = FFA_ID_GET,
	};

	thread_smccc(&args);
	if (!is_ffa_success(args.a0)) {
		if (args.a0 == FFA_ERROR)
			EMSG("Get id failed with error %ld", args.a2);
		else
			EMSG("Get id failed");
		panic();
	}

	return args.a2;
}

static struct ffa_mem_transaction *spmc_retrieve_req(uint64_t cookie)
{
	struct ffa_mem_transaction *trans_descr = nw_rxtx.tx;
	struct ffa_mem_access *acc_descr_array = NULL;
	struct ffa_mem_access_perm *perm_descr = NULL;
	size_t size = sizeof(*trans_descr) +
		      1 * sizeof(struct ffa_mem_access);
	struct thread_smc_args args = {
		.a0 = FFA_MEM_RETRIEVE_REQ_32,
		.a1 = size,	/* Total Length */
		.a2 = size,	/* Frag Length == Total length */
		.a3 = 0,	/* Address, Using TX -> MBZ */
		.a4 = 0,	/* Using TX -> MBZ */
	};

	memset(trans_descr, 0, size);
	trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
	trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
	trans_descr->global_handle = cookie;
	trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
			     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
	trans_descr->mem_access_count = 1;
	acc_descr_array = trans_descr->mem_access_array;
	acc_descr_array->region_offs = 0;
	acc_descr_array->reserved = 0;
	perm_descr = &acc_descr_array->access_perm;
	perm_descr->endpoint_id = my_endpoint_id;
	perm_descr->perm = FFA_MEM_ACC_RW;
	perm_descr->flags = 0;

	thread_smccc(&args);
	if (args.a0 != FFA_MEM_RETRIEVE_RESP) {
		if (args.a0 == FFA_ERROR)
			EMSG("Failed to fetch cookie %#"PRIx64" error code %d",
			     cookie, (int)args.a2);
		else
			EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64,
			     cookie, args.a0);
		return NULL;
	}

	return nw_rxtx.rx;
}

void thread_spmc_relinquish(uint64_t cookie)
{
	struct ffa_mem_relinquish *relinquish_desc = nw_rxtx.tx;
	struct thread_smc_args args = {
		.a0 = FFA_MEM_RELINQUISH,
	};

	memset(relinquish_desc, 0, sizeof(*relinquish_desc));
	relinquish_desc->handle = cookie;
	relinquish_desc->flags = 0;
	relinquish_desc->endpoint_count = 1;
	relinquish_desc->endpoint_id_array[0] = my_endpoint_id;
	thread_smccc(&args);
	if (!is_ffa_success(args.a0))
		EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
}

static int set_pages(struct ffa_address_range *regions,
		     unsigned int num_regions, unsigned int num_pages,
		     struct mobj_ffa *mf)
{
	unsigned int n = 0;
	unsigned int idx = 0;

	for (n = 0; n < num_regions; n++) {
		unsigned int page_count = READ_ONCE(regions[n].page_count);
		uint64_t addr = READ_ONCE(regions[n].address);

		if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count))
			return FFA_INVALID_PARAMETERS;
	}

	if (idx != num_pages)
		return FFA_INVALID_PARAMETERS;

	return 0;
}

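/*
 * Outline of thread_spmc_populate_mobj_from_rx() below:
 * spmc_retrieve_req() sends FFA_MEM_RETRIEVE_REQ_32 using the TX buffer,
 * the SPMC answers with FFA_MEM_RETRIEVE_RESP in the RX buffer,
 * set_pages() translates the returned address ranges into a mobj_ffa and
 * the RX buffer is finally released with FFA_RX_RELEASE.
 */
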
struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie)
{
	struct mobj_ffa *ret = NULL;
	struct ffa_mem_transaction *retrieve_desc = NULL;
	struct ffa_mem_access *descr_array = NULL;
	struct ffa_mem_region *descr = NULL;
	struct mobj_ffa *mf = NULL;
	unsigned int num_pages = 0;
	unsigned int offs = 0;
	struct thread_smc_args ffa_rx_release_args = {
		.a0 = FFA_RX_RELEASE
	};

	/*
	 * OP-TEE only supports a single mem_region, while the
	 * specification allows for more than one.
	 */
	retrieve_desc = spmc_retrieve_req(cookie);
	if (!retrieve_desc) {
		EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
		     cookie);
		return NULL;
	}

	descr_array = retrieve_desc->mem_access_array;
	offs = READ_ONCE(descr_array->region_offs);
	descr = (struct ffa_mem_region *)((vaddr_t)retrieve_desc + offs);

	num_pages = READ_ONCE(descr->total_page_count);
	mf = mobj_ffa_spmc_new(cookie, num_pages);
	if (!mf)
		goto out;

	if (set_pages(descr->address_range_array,
		      READ_ONCE(descr->address_range_count), num_pages, mf)) {
		mobj_ffa_spmc_delete(mf);
		goto out;
	}

	ret = mf;

out:
	/* Release RX buffer after the mem retrieve request. */
	thread_smccc(&ffa_rx_release_args);

	return ret;
}

static TEE_Result spmc_init(void)
{
	spmc_rxtx_map(&nw_rxtx);
	my_endpoint_id = spmc_get_id();
	DMSG("My endpoint ID %#x", my_endpoint_id);

	return TEE_SUCCESS;
}
#endif /* !defined(CFG_CORE_SEL1_SPMC) */

service_init(spmc_init);