// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2020, Linaro Limited
 */

#include <assert.h>
#include <bitstring.h>
#include <config.h>
#include <ffa.h>
#include <initcall.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/thread_spmc.h>
#include <kernel/virtualization.h>
#include <mm/mobj.h>
#include <sys/queue.h>

/*
 * Life cycle of struct mobj_ffa
 *
 * SPMC at S-EL1 (CFG_CORE_SEL1_SPMC=y)
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * During FFA_MEM_SHARE the mobj is allocated in mobj_ffa_sel1_spmc_new()
 * and finally added to the inactive list at the end of add_mem_share()
 * once successfully filled in.
 *        registered_by_cookie = false
 *        mobj.refc.val = 0
 *        inactive_refs = 0
 *
 * During FFA_MEM_RECLAIM the mobj is reclaimed/freed using
 * mobj_ffa_sel1_spmc_reclaim(). This will always succeed if the normal
 * world only calls this when all other threads are done with the shared
 * memory object. However, the following conditions must be met to make
 * sure that this is the case:
 *        mobj not in the active list, else -> return TEE_ERROR_BUSY
 *        mobj in the inactive list, else -> return TEE_ERROR_ITEM_NOT_FOUND
 *        mobj inactive_refs is 0, else -> return TEE_ERROR_BUSY
 *
 * A mobj is activated using mobj_ffa_get_by_cookie() which, unless the
 * mobj is already active:
 * - moves the mobj into the active list
 * - if registered_by_cookie is not set ->
 *   sets registered_by_cookie and increases inactive_refs
 * - sets mobj.refc.val to 1
 * - increases inactive_refs
 *
 * A previously activated mobj is made ready for reclaim using
 * mobj_ffa_unregister_by_cookie(), which only succeeds if the mobj is in
 * the inactive list and registered_by_cookie is set, and then:
 * - clears registered_by_cookie
 * - decreases inactive_refs
 *
 * Each successful call to mobj_ffa_get_by_cookie() must be matched by a
 * call to mobj_put(). When mobj.refc.val reaches 0:
 * - the mobj is moved to the inactive list
 * - inactive_refs is decreased
 *
 * SPMC at S-EL2/EL3 (CFG_CORE_SEL1_SPMC=n)
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * A mobj is activated/allocated using mobj_ffa_get_by_cookie(), which:
 * - if the mobj is already active, only increases mobj.refc.val and
 *   inactive_refs
 * - if the mobj is found in the inactive list, sets mobj.refc.val to 1,
 *   increases inactive_refs and moves it into the active list
 * - if the mobj is not found, creates it using
 *   thread_spmc_populate_mobj_from_rx() and then sets mobj.refc.val to 1,
 *   increases inactive_refs and moves it into the active list
 *
 * A previously activated mobj is relinquished using
 * mobj_ffa_unregister_by_cookie(), which only succeeds if the mobj is in
 * the inactive list and inactive_refs is 0.
 */
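/*
 * Illustrative S-EL1 sequence (a sketch of the common case described
 * above, not an exhaustive state machine):
 *
 *        FFA_MEM_SHARE                      refc 0, inactive_refs 0, inactive
 *        mobj_ffa_get_by_cookie()           refc 1, inactive_refs 2, active
 *                                           (registered_by_cookie now set)
 *        mobj_put()                         refc 0, inactive_refs 1, inactive
 *        mobj_ffa_unregister_by_cookie()    refc 0, inactive_refs 0, inactive
 *        FFA_MEM_RECLAIM                    mobj freed
 */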
struct mobj_ffa {
        struct mobj mobj;
        SLIST_ENTRY(mobj_ffa) link;
        uint64_t cookie;
        tee_mm_entry_t *mm;
        struct refcount mapcount;
        unsigned int inactive_refs;
        uint16_t page_offset;
#ifdef CFG_CORE_SEL1_SPMC
        bool registered_by_cookie;
#endif
        paddr_t pages[];
};

SLIST_HEAD(mobj_ffa_head, mobj_ffa);

#ifdef CFG_CORE_SEL1_SPMC
#define NUM_SHMS 64
static bitstr_t bit_decl(shm_bits, NUM_SHMS);
#endif

static struct mobj_ffa_head shm_head = SLIST_HEAD_INITIALIZER(shm_head);
static struct mobj_ffa_head shm_inactive_head =
        SLIST_HEAD_INITIALIZER(shm_inactive_head);

static unsigned int shm_lock = SPINLOCK_UNLOCK;

static const struct mobj_ops mobj_ffa_ops;

static struct mobj_ffa *to_mobj_ffa(struct mobj *mobj)
{
        assert(mobj->ops == &mobj_ffa_ops);
        return container_of(mobj, struct mobj_ffa, mobj);
}

static size_t shm_size(size_t num_pages)
{
        size_t s = 0;

        if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
                return 0;
        if (ADD_OVERFLOW(sizeof(struct mobj_ffa), s, &s))
                return 0;
        return s;
}

static struct mobj_ffa *ffa_new(unsigned int num_pages)
{
        struct mobj_ffa *mf = NULL;
        size_t s = 0;

        if (!num_pages)
                return NULL;

        s = shm_size(num_pages);
        if (!s)
                return NULL;
        mf = calloc(1, s);
        if (!mf)
                return NULL;

        mf->mobj.ops = &mobj_ffa_ops;
        mf->mobj.size = num_pages * SMALL_PAGE_SIZE;
        mf->mobj.phys_granule = SMALL_PAGE_SIZE;
        refcount_set(&mf->mobj.refc, 0);
        mf->inactive_refs = 0;

        return mf;
}

#ifdef CFG_CORE_SEL1_SPMC
struct mobj_ffa *mobj_ffa_sel1_spmc_new(uint64_t cookie,
                                        unsigned int num_pages)
{
        struct mobj_ffa *mf = NULL;
        uint32_t exceptions = 0;
        int i = 0;

        if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID) {
                if (!(cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT))
                        return NULL;
                if (virt_add_cookie_to_current_guest(cookie))
                        return NULL;
        }

        mf = ffa_new(num_pages);
        if (!mf) {
                if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
                        virt_remove_cookie(cookie);
                return NULL;
        }

        if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID) {
                mf->cookie = cookie;
                return mf;
        }

        exceptions = cpu_spin_lock_xsave(&shm_lock);
        bit_ffc(shm_bits, NUM_SHMS, &i);
        if (i != -1) {
                bit_set(shm_bits, i);
                mf->cookie = i;
                mf->cookie |= FFA_MEMORY_HANDLE_NON_SECURE_BIT;
                /*
                 * Encode the partition ID into the handle so we know which
                 * partition to switch to when reclaiming a handle.
                 */
                mf->cookie |= SHIFT_U64(virt_get_current_guest_id(),
                                        FFA_MEMORY_HANDLE_PRTN_SHIFT);
        }
        cpu_spin_unlock_xrestore(&shm_lock, exceptions);

        if (i == -1) {
                free(mf);
                return NULL;
        }

        return mf;
}
#endif /*CFG_CORE_SEL1_SPMC*/
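/*
 * Sketch of the cookie encodings handled by mobj_ffa_sel1_spmc_new() above
 * (the exact bit positions come from the FFA_MEMORY_HANDLE_* defines in
 * <ffa.h> and are not repeated here):
 * - a locally allocated cookie is a free index into shm_bits
 *   (0..NUM_SHMS - 1) with FFA_MEMORY_HANDLE_NON_SECURE_BIT set and the
 *   current guest ID shifted up by FFA_MEMORY_HANDLE_PRTN_SHIFT
 * - a cookie supplied by a hypervisor has FFA_MEMORY_HANDLE_HYPERVISOR_BIT
 *   set and is recorded per guest with virt_add_cookie_to_current_guest()
 */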
static size_t get_page_count(struct mobj_ffa *mf)
{
        return ROUNDUP(mf->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
}

static bool cmp_cookie(struct mobj_ffa *mf, uint64_t cookie)
{
        return mf->cookie == cookie;
}

static bool cmp_ptr(struct mobj_ffa *mf, uint64_t ptr)
{
        return mf == (void *)(vaddr_t)ptr;
}

static struct mobj_ffa *pop_from_list(struct mobj_ffa_head *head,
                                      bool (*cmp_func)(struct mobj_ffa *mf,
                                                       uint64_t val),
                                      uint64_t val)
{
        struct mobj_ffa *mf = SLIST_FIRST(head);
        struct mobj_ffa *p = NULL;

        if (!mf)
                return NULL;

        if (cmp_func(mf, val)) {
                SLIST_REMOVE_HEAD(head, link);
                return mf;
        }

        while (true) {
                p = SLIST_NEXT(mf, link);
                if (!p)
                        return NULL;
                if (cmp_func(p, val)) {
                        SLIST_REMOVE_AFTER(mf, link);
                        return p;
                }
                mf = p;
        }
}

static struct mobj_ffa *find_in_list(struct mobj_ffa_head *head,
                                     bool (*cmp_func)(struct mobj_ffa *mf,
                                                      uint64_t val),
                                     uint64_t val)
{
        struct mobj_ffa *mf = NULL;

        SLIST_FOREACH(mf, head, link)
                if (cmp_func(mf, val))
                        return mf;

        return NULL;
}

#if defined(CFG_CORE_SEL1_SPMC)
void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mf)
{
        if (!IS_ENABLED(CFG_NS_VIRTUALIZATION) ||
            !(mf->cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT)) {
                uint64_t mask = FFA_MEMORY_HANDLE_NON_SECURE_BIT;
                uint32_t exceptions = 0;
                int64_t i = 0;

                if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
                        mask |= SHIFT_U64(FFA_MEMORY_HANDLE_PRTN_MASK,
                                          FFA_MEMORY_HANDLE_PRTN_SHIFT);
                i = mf->cookie & ~mask;
                assert(i >= 0 && i < NUM_SHMS);

                exceptions = cpu_spin_lock_xsave(&shm_lock);
                assert(bit_test(shm_bits, i));
                bit_clear(shm_bits, i);
                cpu_spin_unlock_xrestore(&shm_lock, exceptions);
        }

        assert(!mf->mm);
        free(mf);
}
#else /* !defined(CFG_CORE_SEL1_SPMC) */
struct mobj_ffa *mobj_ffa_spmc_new(uint64_t cookie, unsigned int num_pages)
{
        struct mobj_ffa *mf = NULL;

        assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
        mf = ffa_new(num_pages);
        if (mf)
                mf->cookie = cookie;
        return mf;
}

void mobj_ffa_spmc_delete(struct mobj_ffa *mf)
{
        free(mf);
}
#endif /* !defined(CFG_CORE_SEL1_SPMC) */

TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mf, unsigned int *idx,
                                 paddr_t pa, unsigned int num_pages)
{
        unsigned int n = 0;
        size_t tot_page_count = get_page_count(mf);

        if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
                return TEE_ERROR_BAD_PARAMETERS;

        if (!IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
            !core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))
                return TEE_ERROR_BAD_PARAMETERS;

        for (n = 0; n < num_pages; n++)
                mf->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;

        (*idx) += n;
        return TEE_SUCCESS;
}
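/*
 * Illustrative use of mobj_ffa_add_pages_at() (a sketch, not taken from a
 * real caller): registering two physically non-contiguous ranges into one
 * mobj reuses a single running index, since the function appends num_pages
 * entries at *idx and then advances *idx:
 *
 *        unsigned int idx = 0;
 *
 *        res = mobj_ffa_add_pages_at(mf, &idx, pa0, 2);  // idx == 2
 *        res = mobj_ffa_add_pages_at(mf, &idx, pa1, 3);  // idx == 5
 */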
uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mf)
{
        return mf->cookie;
}

uint64_t mobj_ffa_push_to_inactive(struct mobj_ffa *mf)
{
        uint32_t exceptions = 0;

        exceptions = cpu_spin_lock_xsave(&shm_lock);
        assert(!find_in_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf));
        assert(!find_in_list(&shm_inactive_head, cmp_cookie, mf->cookie));
        assert(!find_in_list(&shm_head, cmp_cookie, mf->cookie));
        SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
        cpu_spin_unlock_xrestore(&shm_lock, exceptions);

        return mf->cookie;
}

static void unmap_helper(struct mobj_ffa *mf)
{
        if (mf->mm) {
                core_mmu_unmap_pages(tee_mm_get_smem(mf->mm),
                                     get_page_count(mf));
                tee_mm_free(mf->mm);
                mf->mm = NULL;
        }
}

#ifdef CFG_CORE_SEL1_SPMC
TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie)
{
        TEE_Result res = TEE_SUCCESS;
        struct mobj_ffa *mf = NULL;
        uint32_t exceptions = 0;

        exceptions = cpu_spin_lock_xsave(&shm_lock);
        mf = find_in_list(&shm_head, cmp_cookie, cookie);
        /*
         * If the mobj is found here it's still active and cannot be
         * reclaimed.
         */
        if (mf) {
                DMSG("cookie %#"PRIx64" busy refc %u",
                     cookie, refcount_val(&mf->mobj.refc));
                res = TEE_ERROR_BUSY;
                goto out;
        }

        mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
        if (!mf) {
                res = TEE_ERROR_ITEM_NOT_FOUND;
                goto out;
        }
        /*
         * The mobj has been registered via mobj_ffa_get_by_cookie() but
         * not yet unregistered with mobj_ffa_unregister_by_cookie(), so it
         * cannot be reclaimed.
         */
        if (mf->inactive_refs) {
                DMSG("cookie %#"PRIx64" busy inactive_refs %u",
                     cookie, mf->inactive_refs);
                res = TEE_ERROR_BUSY;
                goto out;
        }

        if (!pop_from_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf))
                panic();
        res = TEE_SUCCESS;
out:
        cpu_spin_unlock_xrestore(&shm_lock, exceptions);
        if (!res) {
                mobj_ffa_sel1_spmc_delete(mf);
                virt_remove_cookie(cookie);
        }
        return res;
}
#endif /*CFG_CORE_SEL1_SPMC*/

TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie)
{
        TEE_Result res = TEE_SUCCESS;
        struct mobj_ffa *mf = NULL;
        uint32_t exceptions = 0;

        assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
        exceptions = cpu_spin_lock_xsave(&shm_lock);
        mf = find_in_list(&shm_head, cmp_cookie, cookie);
        /*
         * If the mobj is found here it's still active and cannot be
         * unregistered.
         */
        if (mf) {
                EMSG("cookie %#"PRIx64" busy refc %u:%u",
                     cookie, refcount_val(&mf->mobj.refc), mf->inactive_refs);
                res = TEE_ERROR_BUSY;
                goto out;
        }
        mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
        /*
         * The mobj isn't found, or it has already been unregistered.
         */
        if (!mf) {
                EMSG("cookie %#"PRIx64" not found", cookie);
                res = TEE_ERROR_ITEM_NOT_FOUND;
                goto out;
        }
#if defined(CFG_CORE_SEL1_SPMC)
        if (!mf->registered_by_cookie) {
                /*
                 * This is expected behaviour if the normal world has
                 * registered the memory but OP-TEE has not yet used the
                 * corresponding cookie with mobj_ffa_get_by_cookie(). It
                 * can be non-trivial for the normal world to predict if
                 * the cookie really has been used or not. So even if we
                 * return this as an error it will be ignored by
                 * handle_unregister_shm().
                 */
                EMSG("cookie %#"PRIx64" not registered refs %u:%u",
                     cookie, refcount_val(&mf->mobj.refc), mf->inactive_refs);
                res = TEE_ERROR_ITEM_NOT_FOUND;
                goto out;
        }
        assert(mf->inactive_refs);
        mf->inactive_refs--;
        mf->registered_by_cookie = false;
#else
        if (mf->inactive_refs) {
                EMSG("cookie %#"PRIx64" busy refc %u:%u",
                     cookie, refcount_val(&mf->mobj.refc), mf->inactive_refs);
                res = TEE_ERROR_BUSY;
                goto out;
        }
        mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
        mobj_ffa_spmc_delete(mf);
        thread_spmc_relinquish(cookie);
#endif
        res = TEE_SUCCESS;

out:
        cpu_spin_unlock_xrestore(&shm_lock, exceptions);
        return res;
}
struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie,
                                    unsigned int internal_offs)
{
        struct mobj_ffa *mf = NULL;
        uint32_t exceptions = 0;

        if (internal_offs >= SMALL_PAGE_SIZE)
                return NULL;
        exceptions = cpu_spin_lock_xsave(&shm_lock);
        mf = find_in_list(&shm_head, cmp_cookie, cookie);
        if (mf) {
                if (mf->page_offset == internal_offs) {
                        if (!refcount_inc(&mf->mobj.refc)) {
                                /*
                                 * The refcount is 0: another thread has
                                 * called mobj_put() and brought it to 0,
                                 * but we found the mobj here before
                                 * ffa_inactivate() got the lock. So
                                 * reinitialize it.
                                 */
                                refcount_set(&mf->mobj.refc, 1);
                                mf->inactive_refs++;
                        }
                        DMSG("cookie %#"PRIx64" active: refc %u:%u",
                             cookie, refcount_val(&mf->mobj.refc),
                             mf->inactive_refs);
                } else {
                        EMSG("cookie %#"PRIx64" mismatching internal_offs got %#"PRIx16" expected %#x",
                             cookie, mf->page_offset, internal_offs);
                        mf = NULL;
                }
        } else {
                mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
#if !defined(CFG_CORE_SEL1_SPMC)
                /* Try to retrieve it from the SPM at S-EL2 */
                if (mf) {
                        DMSG("cookie %#"PRIx64" resurrecting", cookie);
                } else {
                        DMSG("Populating mobj from rx buffer, cookie %#"PRIx64,
                             cookie);
                        mf = thread_spmc_populate_mobj_from_rx(cookie);
                }
#endif
                if (mf) {
#if defined(CFG_CORE_SEL1_SPMC)
                        if (!mf->registered_by_cookie) {
                                mf->inactive_refs++;
                                mf->registered_by_cookie = true;
                        }
#endif
                        assert(refcount_val(&mf->mobj.refc) == 0);
                        refcount_set(&mf->mobj.refc, 1);
                        refcount_set(&mf->mapcount, 0);
                        mf->inactive_refs++;

                        /*
                         * mf->page_offset is the offset into the first
                         * page. It is assigned from the internal_offs
                         * parameter of this function.
                         *
                         * While a mobj_ffa is active (mobj.refc > 0) this
                         * offset will not change, but it can be changed
                         * again once the mobj has been pushed to the
                         * inactive list.
                         *
                         * So below we're backing out the old
                         * mf->page_offset and then assigning a new one
                         * from internal_offs.
                         */
                        mf->mobj.size += mf->page_offset;
                        assert(!(mf->mobj.size & SMALL_PAGE_MASK));
                        mf->mobj.size -= internal_offs;
                        mf->page_offset = internal_offs;

                        SLIST_INSERT_HEAD(&shm_head, mf, link);
                }
        }

        cpu_spin_unlock_xrestore(&shm_lock, exceptions);

        if (!mf) {
                EMSG("Failed to get cookie %#"PRIx64" internal_offs %#x",
                     cookie, internal_offs);
                return NULL;
        }
        return &mf->mobj;
}
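/*
 * Worked example of the page_offset adjustment above (illustrative numbers
 * only): a two-page share previously activated with internal_offs 0x200
 * has mobj.size == 2 * SMALL_PAGE_SIZE - 0x200 and page_offset == 0x200.
 * When it is reactivated with internal_offs 0x20, the old offset is backed
 * out (size += 0x200, giving a page-aligned 2 * SMALL_PAGE_SIZE) and the
 * new one applied (size -= 0x20, page_offset = 0x20).
 */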
static TEE_Result ffa_get_pa(struct mobj *mobj, size_t offset,
                             size_t granule, paddr_t *pa)
{
        struct mobj_ffa *mf = to_mobj_ffa(mobj);
        size_t full_offset = 0;
        paddr_t p = 0;

        if (!pa)
                return TEE_ERROR_GENERIC;

        if (offset >= mobj->size)
                return TEE_ERROR_GENERIC;

        full_offset = offset + mf->page_offset;
        switch (granule) {
        case 0:
                p = mf->pages[full_offset / SMALL_PAGE_SIZE] +
                    (full_offset & SMALL_PAGE_MASK);
                break;
        case SMALL_PAGE_SIZE:
                p = mf->pages[full_offset / SMALL_PAGE_SIZE];
                break;
        default:
                return TEE_ERROR_GENERIC;
        }
        *pa = p;

        return TEE_SUCCESS;
}

static size_t ffa_get_phys_offs(struct mobj *mobj,
                                size_t granule __maybe_unused)
{
        assert(granule >= mobj->phys_granule);

        return to_mobj_ffa(mobj)->page_offset;
}

static void *ffa_get_va(struct mobj *mobj, size_t offset, size_t len)
{
        struct mobj_ffa *mf = to_mobj_ffa(mobj);

        if (!mf->mm || !mobj_check_offset_and_len(mobj, offset, len))
                return NULL;

        return (void *)(tee_mm_get_smem(mf->mm) + offset + mf->page_offset);
}

static void ffa_inactivate(struct mobj *mobj)
{
        struct mobj_ffa *mf = to_mobj_ffa(mobj);
        uint32_t exceptions = 0;

        exceptions = cpu_spin_lock_xsave(&shm_lock);
        /*
         * If the refcount isn't 0, some other thread has found this mobj
         * in shm_head after the mobj_put() that brought us here and
         * before we got the lock.
         */
        if (refcount_val(&mobj->refc)) {
                DMSG("cookie %#"PRIx64" was resurrected", mf->cookie);
                goto out;
        }

        /*
         * pop_from_list() can fail to find the mobj if we had just
         * decreased the refcount to 0 in mobj_put() and were about to
         * acquire shm_lock when another thread found this mobj and
         * reinitialized the refcount to 1. Then, before we got CPU time,
         * the other thread called mobj_put() and deactivated the mobj
         * again, so it's already in the inactive list.
         *
         * However, we still hold an inactive reference which guarantees
         * that the mobj can't be freed until inactive_refs reaches 0.
         */
        if (pop_from_list(&shm_head, cmp_ptr, (vaddr_t)mf)) {
                unmap_helper(mf);
                SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
        }
out:
        if (!mf->inactive_refs)
                panic();
        mf->inactive_refs--;
        cpu_spin_unlock_xrestore(&shm_lock, exceptions);
}

static TEE_Result ffa_get_mem_type(struct mobj *mobj __unused, uint32_t *mt)
{
        if (!mt)
                return TEE_ERROR_GENERIC;

        *mt = TEE_MATTR_MEM_TYPE_CACHED;

        return TEE_SUCCESS;
}

static bool ffa_matches(struct mobj *mobj __maybe_unused, enum buf_is_attr attr)
{
        assert(mobj->ops == &mobj_ffa_ops);

        return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

static uint64_t ffa_get_cookie(struct mobj *mobj)
{
        return to_mobj_ffa(mobj)->cookie;
}
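/*
 * Increase the mapping count and, on the 0 -> 1 transition, map the pages
 * into the MEM_AREA_NSEC_SHM virtual address space. The fast path is a
 * lock-free refcount_inc(); shm_lock is only taken when mapcount may be 0
 * and the mapping may have to be (re)created.
 */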
static TEE_Result ffa_inc_map(struct mobj *mobj)
{
        TEE_Result res = TEE_SUCCESS;
        struct mobj_ffa *mf = to_mobj_ffa(mobj);
        uint32_t exceptions = 0;
        size_t sz = 0;

        while (true) {
                if (refcount_inc(&mf->mapcount))
                        return TEE_SUCCESS;

                exceptions = cpu_spin_lock_xsave(&shm_lock);

                if (!refcount_val(&mf->mapcount))
                        break; /* continue to reinitialize */
                /*
                 * If another thread beat us to initialize mapcount,
                 * restart to make sure we still increase it.
                 */
                cpu_spin_unlock_xrestore(&shm_lock, exceptions);
        }

        /*
         * If we have beaten another thread calling ffa_dec_map() to the
         * lock, the mapping is still in place and we only need to
         * reinitialize mapcount to 1.
         */
        if (!mf->mm) {
                sz = ROUNDUP(mobj->size + mf->page_offset, SMALL_PAGE_SIZE);
                mf->mm = tee_mm_alloc(&tee_mm_shm, sz);
                if (!mf->mm) {
                        res = TEE_ERROR_OUT_OF_MEMORY;
                        goto out;
                }

                res = core_mmu_map_pages(tee_mm_get_smem(mf->mm), mf->pages,
                                         sz / SMALL_PAGE_SIZE,
                                         MEM_AREA_NSEC_SHM);
                if (res) {
                        tee_mm_free(mf->mm);
                        mf->mm = NULL;
                        goto out;
                }
        }

        refcount_set(&mf->mapcount, 1);
out:
        cpu_spin_unlock_xrestore(&shm_lock, exceptions);

        return res;
}

static TEE_Result ffa_dec_map(struct mobj *mobj)
{
        struct mobj_ffa *mf = to_mobj_ffa(mobj);
        uint32_t exceptions = 0;

        if (!refcount_dec(&mf->mapcount))
                return TEE_SUCCESS;

        exceptions = cpu_spin_lock_xsave(&shm_lock);
        if (!refcount_val(&mf->mapcount))
                unmap_helper(mf);
        cpu_spin_unlock_xrestore(&shm_lock, exceptions);

        return TEE_SUCCESS;
}

static TEE_Result mapped_shm_init(void)
{
        vaddr_t pool_start = 0;
        vaddr_t pool_end = 0;

        core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
        if (!pool_start || !pool_end)
                panic("Can't find region for shmem pool");

        if (!tee_mm_init(&tee_mm_shm, pool_start, pool_end - pool_start,
                         SMALL_PAGE_SHIFT, TEE_MM_POOL_NO_FLAGS))
                panic("Could not create shmem pool");

        DMSG("Shared memory address range: %#"PRIxVA", %#"PRIxVA,
             pool_start, pool_end);
        return TEE_SUCCESS;
}

static const struct mobj_ops mobj_ffa_ops = {
        .get_pa = ffa_get_pa,
        .get_phys_offs = ffa_get_phys_offs,
        .get_va = ffa_get_va,
        .get_mem_type = ffa_get_mem_type,
        .matches = ffa_matches,
        .free = ffa_inactivate,
        .get_cookie = ffa_get_cookie,
        .inc_map = ffa_inc_map,
        .dec_map = ffa_dec_map,
};

preinit(mapped_shm_init);