// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2020, Linaro Limited
 */

#include <assert.h>
#include <bitstring.h>
#include <initcall.h>
#include <inttypes.h>
#include <keep.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <optee_msg.h>
#include <stdlib.h>
#include <sys/queue.h>
#include <trace.h>
#include <types_ext.h>
#include <util.h>

struct mobj_ffa {
	struct mobj mobj;
	SLIST_ENTRY(mobj_ffa) link;
	uint64_t cookie;		/* Global ID (handle) of this share */
	tee_mm_entry_t *mm;		/* VA range while mapped, else NULL */
	struct refcount mapcount;	/* Number of inc_map() references */
	uint16_t page_offset;		/* Offset into the first page */
#ifdef CFG_CORE_SEL1_SPMC
	bool registered_by_cookie;
	bool unregistered_by_cookie;
#endif
	paddr_t pages[];		/* Physical address of each page */
};

SLIST_HEAD(mobj_ffa_head, mobj_ffa);

#ifdef CFG_CORE_SEL1_SPMC
#define NUM_SHMS	64
static bitstr_t bit_decl(shm_bits, NUM_SHMS);
#endif

/*
 * Objects with a non-zero reference count are kept in shm_head, the
 * rest are kept in shm_inactive_head.
 */
static struct mobj_ffa_head shm_head = SLIST_HEAD_INITIALIZER(shm_head);
static struct mobj_ffa_head shm_inactive_head =
	SLIST_HEAD_INITIALIZER(shm_inactive_head);

static unsigned int shm_lock = SPINLOCK_UNLOCK;

const struct mobj_ops mobj_ffa_ops;

static struct mobj_ffa *to_mobj_ffa(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_ffa_ops);
	return container_of(mobj, struct mobj_ffa, mobj);
}

static size_t shm_size(size_t num_pages)
{
	size_t s = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_ffa), s, &s))
		return 0;
	return s;
}

static struct mobj_ffa *ffa_new(unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	size_t s = 0;

	if (!num_pages)
		return NULL;

	s = shm_size(num_pages);
	if (!s)
		return NULL;
	mf = calloc(1, s);
	if (!mf)
		return NULL;

	mf->mobj.ops = &mobj_ffa_ops;
	mf->mobj.size = num_pages * SMALL_PAGE_SIZE;
	mf->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&mf->mobj.refc, 0);

	return mf;
}

#ifdef CFG_CORE_SEL1_SPMC
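/*
 * Allocates a new shared memory object and assigns it a free cookie when
 * OP-TEE itself acts as the SPMC at S-EL1. The cookie is the index of a
 * free bit in shm_bits, with bit 44 set on top so that cookies also
 * exercise the upper 32 bits of the 64-bit value. Returns NULL if the
 * allocation fails or if all NUM_SHMS cookies are in use.
 */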
struct mobj_ffa *mobj_ffa_sel1_spmc_new(unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	mf = ffa_new(num_pages);
	if (!mf)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	bit_ffc(shm_bits, NUM_SHMS, &i);
	if (i != -1) {
		bit_set(shm_bits, i);
		/*
		 * Setting bit 44 to use one of the upper 32 bits too for
		 * testing.
		 */
		mf->cookie = i | BIT64(44);
	}
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (i == -1) {
		free(mf);
		return NULL;
	}

	return mf;
}
#endif /*CFG_CORE_SEL1_SPMC*/

static size_t get_page_count(struct mobj_ffa *mf)
{
	return ROUNDUP(mf->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
}

static bool cmp_cookie(struct mobj_ffa *mf, uint64_t cookie)
{
	return mf->cookie == cookie;
}

static bool cmp_ptr(struct mobj_ffa *mf, uint64_t ptr)
{
	return mf == (void *)(vaddr_t)ptr;
}

static struct mobj_ffa *pop_from_list(struct mobj_ffa_head *head,
				      bool (*cmp_func)(struct mobj_ffa *mf,
						       uint64_t val),
				      uint64_t val)
{
	struct mobj_ffa *mf = SLIST_FIRST(head);
	struct mobj_ffa *p = NULL;

	if (!mf)
		return NULL;

	if (cmp_func(mf, val)) {
		SLIST_REMOVE_HEAD(head, link);
		return mf;
	}

	while (true) {
		p = SLIST_NEXT(mf, link);
		if (!p)
			return NULL;
		if (cmp_func(p, val)) {
			SLIST_REMOVE_AFTER(mf, link);
			return p;
		}
		mf = p;
	}
}

static struct mobj_ffa *find_in_list(struct mobj_ffa_head *head,
				     bool (*cmp_func)(struct mobj_ffa *mf,
						      uint64_t val),
				     uint64_t val)
{
	struct mobj_ffa *mf = NULL;

	SLIST_FOREACH(mf, head, link)
		if (cmp_func(mf, val))
			return mf;

	return NULL;
}

#ifdef CFG_CORE_SEL1_SPMC
void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mf)
{
	int i = mf->cookie & ~BIT64(44);
	uint32_t exceptions = 0;

	assert(i >= 0 && i < NUM_SHMS);

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	assert(bit_test(shm_bits, i));
	bit_clear(shm_bits, i);
	assert(!mf->mm);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	free(mf);
}
#endif /*CFG_CORE_SEL1_SPMC*/

#ifdef CFG_CORE_SEL2_SPMC
struct mobj_ffa *mobj_ffa_sel2_spmc_new(uint64_t cookie,
					unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;

	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
	mf = ffa_new(num_pages);
	if (mf)
		mf->cookie = cookie;
	return mf;
}

void mobj_ffa_sel2_spmc_delete(struct mobj_ffa *mf)
{
	free(mf);
}
#endif /*CFG_CORE_SEL2_SPMC*/

TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mf, unsigned int *idx,
				 paddr_t pa, unsigned int num_pages)
{
	unsigned int n = 0;
	size_t tot_page_count = get_page_count(mf);

	if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))
		return TEE_ERROR_BAD_PARAMETERS;

	for (n = 0; n < num_pages; n++)
		mf->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;

	(*idx) += n;
	return TEE_SUCCESS;
}

uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mf)
{
	return mf->cookie;
}

uint64_t mobj_ffa_push_to_inactive(struct mobj_ffa *mf)
{
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	assert(!find_in_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf));
	assert(!find_in_list(&shm_inactive_head, cmp_cookie, mf->cookie));
	assert(!find_in_list(&shm_head, cmp_cookie, mf->cookie));
	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return mf->cookie;
}

static void unmap_helper(struct mobj_ffa *mf)
{
	if (mf->mm) {
		core_mmu_unmap_pages(tee_mm_get_smem(mf->mm),
				     get_page_count(mf));
		tee_mm_free(mf->mm);
		mf->mm = NULL;
	}
}

#ifdef CFG_CORE_SEL1_SPMC
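/*
 * Releases the shared memory object identified by @cookie, typically in
 * response to an FFA_MEM_RECLAIM request from the normal world. The
 * object can only be reclaimed if it's inactive and not currently held
 * via mobj_ffa_get_by_cookie(); otherwise TEE_ERROR_BUSY is returned.
 * On success it's removed from the inactive list and freed.
 */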
TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * reclaimed.
	 */
	if (mf) {
		DMSG("cookie %#"PRIx64" busy refc %u",
		     cookie, refcount_val(&mf->mobj.refc));
		res = TEE_ERROR_BUSY;
		goto out;
	}

	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	if (!mf) {
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	/*
	 * If the mobj has been registered via mobj_ffa_get_by_cookie()
	 * but not unregistered yet with mobj_ffa_unregister_by_cookie().
	 */
	if (mf->registered_by_cookie && !mf->unregistered_by_cookie) {
		DMSG("cookie %#"PRIx64" busy", cookie);
		res = TEE_ERROR_BUSY;
		goto out;
	}

	if (!pop_from_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf))
		panic();
	res = TEE_SUCCESS;
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	if (!res)
		mobj_ffa_sel1_spmc_delete(mf);
	return res;
}
#endif /*CFG_CORE_SEL1_SPMC*/

TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * unregistered.
	 */
	if (mf) {
		DMSG("cookie %#"PRIx64" busy refc %u",
		     cookie, refcount_val(&mf->mobj.refc));
		res = TEE_ERROR_BUSY;
		goto out;
	}
	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	/*
	 * If the mobj isn't found or if it already has been unregistered.
	 */
#ifdef CFG_CORE_SEL2_SPMC
	if (!mf) {
#else
	if (!mf || mf->unregistered_by_cookie) {
#endif
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}

#ifdef CFG_CORE_SEL2_SPMC
	mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
	mobj_ffa_sel2_spmc_delete(mf);
	thread_spmc_relinquish(cookie);
#else
	mf->unregistered_by_cookie = true;
#endif
	res = TEE_SUCCESS;

out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	return res;
}

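/*
 * Looks up the shared memory object identified by @cookie and returns it
 * as an active struct mobj. If the object is already active its reference
 * counter is increased, otherwise it's moved from the inactive list (or,
 * with an S-EL2 SPMC, populated via thread_spmc_populate_mobj_from_rx()),
 * reinitialized with @internal_offs and inserted into the active list.
 * The reference is dropped with mobj_put(), which eventually calls
 * ffa_inactivate().
 */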
struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie,
				    unsigned int internal_offs)
{
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	if (internal_offs >= SMALL_PAGE_SIZE)
		return NULL;
	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	if (mf) {
		if (mf->page_offset == internal_offs) {
			if (!refcount_inc(&mf->mobj.refc)) {
				/*
				 * If the refcount was 0, another thread
				 * has called mobj_put() and dropped it
				 * to 0, but we found the mobj before
				 * ffa_inactivate() got the lock. Let's
				 * reinitialize the refcount.
				 */
				refcount_set(&mf->mobj.refc, 1);
			}
			DMSG("cookie %#"PRIx64" active: refc %u",
			     cookie, refcount_val(&mf->mobj.refc));
		} else {
			EMSG("cookie %#"PRIx64" mismatching internal_offs got %#"PRIx16" expected %#x",
			     cookie, mf->page_offset, internal_offs);
			mf = NULL;
		}
	} else {
		mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
#if defined(CFG_CORE_SEL2_SPMC)
		/* Try to retrieve it from the SPM at S-EL2 */
		if (mf) {
			DMSG("cookie %#"PRIx64" resurrecting", cookie);
		} else {
			EMSG("Populating mobj from rx buffer, cookie %#"PRIx64,
			     cookie);
			mf = thread_spmc_populate_mobj_from_rx(cookie);
		}
#endif
		if (mf) {
#if defined(CFG_CORE_SEL1_SPMC)
			mf->unregistered_by_cookie = false;
			mf->registered_by_cookie = true;
#endif
			assert(refcount_val(&mf->mobj.refc) == 0);
			refcount_set(&mf->mobj.refc, 1);
			refcount_set(&mf->mapcount, 0);

			/*
			 * mf->page_offset is the offset into the first
			 * page. It is assigned from the internal_offs
			 * parameter to this function.
			 *
			 * While a mobj_ffa is active (refc > 0) this
			 * will not change, but when the mobj is pushed
			 * to the inactive list it can be changed again.
			 *
			 * So below we back out the old mf->page_offset
			 * and then assign a new one from internal_offs.
			 */
			mf->mobj.size += mf->page_offset;
			assert(!(mf->mobj.size & SMALL_PAGE_MASK));
			mf->mobj.size -= internal_offs;
			mf->page_offset = internal_offs;

			SLIST_INSERT_HEAD(&shm_head, mf, link);
		}
	}

	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (!mf) {
		EMSG("Failed to get cookie %#"PRIx64" internal_offs %#x",
		     cookie, internal_offs);
		return NULL;
	}
	return &mf->mobj;
}

static TEE_Result ffa_get_pa(struct mobj *mobj, size_t offset,
			     size_t granule, paddr_t *pa)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	size_t full_offset = 0;
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offset >= mobj->size)
		return TEE_ERROR_GENERIC;

	full_offset = offset + mf->page_offset;
	switch (granule) {
	case 0:
		p = mf->pages[full_offset / SMALL_PAGE_SIZE] +
		    (full_offset & SMALL_PAGE_MASK);
		break;
	case SMALL_PAGE_SIZE:
		p = mf->pages[full_offset / SMALL_PAGE_SIZE];
		break;
	default:
		return TEE_ERROR_GENERIC;
	}
	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(ffa_get_pa);

static size_t ffa_get_phys_offs(struct mobj *mobj,
				size_t granule __maybe_unused)
{
	assert(granule >= mobj->phys_granule);

	return to_mobj_ffa(mobj)->page_offset;
}

static void *ffa_get_va(struct mobj *mobj, size_t offset)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);

	if (!mf->mm || offset >= mobj->size)
		return NULL;

	return (void *)(tee_mm_get_smem(mf->mm) + offset + mf->page_offset);
}

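/*
 * Called through mobj_ops::free when the last reference is dropped with
 * mobj_put(). The object isn't freed: it's unmapped and moved back to
 * the inactive list so it can later be resurrected by
 * mobj_ffa_get_by_cookie() or reclaimed/unregistered by cookie. The
 * refcount is rechecked under the lock since another thread may have
 * resurrected the mobj in the meantime.
 */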
static void ffa_inactivate(struct mobj *mobj)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	/*
	 * If refcount isn't 0 some other thread has found this mobj in
	 * shm_head after the mobj_put() that put us here and before we got
	 * the lock.
	 */
	if (refcount_val(&mobj->refc)) {
		DMSG("cookie %#"PRIx64" was resurrected", mf->cookie);
		goto out;
	}

	DMSG("cookie %#"PRIx64, mf->cookie);
	if (!pop_from_list(&shm_head, cmp_ptr, (vaddr_t)mf))
		panic();
	unmap_helper(mf);
	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
}

static TEE_Result ffa_get_cattr(struct mobj *mobj __unused, uint32_t *cattr)
{
	if (!cattr)
		return TEE_ERROR_GENERIC;

	*cattr = TEE_MATTR_CACHE_CACHED;

	return TEE_SUCCESS;
}

static bool ffa_matches(struct mobj *mobj __maybe_unused, enum buf_is_attr attr)
{
	assert(mobj->ops == &mobj_ffa_ops);

	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

static uint64_t ffa_get_cookie(struct mobj *mobj)
{
	return to_mobj_ffa(mobj)->cookie;
}

static TEE_Result ffa_inc_map(struct mobj *mobj)
{
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;
	struct mobj_ffa *mf = to_mobj_ffa(mobj);

	if (refcount_inc(&mf->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&shm_lock);

	if (refcount_val(&mf->mapcount))
		goto out;

	mf->mm = tee_mm_alloc(&tee_mm_shm, mf->mobj.size);
	if (!mf->mm) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out;
	}

	res = core_mmu_map_pages(tee_mm_get_smem(mf->mm), mf->pages,
				 get_page_count(mf), MEM_AREA_NSEC_SHM);
	if (res) {
		tee_mm_free(mf->mm);
		mf->mm = NULL;
		goto out;
	}

	refcount_set(&mf->mapcount, 1);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return res;
}

static TEE_Result ffa_dec_map(struct mobj *mobj)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;

	if (!refcount_dec(&mf->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	unmap_helper(mf);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return TEE_SUCCESS;
}

static TEE_Result mapped_shm_init(void)
{
	vaddr_t pool_start = 0;
	vaddr_t pool_end = 0;

	core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
	if (!pool_start || !pool_end)
		panic("Can't find region for shmem pool");

	if (!tee_mm_init(&tee_mm_shm, pool_start, pool_end, SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("Could not create shmem pool");

	DMSG("Shared memory address range: %#"PRIxVA", %#"PRIxVA,
	     pool_start, pool_end);
	return TEE_SUCCESS;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_ffa_ops __weak __rodata_unpaged("mobj_ffa_ops") = {
	.get_pa = ffa_get_pa,
	.get_phys_offs = ffa_get_phys_offs,
	.get_va = ffa_get_va,
	.get_cattr = ffa_get_cattr,
	.matches = ffa_matches,
	.free = ffa_inactivate,
	.get_cookie = ffa_get_cookie,
	.inc_map = ffa_inc_map,
	.dec_map = ffa_dec_map,
};

preinit(mapped_shm_init);