// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2024, Linaro Limited
 */

#include <assert.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <trace.h>
#include <types_ext.h>
#include <util.h>

static struct mutex shm_mu = MUTEX_INITIALIZER;
static struct condvar shm_cv = CONDVAR_INITIALIZER;
static size_t shm_release_waiters;

/*
 * mobj_reg_shm implementation. Describes shared memory provided by
 * normal world.
 */
struct mobj_reg_shm {
	struct mobj mobj;
	SLIST_ENTRY(mobj_reg_shm) next;
	uint64_t cookie;
	tee_mm_entry_t *mm;
	paddr_t page_offset;
	struct refcount mapcount;
	bool guarded;
	bool releasing;
	bool release_frees;
	paddr_t pages[];
};

/*
 * struct mobj_protmem - describes protected memory lent by normal world
 */
struct mobj_protmem {
	struct mobj mobj;
	SLIST_ENTRY(mobj_protmem) next;
	uint64_t cookie;
	paddr_t pa;
	enum mobj_use_case use_case;
	bool releasing;
	bool release_frees;
};

static size_t mobj_reg_shm_size(size_t nr_pages)
{
	size_t s = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), nr_pages, &s))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_reg_shm), s, &s))
		return 0;
	return s;
}

static SLIST_HEAD(reg_shm_head, mobj_reg_shm) reg_shm_list =
	SLIST_HEAD_INITIALIZER(reg_shm_head);

static unsigned int reg_shm_slist_lock = SPINLOCK_UNLOCK;
static unsigned int reg_shm_map_lock = SPINLOCK_UNLOCK;

/* Access is serialized with reg_shm_slist_lock */
static SLIST_HEAD(protmem_head, mobj_protmem) protmem_list =
	SLIST_HEAD_INITIALIZER(protmem_head);

static struct mobj_reg_shm *to_mobj_reg_shm(struct mobj *mobj);

static TEE_Result mobj_reg_shm_get_pa(struct mobj *mobj, size_t offst,
				      size_t granule, paddr_t *pa)
{
	struct mobj_reg_shm *mobj_reg_shm = to_mobj_reg_shm(mobj);
	size_t full_offset = 0;
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offst >= mobj->size)
		return TEE_ERROR_GENERIC;

	full_offset = offst + mobj_reg_shm->page_offset;
	switch (granule) {
	case 0:
		p = mobj_reg_shm->pages[full_offset / SMALL_PAGE_SIZE] +
		    (full_offset & SMALL_PAGE_MASK);
		break;
	case SMALL_PAGE_SIZE:
		p = mobj_reg_shm->pages[full_offset / SMALL_PAGE_SIZE];
		break;
	default:
		return TEE_ERROR_GENERIC;
	}
	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_reg_shm_get_pa);

static size_t mobj_reg_shm_get_phys_offs(struct mobj *mobj,
					 size_t granule __maybe_unused)
{
	assert(granule >= mobj->phys_granule);
	return to_mobj_reg_shm(mobj)->page_offset;
}

static void *mobj_reg_shm_get_va(struct mobj *mobj, size_t offst, size_t len)
{
	struct mobj_reg_shm *mrs = to_mobj_reg_shm(mobj);

	if (!mrs->mm || !mobj_check_offset_and_len(mobj, offst, len))
		return NULL;

	return (void *)(vaddr_t)(tee_mm_get_smem(mrs->mm) + offst +
				 mrs->page_offset);
}
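/*
 * Note on the offset arithmetic used by the accessors above: offsets are
 * relative to the start of the registered buffer, not of its first page.
 * pages[] holds page-aligned physical addresses and page_offset is the
 * buffer's offset into pages[0]. As an illustration, with page_offset set
 * to 0x800, a request for offst 0x900 gives full_offset 0x1100, which
 * resolves to pages[1] + 0x100, or to the matching location in the
 * contiguous virtual mapping returned by mobj_reg_shm_get_va().
 */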
static void reg_shm_unmap_helper(struct mobj_reg_shm *r)
{
	assert(r->mm);
	assert(r->mm->pool->shift == SMALL_PAGE_SHIFT);
	core_mmu_unmap_pages(tee_mm_get_smem(r->mm), r->mm->size);
	tee_mm_free(r->mm);
	r->mm = NULL;
}

static void reg_shm_free_helper(struct mobj_reg_shm *mobj_reg_shm)
{
	uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);

	if (mobj_reg_shm->mm)
		reg_shm_unmap_helper(mobj_reg_shm);

	cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);

	SLIST_REMOVE(&reg_shm_list, mobj_reg_shm, mobj_reg_shm, next);
	free(mobj_reg_shm);
}

static void mobj_reg_shm_free(struct mobj *mobj)
{
	struct mobj_reg_shm *r = to_mobj_reg_shm(mobj);
	uint32_t exceptions = 0;

	if (r->guarded && !r->releasing) {
		/*
		 * Guarded registered shared memory can't be released
		 * by cookie, only by mobj_put(). However, unguarded
		 * registered shared memory can also be freed by mobj_put()
		 * unless mobj_reg_shm_release_by_cookie() is waiting for
		 * the mobj to be released.
		 */
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		reg_shm_free_helper(r);
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
	} else {
		/*
		 * We've reached the point where an unguarded reg shm can
		 * be released by cookie. Notify eventual waiters.
		 */
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		r->release_frees = true;
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

		mutex_lock(&shm_mu);
		if (shm_release_waiters)
			condvar_broadcast(&shm_cv);
		mutex_unlock(&shm_mu);
	}
}

static TEE_Result mobj_reg_shm_get_mem_type(struct mobj *mobj __unused,
					    uint32_t *mt)
{
	if (!mt)
		return TEE_ERROR_GENERIC;

	*mt = TEE_MATTR_MEM_TYPE_CACHED;

	return TEE_SUCCESS;
}

static TEE_Result mobj_reg_shm_inc_map(struct mobj *mobj)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_reg_shm *r = to_mobj_reg_shm(mobj);
	uint32_t exceptions = 0;
	size_t sz = 0;

	while (true) {
		if (refcount_inc(&r->mapcount))
			return TEE_SUCCESS;

		exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);

		if (!refcount_val(&r->mapcount))
			break; /* continue to reinitialize */
		/*
		 * If another thread beat us to initialize mapcount,
		 * restart to make sure we still increase it.
		 */
		cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);
	}

	/*
	 * If we have beaten another thread calling mobj_reg_shm_dec_map()
	 * to get the lock we only need to reinitialize mapcount to 1.
	 */
	if (!r->mm) {
		sz = ROUNDUP(mobj->size + r->page_offset, SMALL_PAGE_SIZE);
		r->mm = tee_mm_alloc(&core_virt_shm_pool, sz);
		if (!r->mm) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto out;
		}

		res = core_mmu_map_pages(tee_mm_get_smem(r->mm), r->pages,
					 sz / SMALL_PAGE_SIZE,
					 MEM_AREA_NSEC_SHM);
		if (res) {
			tee_mm_free(r->mm);
			r->mm = NULL;
			goto out;
		}
	}

	refcount_set(&r->mapcount, 1);
out:
	cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);

	return res;
}

static TEE_Result mobj_reg_shm_dec_map(struct mobj *mobj)
{
	struct mobj_reg_shm *r = to_mobj_reg_shm(mobj);
	uint32_t exceptions = 0;

	if (!refcount_dec(&r->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);

	/*
	 * Check that another thread hasn't been able to:
	 * - increase the mapcount
	 * - or, increase the mapcount, decrease it again, and set r->mm
	 *   to NULL
	 * before we acquired the spinlock.
	 */
	if (!refcount_val(&r->mapcount) && r->mm)
		reg_shm_unmap_helper(r);

	cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);

	return TEE_SUCCESS;
}
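/*
 * mobj_reg_shm_inc_map()/mobj_reg_shm_dec_map() implement a lazy,
 * reference-counted mapping: the first inc_map() allocates a virtual
 * range from core_virt_shm_pool and maps the registered pages there,
 * and the last dec_map() tears the mapping down again. The fast path
 * is a plain refcount operation; reg_shm_map_lock is only taken when
 * mapcount transitions between 0 and 1. An illustrative access
 * pattern from elsewhere in the core (simplified, error handling
 * omitted) would be:
 *
 *	mobj_inc_map(mobj);
 *	va = mobj_get_va(mobj, offs, len);
 *	...access the buffer through va...
 *	mobj_dec_map(mobj);
 */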
static bool mobj_reg_shm_matches(struct mobj *mobj, enum buf_is_attr attr);

static uint64_t mobj_reg_shm_get_cookie(struct mobj *mobj)
{
	return to_mobj_reg_shm(mobj)->cookie;
}

/*
 * When CFG_PREALLOC_RPC_CACHE is disabled, this variable is weak just
 * to ease breaking its dependency chain when added to the unpaged area.
 * When CFG_PREALLOC_RPC_CACHE is enabled, releasing preallocated RPC
 * shm requires these resources to be unpaged.
 */
const struct mobj_ops mobj_reg_shm_ops __weak
__relrodata_unpaged("mobj_reg_shm_ops") = {
	.get_pa = mobj_reg_shm_get_pa,
	.get_phys_offs = mobj_reg_shm_get_phys_offs,
	.get_va = mobj_reg_shm_get_va,
	.get_mem_type = mobj_reg_shm_get_mem_type,
	.matches = mobj_reg_shm_matches,
	.free = mobj_reg_shm_free,
	.get_cookie = mobj_reg_shm_get_cookie,
	.inc_map = mobj_reg_shm_inc_map,
	.dec_map = mobj_reg_shm_dec_map,
};

#ifdef CFG_PREALLOC_RPC_CACHE
/* Releasing preallocated RPC shm requires a few resources to be unpaged */
DECLARE_KEEP_PAGER(mobj_reg_shm_get_cookie);
DECLARE_KEEP_PAGER(mobj_reg_shm_matches);
DECLARE_KEEP_PAGER(mobj_reg_shm_free);
#endif

static bool mobj_reg_shm_matches(struct mobj *mobj __maybe_unused,
				 enum buf_is_attr attr)
{
	assert(mobj->ops == &mobj_reg_shm_ops);

	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

static struct mobj_reg_shm *to_mobj_reg_shm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_reg_shm_ops);
	return container_of(mobj, struct mobj_reg_shm, mobj);
}

static TEE_Result check_reg_shm_conflict(struct mobj_reg_shm *r,
					 paddr_t pa, paddr_size_t size)
{
	size_t n = 0;

	for (n = 0; n < r->mobj.size / SMALL_PAGE_SIZE; n++)
		if (core_is_buffer_intersect(pa, size, r->pages[n],
					     SMALL_PAGE_SIZE))
			return TEE_ERROR_BAD_PARAMETERS;

	return TEE_SUCCESS;
}

static TEE_Result check_protmem_conflict(struct mobj_reg_shm *r)
{
	struct mobj_protmem *m = NULL;
	TEE_Result res = TEE_SUCCESS;

	SLIST_FOREACH(m, &protmem_list, next) {
		res = check_reg_shm_conflict(r, m->pa, m->mobj.size);
		if (res)
			break;
	}

	return res;
}

struct mobj *mobj_reg_shm_alloc(paddr_t *pages, size_t num_pages,
				paddr_t page_offset, uint64_t cookie)
{
	struct mobj_reg_shm *mobj_reg_shm = NULL;
	TEE_Result res = TEE_SUCCESS;
	size_t i = 0;
	uint32_t exceptions = 0;
	size_t s = 0;

	if (!num_pages || page_offset >= SMALL_PAGE_SIZE)
		return NULL;

	s = mobj_reg_shm_size(num_pages);
	if (!s)
		return NULL;

	mobj_reg_shm = calloc(1, s);
	if (!mobj_reg_shm)
		return NULL;

	mobj_reg_shm->mobj.ops = &mobj_reg_shm_ops;
	mobj_reg_shm->mobj.size = num_pages * SMALL_PAGE_SIZE - page_offset;
	mobj_reg_shm->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&mobj_reg_shm->mobj.refc, 1);
	mobj_reg_shm->cookie = cookie;
	mobj_reg_shm->guarded = true;
	mobj_reg_shm->page_offset = page_offset;
	memcpy(mobj_reg_shm->pages, pages, sizeof(*pages) * num_pages);

	/* Ensure the loaded references match format and security constraints */
	for (i = 0; i < num_pages; i++) {
		if (mobj_reg_shm->pages[i] & SMALL_PAGE_MASK)
			goto err;

		/* Only non-secure memory can be mapped there */
		if (!core_pbuf_is(CORE_MEM_NON_SEC, mobj_reg_shm->pages[i],
				  SMALL_PAGE_SIZE))
			goto err;
	}

	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	res = check_protmem_conflict(mobj_reg_shm);
	if (!res)
		SLIST_INSERT_HEAD(&reg_shm_list, mobj_reg_shm, next);
	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
	if (res)
		goto err;

	return &mobj_reg_shm->mobj;
err:
	free(mobj_reg_shm);
	return NULL;
}

void mobj_reg_shm_unguard(struct mobj *mobj)
{
	uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);

	to_mobj_reg_shm(mobj)->guarded = false;
	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
}

static struct mobj_reg_shm *reg_shm_find_unlocked(uint64_t cookie)
{
	struct mobj_reg_shm *mobj_reg_shm = NULL;

	SLIST_FOREACH(mobj_reg_shm, &reg_shm_list, next)
		if (mobj_reg_shm->cookie == cookie)
			return mobj_reg_shm;

	return NULL;
}

static struct mobj_protmem *protmem_find_unlocked(uint64_t cookie)
{
	struct mobj_protmem *m = NULL;

	SLIST_FOREACH(m, &protmem_list, next)
		if (m->cookie == cookie)
			return m;

	return NULL;
}
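/*
 * Cookie-based lookup and release. The normal world identifies a
 * registered buffer by its 64-bit cookie; the cookie is what travels in
 * messages while the mobj pointer stays inside the core. An illustrative
 * lifecycle (simplified, error handling omitted) looks roughly like:
 *
 *	mobj = mobj_reg_shm_alloc(pages, num_pages, page_offset, cookie);
 *	mobj_reg_shm_unguard(mobj);		// allow release by cookie
 *	...
 *	m = mobj_reg_shm_get_by_cookie(cookie);	// take a ref for local use
 *	...use m, then mobj_put(m)...
 *	...
 *	mobj_reg_shm_release_by_cookie(cookie);	// normal world unregisters
 */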
struct mobj *mobj_reg_shm_get_by_cookie(uint64_t cookie)
{
	struct mobj_reg_shm *rs = NULL;
	struct mobj_protmem *rm = NULL;
	uint32_t exceptions = 0;
	struct mobj *m = NULL;

	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	rs = reg_shm_find_unlocked(cookie);
	if (rs) {
		m = mobj_get(&rs->mobj);
		goto out;
	}
	rm = protmem_find_unlocked(cookie);
	if (rm)
		m = mobj_get(&rm->mobj);
out:
	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

	return m;
}

TEE_Result mobj_reg_shm_release_by_cookie(uint64_t cookie)
{
	uint32_t exceptions = 0;
	struct mobj_reg_shm *r = NULL;

	/*
	 * Try to find r and check whether it can be released by this
	 * function. If so, call mobj_put(). Otherwise this function was
	 * called with a wrong cookie, or perhaps a second time for the
	 * same cookie; either way return TEE_ERROR_BAD_PARAMETERS.
	 */
	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	r = reg_shm_find_unlocked(cookie);
	if (!r || r->guarded || r->releasing)
		r = NULL;
	else
		r->releasing = true;

	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

	if (!r)
		return TEE_ERROR_BAD_PARAMETERS;

	mobj_put(&r->mobj);

	/*
	 * We've established that this function can release the cookie.
	 * Now we wait until mobj_reg_shm_free() is called by the last
	 * mobj_put() needed to free this mobj. Note that the call to
	 * mobj_put() above could very well be that call.
	 *
	 * Once mobj_reg_shm_free() is called it will set r->release_frees
	 * to true and we can free the mobj here.
	 */
	mutex_lock(&shm_mu);
	shm_release_waiters++;
	assert(shm_release_waiters);

	while (true) {
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		if (r->release_frees) {
			reg_shm_free_helper(r);
			r = NULL;
		}
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

		if (!r)
			break;

		condvar_wait(&shm_cv, &shm_mu);
	}

	assert(shm_release_waiters);
	shm_release_waiters--;
	mutex_unlock(&shm_mu);

	return TEE_SUCCESS;
}

struct mobj *mobj_mapped_shm_alloc(paddr_t *pages, size_t num_pages,
				   paddr_t page_offset, uint64_t cookie)
{
	struct mobj *mobj = mobj_reg_shm_alloc(pages, num_pages,
					       page_offset, cookie);

	if (!mobj)
		return NULL;

	if (mobj_inc_map(mobj)) {
		mobj_put(mobj);
		return NULL;
	}

	return mobj;
}

static TEE_Result mobj_mapped_shm_init(void)
{
	vaddr_t pool_start = 0;
	vaddr_t pool_end = 0;

	core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
	if (!pool_start || !pool_end)
		panic("Can't find region for shmem pool");

	if (!tee_mm_init(&core_virt_shm_pool, pool_start,
			 pool_end - pool_start, SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("Could not create shmem pool");

	DMSG("Shared memory address range: %" PRIxVA ", %" PRIxVA,
	     pool_start, pool_end);

	return TEE_SUCCESS;
}

preinit(mobj_mapped_shm_init);

#ifdef CFG_CORE_DYN_PROTMEM
static struct mobj_protmem *to_mobj_protmem(struct mobj *mobj);

static TEE_Result check_reg_shm_list_conflict(paddr_t pa, paddr_size_t size)
{
	struct mobj_reg_shm *r = NULL;
	TEE_Result res = TEE_SUCCESS;

	SLIST_FOREACH(r, &reg_shm_list, next) {
		res = check_reg_shm_conflict(r, pa, size);
		if (res)
			break;
	}

	return res;
}

static TEE_Result protect_mem(struct mobj_protmem *m)
{
	if ((m->pa | m->mobj.size) & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	DMSG("use_case %d pa %#"PRIxPA", size %#zx",
	     m->use_case, m->pa, m->mobj.size);
	return plat_set_protmem_range(m->use_case, m->pa, m->mobj.size);
}

static TEE_Result restore_mem(struct mobj_protmem *m)
{
	DMSG("use_case %d pa %#"PRIxPA", size %#zx",
	     m->use_case, m->pa, m->mobj.size);
	return plat_set_protmem_range(MOBJ_USE_CASE_NS_SHM, m->pa,
				      m->mobj.size);
}
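/*
 * protect_mem()/restore_mem() bracket the lifetime of a lent range:
 * when a mobj_protmem is created, the platform hook
 * plat_set_protmem_range() is asked to protect the range for the
 * requested use case, and when the mobj is freed the same hook is
 * called with MOBJ_USE_CASE_NS_SHM to hand the range back as ordinary
 * non-secure shared memory. protect_mem() rejects ranges that are not
 * page-aligned.
 */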
static TEE_Result mobj_protmem_get_pa(struct mobj *mobj, size_t offs,
				      size_t granule, paddr_t *pa)
{
	struct mobj_protmem *m = to_mobj_protmem(mobj);
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offs >= mobj->size)
		return TEE_ERROR_GENERIC;

	p = m->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;

	return TEE_SUCCESS;
}

static TEE_Result mobj_protmem_get_mem_type(struct mobj *mobj __unused,
					    uint32_t *mt)
{
	if (!mt)
		return TEE_ERROR_GENERIC;

	*mt = TEE_MATTR_MEM_TYPE_CACHED;

	return TEE_SUCCESS;
}

static bool mobj_protmem_matches(struct mobj *mobj __unused,
				 enum buf_is_attr attr)
{
	return attr == CORE_MEM_SEC || attr == CORE_MEM_SDP_MEM;
}

static void protmem_free_helper(struct mobj_protmem *mobj_protmem)
{
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);
	SLIST_REMOVE(&protmem_list, mobj_protmem, mobj_protmem, next);
	cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);

	restore_mem(mobj_protmem);
	free(mobj_protmem);
}

static void mobj_protmem_free(struct mobj *mobj)
{
	struct mobj_protmem *r = to_mobj_protmem(mobj);
	uint32_t exceptions = 0;

	if (!r->releasing) {
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		protmem_free_helper(r);
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
	} else {
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		r->release_frees = true;
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

		mutex_lock(&shm_mu);
		if (shm_release_waiters)
			condvar_broadcast(&shm_cv);
		mutex_unlock(&shm_mu);
	}
}

static uint64_t mobj_protmem_get_cookie(struct mobj *mobj)
{
	return to_mobj_protmem(mobj)->cookie;
}

static TEE_Result mobj_protmem_inc_map(struct mobj *mobj __maybe_unused)
{
	assert(to_mobj_protmem(mobj));
	return TEE_ERROR_BAD_PARAMETERS;
}

static TEE_Result mobj_protmem_dec_map(struct mobj *mobj __maybe_unused)
{
	assert(to_mobj_protmem(mobj));
	return TEE_ERROR_BAD_PARAMETERS;
}

const struct mobj_ops mobj_protmem_ops
__relrodata_unpaged("mobj_protmem_ops") = {
	.get_pa = mobj_protmem_get_pa,
	.get_mem_type = mobj_protmem_get_mem_type,
	.matches = mobj_protmem_matches,
	.free = mobj_protmem_free,
	.get_cookie = mobj_protmem_get_cookie,
	.inc_map = mobj_protmem_inc_map,
	.dec_map = mobj_protmem_dec_map,
};

static struct mobj_protmem *to_mobj_protmem(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_protmem_ops);
	return container_of(mobj, struct mobj_protmem, mobj);
}

struct mobj *mobj_protmem_alloc(paddr_t pa, paddr_size_t size, uint64_t cookie,
				enum mobj_use_case use_case)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_protmem *m = NULL;
	uint32_t exceptions = 0;

	if (use_case == MOBJ_USE_CASE_NS_SHM ||
	    !core_pbuf_is(CORE_MEM_NON_SEC, pa, size))
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_protmem_ops;
	m->use_case = use_case;
	m->mobj.size = size;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&m->mobj.refc, 1);
	m->cookie = cookie;
	m->pa = pa;

	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	res = check_reg_shm_list_conflict(pa, size);
	if (res)
		goto out;
	res = protect_mem(m);
	if (res)
		goto out;
	SLIST_INSERT_HEAD(&protmem_list, m, next);
out:
	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
	if (res) {
		free(m);
		return NULL;
	}

	return &m->mobj;
}
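/*
 * As with registered shared memory, protected memory lent by the normal
 * world is tracked by cookie. An illustrative sequence (simplified,
 * error handling omitted):
 *
 *	mobj = mobj_protmem_alloc(pa, size, cookie, use_case);
 *	...
 *	m = mobj_reg_shm_get_by_cookie(cookie);	// also finds protmem
 *	...hand m to the consumer, which ends with mobj_put(m)...
 *	...
 *	mobj_protmem_release_by_cookie(cookie);	// normal world reclaims
 */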
TEE_Result mobj_protmem_release_by_cookie(uint64_t cookie)
{
	uint32_t exceptions = 0;
	struct mobj_protmem *rm = NULL;

	/*
	 * Try to find rm and check whether it can be released by this
	 * function. If so, call mobj_put(). Otherwise this function was
	 * called with a wrong cookie, or perhaps a second time for the
	 * same cookie; either way return TEE_ERROR_BAD_PARAMETERS.
	 */
	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	rm = protmem_find_unlocked(cookie);
	if (!rm || rm->releasing)
		rm = NULL;
	else
		rm->releasing = true;

	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

	if (!rm)
		return TEE_ERROR_BAD_PARAMETERS;

	mobj_put(&rm->mobj);

	/*
	 * We've established that this function can release the cookie.
	 * Now we wait until mobj_protmem_free() is called by the last
	 * mobj_put() needed to free this mobj. Note that the call to
	 * mobj_put() above could very well be that call.
	 *
	 * Once mobj_protmem_free() is called it will set rm->release_frees
	 * to true and we can free the mobj here.
	 */
	mutex_lock(&shm_mu);
	shm_release_waiters++;
	assert(shm_release_waiters);

	while (true) {
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		if (rm->release_frees) {
			protmem_free_helper(rm);
			rm = NULL;
		}
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

		if (!rm)
			break;

		condvar_wait(&shm_cv, &shm_mu);
	}

	assert(shm_release_waiters);
	shm_release_waiters--;
	mutex_unlock(&shm_mu);

	return TEE_SUCCESS;
}

static struct mobj_protmem *protmem_find_by_pa_unlocked(paddr_t pa,
							paddr_size_t sz)
{
	struct mobj_protmem *m = NULL;

	if (!sz)
		sz = 1;

	SLIST_FOREACH(m, &protmem_list, next)
		if (core_is_buffer_inside(pa, sz, m->pa, m->mobj.size))
			return m;

	return NULL;
}

struct mobj *mobj_protmem_get_by_pa(paddr_t pa, paddr_size_t size)
{
	struct mobj_protmem *rm = NULL;
	struct mobj *mobj = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	rm = protmem_find_by_pa_unlocked(pa, size);
	if (rm && !rm->releasing)
		mobj = mobj_get(&rm->mobj);
	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

	return mobj;
}
#endif /*CFG_CORE_DYN_PROTMEM*/