173e1d3f3SJens Wiklander // SPDX-License-Identifier: BSD-2-Clause
273e1d3f3SJens Wiklander /*
373e1d3f3SJens Wiklander * Copyright (c) 2016-2020, Linaro Limited
473e1d3f3SJens Wiklander */
573e1d3f3SJens Wiklander
673e1d3f3SJens Wiklander #include <assert.h>
773e1d3f3SJens Wiklander #include <bitstring.h>
8a65dd3a6SJens Wiklander #include <config.h>
9c6726b47SJelle #include <ffa.h>
1073e1d3f3SJens Wiklander #include <initcall.h>
1173e1d3f3SJens Wiklander #include <kernel/refcount.h>
1273e1d3f3SJens Wiklander #include <kernel/spinlock.h>
13*00338334SJens Wiklander #include <kernel/tee_misc.h>
1421c96e48SMarouene Boubakri #include <kernel/thread_spmc.h>
15a65dd3a6SJens Wiklander #include <kernel/virtualization.h>
1673e1d3f3SJens Wiklander #include <mm/mobj.h>
1773e1d3f3SJens Wiklander #include <sys/queue.h>
1873e1d3f3SJens Wiklander
19287e68f4SJens Wiklander /*
20287e68f4SJens Wiklander * Life cycle of struct mobj_ffa
21287e68f4SJens Wiklander *
22287e68f4SJens Wiklander * SPMC at S-EL1 (CFG_CORE_SEL1_SPMC=y)
23287e68f4SJens Wiklander * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24287e68f4SJens Wiklander * During FFA_MEM_SHARE allocated in mobj_ffa_sel1_spmc_new() and finally
25287e68f4SJens Wiklander * added to the inactive list at the end of add_mem_share() once
26287e68f4SJens Wiklander * successfully filled in.
27287e68f4SJens Wiklander * registered_by_cookie = false
 * mobj.refc.val = 0
29287e68f4SJens Wiklander * inactive_refs = 0
30287e68f4SJens Wiklander *
31287e68f4SJens Wiklander * During FFA_MEM_RECLAIM reclaimed/freed using
32287e68f4SJens Wiklander * mobj_ffa_sel1_spmc_reclaim(). This will always succeed if the normal
33287e68f4SJens Wiklander * world is only calling this when all other threads are done with the
34287e68f4SJens Wiklander * shared memory object. However, there are some conditions that must be
35287e68f4SJens Wiklander * met to make sure that this is the case:
36287e68f4SJens Wiklander * mobj not in the active list, else -> return TEE_ERROR_BUSY
37287e68f4SJens Wiklander * mobj not in inactive list, else -> return TEE_ERROR_ITEM_NOT_FOUND
38287e68f4SJens Wiklander * mobj inactive_refs is 0, else -> return TEE_ERROR_BUSY
39287e68f4SJens Wiklander *
40287e68f4SJens Wiklander * mobj is activated using mobj_ffa_get_by_cookie() which unless the mobj
41287e68f4SJens Wiklander * is active already:
42287e68f4SJens Wiklander * - move the mobj into the active list
43287e68f4SJens Wiklander * - if not registered_by_cookie ->
44287e68f4SJens Wiklander * set registered_by_cookie and increase inactive_refs
45287e68f4SJens Wiklander * - set mobj.refc.val to 1
46287e68f4SJens Wiklander * - increase inactive_refs
47287e68f4SJens Wiklander *
48287e68f4SJens Wiklander * A previously activated mobj is made ready for reclaim using
49287e68f4SJens Wiklander * mobj_ffa_unregister_by_cookie() which only succeeds if the mobj is in
50287e68f4SJens Wiklander * the inactive list and registered_by_cookie is set and then:
51287e68f4SJens Wiklander * - clears registered_by_cookie
52287e68f4SJens Wiklander * - decreases inactive_refs
53287e68f4SJens Wiklander *
54287e68f4SJens Wiklander * Each successful call to mobj_ffa_get_by_cookie() must be matched by a
55287e68f4SJens Wiklander * call to mobj_put(). If the mobj.refc.val reaches 0 it's
56287e68f4SJens Wiklander * - moved to the inactive list
57287e68f4SJens Wiklander * - inactive_refs is decreased
58287e68f4SJens Wiklander *
59287e68f4SJens Wiklander * SPMC at S-EL2/EL3 (CFG_CORE_SEL1_SPMC=n)
60287e68f4SJens Wiklander * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
61287e68f4SJens Wiklander * mobj is activated/allocated using mobj_ffa_get_by_cookie() which if
62287e68f4SJens Wiklander * already active only is
63287e68f4SJens Wiklander * - increasing mobj.refc.val and inactive_refs
64287e68f4SJens Wiklander * if found in inactive list is
65287e68f4SJens Wiklander * - setting mobj.refc.val to 1
66287e68f4SJens Wiklander * - increasing inactive_refs
67287e68f4SJens Wiklander * - moved into active list
68287e68f4SJens Wiklander * if not found is created using thread_spmc_populate_mobj_from_rx() and
69287e68f4SJens Wiklander * then:
70287e68f4SJens Wiklander * - setting mobj.refc.val to 1
71287e68f4SJens Wiklander * - increasing inactive_refs
72287e68f4SJens Wiklander * - moved into active list
73287e68f4SJens Wiklander *
74287e68f4SJens Wiklander * A previously activated mobj is relinquished using
75287e68f4SJens Wiklander * mobj_ffa_unregister_by_cookie() which only succeeds if the mobj is in
76287e68f4SJens Wiklander * the inactive list and inactive_refs is 1
77287e68f4SJens Wiklander */
/*
 * struct mobj_ffa - FF-A memory object, common part
 * @mobj:	generic memory object
 * @link:	list link, the object sits on either the active list
 *		(shm_head) or the inactive list (shm_inactive_head),
 *		both protected by shm_lock
 * @cookie:	FF-A global memory handle identifying this object
 * @inactive_refs:	references keeping the object from being
 *		reclaimed/deleted while on the inactive list, see the
 *		life cycle description above
 * @registered_by_cookie:	set by mobj_ffa_get_by_cookie() and
 *		cleared by mobj_ffa_unregister_by_cookie(), only used
 *		with the S-EL1 SPMC
 */
struct mobj_ffa {
	struct mobj mobj;
	SLIST_ENTRY(mobj_ffa) link;
	uint64_t cookie;
	unsigned int inactive_refs;
#ifdef CFG_CORE_SEL1_SPMC
	bool registered_by_cookie;
#endif
};
8746195e2fSJens Wiklander
/*
 * struct mobj_ffa_shm - FF-A shared memory object
 * @mf:		common FF-A memory object part
 * @mm:		core virtual memory reservation while mapped, NULL when
 *		not mapped (see unmap_helper())
 * @mapcount:	mapping reference counter — not referenced in this
 *		chunk, presumably counts users of the mapping; confirm
 *		against the rest of the file
 * @page_offset:	presumably the offset into the first page where
 *		the buffer starts — not referenced in this chunk
 * @pages:	physical address of each registered page
 */
struct mobj_ffa_shm {
	struct mobj_ffa mf;
	tee_mm_entry_t *mm;
	struct refcount mapcount;
	uint16_t page_offset;
	paddr_t pages[];
};
9573e1d3f3SJens Wiklander
/*
 * struct mobj_ffa_prm - FF-A protected memory object
 * @mf:		common FF-A memory object part
 * @pa:		physical base address of the contiguous memory range
 * @use_case:	protection use case, never MOBJ_USE_CASE_NS_SHM
 *		(rejected in ffa_prm_new())
 * @assigned_use_case:	not referenced in this chunk — presumably set
 *		once a use case has been committed; confirm against the
 *		rest of the file
 */
struct mobj_ffa_prm {
	struct mobj_ffa mf;
	paddr_t pa;
	enum mobj_use_case use_case;
	bool assigned_use_case;
};
102*00338334SJens Wiklander
10373e1d3f3SJens Wiklander SLIST_HEAD(mobj_ffa_head, mobj_ffa);
10473e1d3f3SJens Wiklander
10573e1d3f3SJens Wiklander #ifdef CFG_CORE_SEL1_SPMC
#ifdef CFG_NS_VIRTUALIZATION
/* With NS virtualization each guest has its own cookie bitmap */
static bitstr_t *get_shm_bits(void)
{
	return virt_get_shm_bits();
}
#else
/* Bitmap of SPMC-allocated cookie values currently in use */
static bitstr_t bit_decl(__shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT);

static bitstr_t *get_shm_bits(void)
{
	return __shm_bits;
}
#endif
11973e1d3f3SJens Wiklander #endif
12073e1d3f3SJens Wiklander
/* Active objects (retrieved via mobj_ffa_get_by_cookie()) */
static struct mobj_ffa_head shm_head = SLIST_HEAD_INITIALIZER(shm_head);
/* Inactive objects, registered but currently unused */
static struct mobj_ffa_head shm_inactive_head =
	SLIST_HEAD_INITIALIZER(shm_inactive_head);

/* Protects both lists above and the cookie bitmap */
static unsigned int shm_lock = SPINLOCK_UNLOCK;

/* Ops tables, defined later in this file; also used as type tags */
static const struct mobj_ops mobj_ffa_shm_ops;
static const struct mobj_ops mobj_ffa_prm_ops;
12973e1d3f3SJens Wiklander
is_mobj_ffa_shm(struct mobj * mobj)130*00338334SJens Wiklander static bool is_mobj_ffa_shm(struct mobj *mobj)
13173e1d3f3SJens Wiklander {
13246195e2fSJens Wiklander return mobj->ops == &mobj_ffa_shm_ops;
13346195e2fSJens Wiklander }
13446195e2fSJens Wiklander
to_mobj_ffa_shm(struct mobj * mobj)13546195e2fSJens Wiklander static struct mobj_ffa_shm *to_mobj_ffa_shm(struct mobj *mobj)
13646195e2fSJens Wiklander {
13746195e2fSJens Wiklander assert(is_mobj_ffa_shm(mobj));
13846195e2fSJens Wiklander return container_of(mobj, struct mobj_ffa_shm, mf.mobj);
13973e1d3f3SJens Wiklander }
14073e1d3f3SJens Wiklander
is_mobj_ffa_prm(struct mobj * mobj)141*00338334SJens Wiklander static bool is_mobj_ffa_prm(struct mobj *mobj)
142*00338334SJens Wiklander {
143*00338334SJens Wiklander return mobj->ops == &mobj_ffa_prm_ops;
144*00338334SJens Wiklander }
145*00338334SJens Wiklander
to_mobj_ffa_prm(struct mobj * mobj)146*00338334SJens Wiklander static struct mobj_ffa_prm *to_mobj_ffa_prm(struct mobj *mobj)
147*00338334SJens Wiklander {
148*00338334SJens Wiklander assert(is_mobj_ffa_prm(mobj));
149*00338334SJens Wiklander return container_of(mobj, struct mobj_ffa_prm, mf.mobj);
150*00338334SJens Wiklander }
151*00338334SJens Wiklander
shm_size(size_t num_pages)15273e1d3f3SJens Wiklander static size_t shm_size(size_t num_pages)
15373e1d3f3SJens Wiklander {
15473e1d3f3SJens Wiklander size_t s = 0;
15573e1d3f3SJens Wiklander
15673e1d3f3SJens Wiklander if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
15773e1d3f3SJens Wiklander return 0;
15846195e2fSJens Wiklander if (ADD_OVERFLOW(sizeof(struct mobj_ffa_shm), s, &s))
15973e1d3f3SJens Wiklander return 0;
16073e1d3f3SJens Wiklander return s;
16173e1d3f3SJens Wiklander }
16273e1d3f3SJens Wiklander
ffa_shm_new(unsigned int num_pages)163*00338334SJens Wiklander static struct mobj_ffa *ffa_shm_new(unsigned int num_pages)
16473e1d3f3SJens Wiklander {
16546195e2fSJens Wiklander struct mobj_ffa_shm *m = NULL;
16673e1d3f3SJens Wiklander size_t s = 0;
16773e1d3f3SJens Wiklander
16873e1d3f3SJens Wiklander if (!num_pages)
16973e1d3f3SJens Wiklander return NULL;
17073e1d3f3SJens Wiklander
17173e1d3f3SJens Wiklander s = shm_size(num_pages);
17273e1d3f3SJens Wiklander if (!s)
17373e1d3f3SJens Wiklander return NULL;
17446195e2fSJens Wiklander m = calloc(1, s);
17546195e2fSJens Wiklander if (!m)
17673e1d3f3SJens Wiklander return NULL;
17773e1d3f3SJens Wiklander
17846195e2fSJens Wiklander m->mf.mobj.ops = &mobj_ffa_shm_ops;
17946195e2fSJens Wiklander m->mf.mobj.size = num_pages * SMALL_PAGE_SIZE;
18046195e2fSJens Wiklander m->mf.mobj.phys_granule = SMALL_PAGE_SIZE;
18146195e2fSJens Wiklander refcount_set(&m->mf.mobj.refc, 0);
18246195e2fSJens Wiklander m->mf.inactive_refs = 0;
18373e1d3f3SJens Wiklander
184*00338334SJens Wiklander return &m->mf;
185*00338334SJens Wiklander }
186*00338334SJens Wiklander
ffa_prm_new(unsigned int num_pages,enum mobj_use_case use_case)187*00338334SJens Wiklander static struct mobj_ffa *ffa_prm_new(unsigned int num_pages,
188*00338334SJens Wiklander enum mobj_use_case use_case)
189*00338334SJens Wiklander {
190*00338334SJens Wiklander struct mobj_ffa_prm *m = NULL;
191*00338334SJens Wiklander size_t sz = 0;
192*00338334SJens Wiklander
193*00338334SJens Wiklander if (!num_pages || MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &sz) ||
194*00338334SJens Wiklander use_case == MOBJ_USE_CASE_NS_SHM)
195*00338334SJens Wiklander return NULL;
196*00338334SJens Wiklander
197*00338334SJens Wiklander m = calloc(1, sizeof(*m));
198*00338334SJens Wiklander if (!m)
199*00338334SJens Wiklander return NULL;
200*00338334SJens Wiklander
201*00338334SJens Wiklander m->mf.mobj.ops = &mobj_ffa_prm_ops;
202*00338334SJens Wiklander m->mf.mobj.size = sz;
203*00338334SJens Wiklander m->mf.mobj.phys_granule = SMALL_PAGE_SIZE;
204*00338334SJens Wiklander refcount_set(&m->mf.mobj.refc, 0);
205*00338334SJens Wiklander m->mf.inactive_refs = 0;
206*00338334SJens Wiklander m->use_case = use_case;
207*00338334SJens Wiklander
208*00338334SJens Wiklander return &m->mf;
20973e1d3f3SJens Wiklander }
21073e1d3f3SJens Wiklander
21173e1d3f3SJens Wiklander #ifdef CFG_CORE_SEL1_SPMC
mobj_ffa_sel1_spmc_new(uint64_t cookie,unsigned int num_pages,enum mobj_use_case use_case)212a65dd3a6SJens Wiklander struct mobj_ffa *mobj_ffa_sel1_spmc_new(uint64_t cookie,
213*00338334SJens Wiklander unsigned int num_pages,
214*00338334SJens Wiklander enum mobj_use_case use_case)
21573e1d3f3SJens Wiklander {
216*00338334SJens Wiklander struct mobj_ffa *m = NULL;
2173e0b361eSJens Wiklander bitstr_t *shm_bits = NULL;
21873e1d3f3SJens Wiklander uint32_t exceptions = 0;
21973e1d3f3SJens Wiklander int i = 0;
22073e1d3f3SJens Wiklander
221a65dd3a6SJens Wiklander if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID) {
222a65dd3a6SJens Wiklander if (!(cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT))
22373e1d3f3SJens Wiklander return NULL;
224a65dd3a6SJens Wiklander if (virt_add_cookie_to_current_guest(cookie))
225a65dd3a6SJens Wiklander return NULL;
226a65dd3a6SJens Wiklander }
227a65dd3a6SJens Wiklander
228*00338334SJens Wiklander switch (use_case) {
229*00338334SJens Wiklander case MOBJ_USE_CASE_NS_SHM:
23046195e2fSJens Wiklander m = ffa_shm_new(num_pages);
231*00338334SJens Wiklander break;
232*00338334SJens Wiklander case MOBJ_USE_CASE_SEC_VIDEO_PLAY:
233*00338334SJens Wiklander case MOBJ_USE_CASE_TRUSED_UI:
234*00338334SJens Wiklander m = ffa_prm_new(num_pages, use_case);
235*00338334SJens Wiklander break;
236*00338334SJens Wiklander default:
237*00338334SJens Wiklander break;
238*00338334SJens Wiklander }
23946195e2fSJens Wiklander if (!m) {
240a65dd3a6SJens Wiklander if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
241a65dd3a6SJens Wiklander virt_remove_cookie(cookie);
242a65dd3a6SJens Wiklander return NULL;
243a65dd3a6SJens Wiklander }
244a65dd3a6SJens Wiklander
245a65dd3a6SJens Wiklander if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID) {
246*00338334SJens Wiklander m->cookie = cookie;
247*00338334SJens Wiklander return m;
248a65dd3a6SJens Wiklander }
24973e1d3f3SJens Wiklander
2503e0b361eSJens Wiklander shm_bits = get_shm_bits();
25173e1d3f3SJens Wiklander exceptions = cpu_spin_lock_xsave(&shm_lock);
252070d197fSJens Wiklander bit_ffc(shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT, &i);
25373e1d3f3SJens Wiklander if (i != -1) {
25473e1d3f3SJens Wiklander bit_set(shm_bits, i);
255*00338334SJens Wiklander m->cookie = i;
256*00338334SJens Wiklander m->cookie |= FFA_MEMORY_HANDLE_NON_SECURE_BIT;
25773e1d3f3SJens Wiklander /*
258a65dd3a6SJens Wiklander * Encode the partition ID into the handle so we know which
259a65dd3a6SJens Wiklander * partition to switch to when reclaiming a handle.
26073e1d3f3SJens Wiklander */
261*00338334SJens Wiklander m->cookie |= SHIFT_U64(virt_get_current_guest_id(),
262a65dd3a6SJens Wiklander FFA_MEMORY_HANDLE_PRTN_SHIFT);
26373e1d3f3SJens Wiklander }
26473e1d3f3SJens Wiklander cpu_spin_unlock_xrestore(&shm_lock, exceptions);
26573e1d3f3SJens Wiklander
26673e1d3f3SJens Wiklander if (i == -1) {
267*00338334SJens Wiklander mobj_ffa_sel1_spmc_delete(m);
26873e1d3f3SJens Wiklander return NULL;
26973e1d3f3SJens Wiklander }
27073e1d3f3SJens Wiklander
271*00338334SJens Wiklander return m;
27273e1d3f3SJens Wiklander }
27373e1d3f3SJens Wiklander #endif /*CFG_CORE_SEL1_SPMC*/
27473e1d3f3SJens Wiklander
get_page_count(struct mobj_ffa * mf)27573e1d3f3SJens Wiklander static size_t get_page_count(struct mobj_ffa *mf)
27673e1d3f3SJens Wiklander {
27704e46975SEtienne Carriere return ROUNDUP_DIV(mf->mobj.size, SMALL_PAGE_SIZE);
27873e1d3f3SJens Wiklander }
27973e1d3f3SJens Wiklander
cmp_cookie(struct mobj_ffa * mf,uint64_t cookie)28073e1d3f3SJens Wiklander static bool cmp_cookie(struct mobj_ffa *mf, uint64_t cookie)
28173e1d3f3SJens Wiklander {
28273e1d3f3SJens Wiklander return mf->cookie == cookie;
28373e1d3f3SJens Wiklander }
28473e1d3f3SJens Wiklander
cmp_ptr(struct mobj_ffa * mf,uint64_t ptr)28573e1d3f3SJens Wiklander static bool cmp_ptr(struct mobj_ffa *mf, uint64_t ptr)
28673e1d3f3SJens Wiklander {
28773e1d3f3SJens Wiklander return mf == (void *)(vaddr_t)ptr;
28873e1d3f3SJens Wiklander }
28973e1d3f3SJens Wiklander
check_shm_overlaps_prm(struct mobj_ffa_shm * shm,struct mobj_ffa_prm * prm)290*00338334SJens Wiklander static bool check_shm_overlaps_prm(struct mobj_ffa_shm *shm,
291*00338334SJens Wiklander struct mobj_ffa_prm *prm)
292*00338334SJens Wiklander {
293*00338334SJens Wiklander size_t n = 0;
294*00338334SJens Wiklander
295*00338334SJens Wiklander for (n = 0; n < shm->mf.mobj.size / SMALL_PAGE_SIZE; n++)
296*00338334SJens Wiklander if (core_is_buffer_intersect(prm->pa, prm->mf.mobj.size,
297*00338334SJens Wiklander shm->pages[n], SMALL_PAGE_SIZE))
298*00338334SJens Wiklander return true;
299*00338334SJens Wiklander
300*00338334SJens Wiklander return false;
301*00338334SJens Wiklander }
302*00338334SJens Wiklander
cmp_pa_overlap(struct mobj_ffa * mf,uint64_t ptr)303*00338334SJens Wiklander static bool cmp_pa_overlap(struct mobj_ffa *mf, uint64_t ptr)
304*00338334SJens Wiklander {
305*00338334SJens Wiklander struct mobj_ffa *mf2 = (void *)(vaddr_t)ptr;
306*00338334SJens Wiklander bool mf_is_shm = is_mobj_ffa_shm(&mf->mobj);
307*00338334SJens Wiklander bool mf2_is_shm = is_mobj_ffa_shm(&mf2->mobj);
308*00338334SJens Wiklander
309*00338334SJens Wiklander if (mf_is_shm && mf2_is_shm) {
310*00338334SJens Wiklander /*
311*00338334SJens Wiklander * Not a security issue and might be too expensive to check
312*00338334SJens Wiklander * if we have many pages in each registered shared memory
313*00338334SJens Wiklander * object.
314*00338334SJens Wiklander */
315*00338334SJens Wiklander return false;
316*00338334SJens Wiklander }
317*00338334SJens Wiklander
318*00338334SJens Wiklander if (mf_is_shm)
319*00338334SJens Wiklander return check_shm_overlaps_prm(to_mobj_ffa_shm(&mf->mobj),
320*00338334SJens Wiklander to_mobj_ffa_prm(&mf2->mobj));
321*00338334SJens Wiklander if (mf2_is_shm)
322*00338334SJens Wiklander return check_shm_overlaps_prm(to_mobj_ffa_shm(&mf2->mobj),
323*00338334SJens Wiklander to_mobj_ffa_prm(&mf->mobj));
324*00338334SJens Wiklander
325*00338334SJens Wiklander return core_is_buffer_intersect(to_mobj_ffa_prm(&mf->mobj)->pa,
326*00338334SJens Wiklander mf->mobj.size,
327*00338334SJens Wiklander to_mobj_ffa_prm(&mf2->mobj)->pa,
328*00338334SJens Wiklander mf2->mobj.size);
329*00338334SJens Wiklander }
330*00338334SJens Wiklander
pop_from_list(struct mobj_ffa_head * head,bool (* cmp_func)(struct mobj_ffa * mf,uint64_t val),uint64_t val)33173e1d3f3SJens Wiklander static struct mobj_ffa *pop_from_list(struct mobj_ffa_head *head,
33273e1d3f3SJens Wiklander bool (*cmp_func)(struct mobj_ffa *mf,
33373e1d3f3SJens Wiklander uint64_t val),
33473e1d3f3SJens Wiklander uint64_t val)
33573e1d3f3SJens Wiklander {
33673e1d3f3SJens Wiklander struct mobj_ffa *mf = SLIST_FIRST(head);
33773e1d3f3SJens Wiklander struct mobj_ffa *p = NULL;
33873e1d3f3SJens Wiklander
33973e1d3f3SJens Wiklander if (!mf)
34073e1d3f3SJens Wiklander return NULL;
34173e1d3f3SJens Wiklander
34273e1d3f3SJens Wiklander if (cmp_func(mf, val)) {
34373e1d3f3SJens Wiklander SLIST_REMOVE_HEAD(head, link);
34473e1d3f3SJens Wiklander return mf;
34573e1d3f3SJens Wiklander }
34673e1d3f3SJens Wiklander
34773e1d3f3SJens Wiklander while (true) {
34873e1d3f3SJens Wiklander p = SLIST_NEXT(mf, link);
34973e1d3f3SJens Wiklander if (!p)
35073e1d3f3SJens Wiklander return NULL;
35173e1d3f3SJens Wiklander if (cmp_func(p, val)) {
35273e1d3f3SJens Wiklander SLIST_REMOVE_AFTER(mf, link);
35373e1d3f3SJens Wiklander return p;
35473e1d3f3SJens Wiklander }
35573e1d3f3SJens Wiklander mf = p;
35673e1d3f3SJens Wiklander }
35773e1d3f3SJens Wiklander }
35873e1d3f3SJens Wiklander
find_in_list(struct mobj_ffa_head * head,bool (* cmp_func)(struct mobj_ffa * mf,uint64_t val),uint64_t val)35973e1d3f3SJens Wiklander static struct mobj_ffa *find_in_list(struct mobj_ffa_head *head,
36073e1d3f3SJens Wiklander bool (*cmp_func)(struct mobj_ffa *mf,
36173e1d3f3SJens Wiklander uint64_t val),
36273e1d3f3SJens Wiklander uint64_t val)
36373e1d3f3SJens Wiklander {
36473e1d3f3SJens Wiklander struct mobj_ffa *mf = NULL;
36573e1d3f3SJens Wiklander
36673e1d3f3SJens Wiklander SLIST_FOREACH(mf, head, link)
36773e1d3f3SJens Wiklander if (cmp_func(mf, val))
36873e1d3f3SJens Wiklander return mf;
36973e1d3f3SJens Wiklander
37073e1d3f3SJens Wiklander return NULL;
37173e1d3f3SJens Wiklander }
37273e1d3f3SJens Wiklander
373e26b8354SJens Wiklander #if defined(CFG_CORE_SEL1_SPMC)
mobj_ffa_sel1_spmc_delete(struct mobj_ffa * mf)37473e1d3f3SJens Wiklander void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mf)
37573e1d3f3SJens Wiklander {
376a65dd3a6SJens Wiklander if (!IS_ENABLED(CFG_NS_VIRTUALIZATION) ||
377a65dd3a6SJens Wiklander !(mf->cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT)) {
378a65dd3a6SJens Wiklander uint64_t mask = FFA_MEMORY_HANDLE_NON_SECURE_BIT;
3793e0b361eSJens Wiklander bitstr_t *shm_bits = get_shm_bits();
380a65dd3a6SJens Wiklander uint32_t exceptions = 0;
381a65dd3a6SJens Wiklander int64_t i = 0;
382a65dd3a6SJens Wiklander
383a65dd3a6SJens Wiklander if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
384a65dd3a6SJens Wiklander mask |= SHIFT_U64(FFA_MEMORY_HANDLE_PRTN_MASK,
385a65dd3a6SJens Wiklander FFA_MEMORY_HANDLE_PRTN_SHIFT);
386a65dd3a6SJens Wiklander i = mf->cookie & ~mask;
387070d197fSJens Wiklander assert(i >= 0 && i < SPMC_CORE_SEL1_MAX_SHM_COUNT);
38873e1d3f3SJens Wiklander
38973e1d3f3SJens Wiklander exceptions = cpu_spin_lock_xsave(&shm_lock);
39073e1d3f3SJens Wiklander assert(bit_test(shm_bits, i));
39173e1d3f3SJens Wiklander bit_clear(shm_bits, i);
39273e1d3f3SJens Wiklander cpu_spin_unlock_xrestore(&shm_lock, exceptions);
393a65dd3a6SJens Wiklander }
39473e1d3f3SJens Wiklander
395*00338334SJens Wiklander if (is_mobj_ffa_shm(&mf->mobj)) {
396*00338334SJens Wiklander struct mobj_ffa_shm *m = to_mobj_ffa_shm(&mf->mobj);
397*00338334SJens Wiklander
39846195e2fSJens Wiklander assert(!m->mm);
39946195e2fSJens Wiklander free(m);
400*00338334SJens Wiklander } else {
401*00338334SJens Wiklander free(to_mobj_ffa_prm(&mf->mobj));
402*00338334SJens Wiklander }
40373e1d3f3SJens Wiklander }
404e26b8354SJens Wiklander #else /* !defined(CFG_CORE_SEL1_SPMC) */
mobj_ffa_spmc_new(uint64_t cookie,unsigned int num_pages,enum mobj_use_case use_case)405*00338334SJens Wiklander struct mobj_ffa *mobj_ffa_spmc_new(uint64_t cookie, unsigned int num_pages,
406*00338334SJens Wiklander enum mobj_use_case use_case)
407fb19e98eSJens Wiklander {
408*00338334SJens Wiklander struct mobj_ffa *mf = NULL;
409fb19e98eSJens Wiklander
410fb19e98eSJens Wiklander assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
411*00338334SJens Wiklander if (use_case == MOBJ_USE_CASE_NS_SHM)
412*00338334SJens Wiklander mf = ffa_shm_new(num_pages);
413*00338334SJens Wiklander else
414*00338334SJens Wiklander mf = ffa_prm_new(num_pages, use_case);
415*00338334SJens Wiklander if (mf)
416*00338334SJens Wiklander mf->cookie = cookie;
417*00338334SJens Wiklander return mf;
418fb19e98eSJens Wiklander }
419fb19e98eSJens Wiklander
mobj_ffa_spmc_delete(struct mobj_ffa * mf)420e26b8354SJens Wiklander void mobj_ffa_spmc_delete(struct mobj_ffa *mf)
421fb19e98eSJens Wiklander {
422*00338334SJens Wiklander if (is_mobj_ffa_shm(&mf->mobj))
42346195e2fSJens Wiklander free(to_mobj_ffa_shm(&mf->mobj));
424*00338334SJens Wiklander else
425*00338334SJens Wiklander free(to_mobj_ffa_prm(&mf->mobj));
426fb19e98eSJens Wiklander }
427e26b8354SJens Wiklander #endif /* !defined(CFG_CORE_SEL1_SPMC) */
428fb19e98eSJens Wiklander
mobj_ffa_add_pages_at(struct mobj_ffa * mf,unsigned int * idx,paddr_t pa,unsigned int num_pages)42973e1d3f3SJens Wiklander TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mf, unsigned int *idx,
43073e1d3f3SJens Wiklander paddr_t pa, unsigned int num_pages)
43173e1d3f3SJens Wiklander {
432*00338334SJens Wiklander size_t tot_page_count = tot_page_count = get_page_count(mf);
43346195e2fSJens Wiklander unsigned int n = 0;
43473e1d3f3SJens Wiklander
43573e1d3f3SJens Wiklander if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
43673e1d3f3SJens Wiklander return TEE_ERROR_BAD_PARAMETERS;
43773e1d3f3SJens Wiklander
438b80243afSJens Wiklander if (!IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
439b80243afSJens Wiklander !core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))
44073e1d3f3SJens Wiklander return TEE_ERROR_BAD_PARAMETERS;
44173e1d3f3SJens Wiklander
442*00338334SJens Wiklander if (is_mobj_ffa_shm(&mf->mobj)) {
443*00338334SJens Wiklander struct mobj_ffa_shm *mfs = to_mobj_ffa_shm(&mf->mobj);
444*00338334SJens Wiklander
44573e1d3f3SJens Wiklander for (n = 0; n < num_pages; n++)
44646195e2fSJens Wiklander mfs->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;
447*00338334SJens Wiklander } else {
448*00338334SJens Wiklander struct mobj_ffa_prm *mfr = to_mobj_ffa_prm(&mf->mobj);
449*00338334SJens Wiklander
450*00338334SJens Wiklander if (!*idx)
451*00338334SJens Wiklander mfr->pa = pa;
452*00338334SJens Wiklander else if (mfr->pa != pa + *idx * SMALL_PAGE_SIZE)
453*00338334SJens Wiklander return TEE_ERROR_BAD_PARAMETERS;
454*00338334SJens Wiklander }
45573e1d3f3SJens Wiklander
45673e1d3f3SJens Wiklander (*idx) += n;
457*00338334SJens Wiklander
45873e1d3f3SJens Wiklander return TEE_SUCCESS;
45973e1d3f3SJens Wiklander }
46073e1d3f3SJens Wiklander
mobj_ffa_get_cookie(struct mobj_ffa * mf)46173e1d3f3SJens Wiklander uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mf)
46273e1d3f3SJens Wiklander {
46373e1d3f3SJens Wiklander return mf->cookie;
46473e1d3f3SJens Wiklander }
46573e1d3f3SJens Wiklander
protect_mem(struct mobj_ffa_prm * m)466*00338334SJens Wiklander static TEE_Result protect_mem(struct mobj_ffa_prm *m)
46773e1d3f3SJens Wiklander {
468*00338334SJens Wiklander DMSG("use_case %d pa %#"PRIxPA", size %#zx cookie %#"PRIx64,
469*00338334SJens Wiklander m->use_case, m->pa, m->mf.mobj.size, m->mf.cookie);
470*00338334SJens Wiklander
471*00338334SJens Wiklander return plat_set_protmem_range(m->use_case, m->pa, m->mf.mobj.size);
472*00338334SJens Wiklander }
473*00338334SJens Wiklander
restore_mem(struct mobj_ffa_prm * m)474*00338334SJens Wiklander static TEE_Result __maybe_unused restore_mem(struct mobj_ffa_prm *m)
475*00338334SJens Wiklander {
476*00338334SJens Wiklander DMSG("use_case %d pa %#" PRIxPA ", size %#zx cookie %#"PRIx64,
477*00338334SJens Wiklander m->use_case, m->pa, m->mf.mobj.size, m->mf.cookie);
478*00338334SJens Wiklander
479*00338334SJens Wiklander return plat_set_protmem_range(MOBJ_USE_CASE_NS_SHM, m->pa,
480*00338334SJens Wiklander m->mf.mobj.size);
481*00338334SJens Wiklander }
482*00338334SJens Wiklander
mobj_ffa_push_to_inactive(struct mobj_ffa * mf)483*00338334SJens Wiklander TEE_Result mobj_ffa_push_to_inactive(struct mobj_ffa *mf)
484*00338334SJens Wiklander {
485*00338334SJens Wiklander TEE_Result res = TEE_SUCCESS;
48673e1d3f3SJens Wiklander uint32_t exceptions = 0;
48773e1d3f3SJens Wiklander
48873e1d3f3SJens Wiklander exceptions = cpu_spin_lock_xsave(&shm_lock);
48973e1d3f3SJens Wiklander assert(!find_in_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf));
49073e1d3f3SJens Wiklander assert(!find_in_list(&shm_inactive_head, cmp_cookie, mf->cookie));
49173e1d3f3SJens Wiklander assert(!find_in_list(&shm_head, cmp_cookie, mf->cookie));
492*00338334SJens Wiklander
493*00338334SJens Wiklander if (find_in_list(&shm_inactive_head, cmp_pa_overlap, (vaddr_t)mf) ||
494*00338334SJens Wiklander find_in_list(&shm_head, cmp_pa_overlap, (vaddr_t)mf)) {
495*00338334SJens Wiklander res = TEE_ERROR_BAD_PARAMETERS;
496*00338334SJens Wiklander goto out;
497*00338334SJens Wiklander }
498*00338334SJens Wiklander if (is_mobj_ffa_prm(&mf->mobj)) {
499*00338334SJens Wiklander res = protect_mem(to_mobj_ffa_prm(&mf->mobj));
500*00338334SJens Wiklander if (res)
501*00338334SJens Wiklander goto out;
502*00338334SJens Wiklander }
503*00338334SJens Wiklander
50473e1d3f3SJens Wiklander SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
505*00338334SJens Wiklander
506*00338334SJens Wiklander out:
50773e1d3f3SJens Wiklander cpu_spin_unlock_xrestore(&shm_lock, exceptions);
50873e1d3f3SJens Wiklander
509*00338334SJens Wiklander return res;
51073e1d3f3SJens Wiklander }
51173e1d3f3SJens Wiklander
unmap_helper(struct mobj_ffa_shm * m)51246195e2fSJens Wiklander static void unmap_helper(struct mobj_ffa_shm *m)
51373e1d3f3SJens Wiklander {
51446195e2fSJens Wiklander if (m->mm) {
51546195e2fSJens Wiklander core_mmu_unmap_pages(tee_mm_get_smem(m->mm),
51646195e2fSJens Wiklander get_page_count(&m->mf));
51746195e2fSJens Wiklander tee_mm_free(m->mm);
51846195e2fSJens Wiklander m->mm = NULL;
51973e1d3f3SJens Wiklander }
52073e1d3f3SJens Wiklander }
52173e1d3f3SJens Wiklander
52273e1d3f3SJens Wiklander #ifdef CFG_CORE_SEL1_SPMC
mobj_ffa_sel1_spmc_reclaim(uint64_t cookie)52373e1d3f3SJens Wiklander TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie)
52473e1d3f3SJens Wiklander {
52573e1d3f3SJens Wiklander TEE_Result res = TEE_SUCCESS;
52673e1d3f3SJens Wiklander struct mobj_ffa *mf = NULL;
52773e1d3f3SJens Wiklander uint32_t exceptions = 0;
52873e1d3f3SJens Wiklander
52973e1d3f3SJens Wiklander exceptions = cpu_spin_lock_xsave(&shm_lock);
53073e1d3f3SJens Wiklander mf = find_in_list(&shm_head, cmp_cookie, cookie);
53173e1d3f3SJens Wiklander /*
53273e1d3f3SJens Wiklander * If the mobj is found here it's still active and cannot be
53373e1d3f3SJens Wiklander * reclaimed.
53473e1d3f3SJens Wiklander */
53573e1d3f3SJens Wiklander if (mf) {
53673e1d3f3SJens Wiklander DMSG("cookie %#"PRIx64" busy refc %u",
53773e1d3f3SJens Wiklander cookie, refcount_val(&mf->mobj.refc));
53873e1d3f3SJens Wiklander res = TEE_ERROR_BUSY;
53973e1d3f3SJens Wiklander goto out;
54073e1d3f3SJens Wiklander }
54173e1d3f3SJens Wiklander
54273e1d3f3SJens Wiklander mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
54373e1d3f3SJens Wiklander if (!mf) {
54473e1d3f3SJens Wiklander res = TEE_ERROR_ITEM_NOT_FOUND;
54573e1d3f3SJens Wiklander goto out;
54673e1d3f3SJens Wiklander }
54773e1d3f3SJens Wiklander /*
54873e1d3f3SJens Wiklander * If the mobj has been registered via mobj_ffa_get_by_cookie()
54973e1d3f3SJens Wiklander * but not unregistered yet with mobj_ffa_unregister_by_cookie().
55073e1d3f3SJens Wiklander */
551287e68f4SJens Wiklander if (mf->inactive_refs) {
552287e68f4SJens Wiklander DMSG("cookie %#"PRIx64" busy inactive_refs %u",
553287e68f4SJens Wiklander cookie, mf->inactive_refs);
55473e1d3f3SJens Wiklander res = TEE_ERROR_BUSY;
55573e1d3f3SJens Wiklander goto out;
55673e1d3f3SJens Wiklander }
55773e1d3f3SJens Wiklander
55873e1d3f3SJens Wiklander if (!pop_from_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf))
55973e1d3f3SJens Wiklander panic();
560*00338334SJens Wiklander if (is_mobj_ffa_prm(&mf->mobj))
561*00338334SJens Wiklander res = restore_mem(to_mobj_ffa_prm(&mf->mobj));
562*00338334SJens Wiklander else
56373e1d3f3SJens Wiklander res = TEE_SUCCESS;
56473e1d3f3SJens Wiklander out:
56573e1d3f3SJens Wiklander cpu_spin_unlock_xrestore(&shm_lock, exceptions);
566a65dd3a6SJens Wiklander if (!res) {
56773e1d3f3SJens Wiklander mobj_ffa_sel1_spmc_delete(mf);
568a65dd3a6SJens Wiklander virt_remove_cookie(cookie);
569a65dd3a6SJens Wiklander }
57073e1d3f3SJens Wiklander return res;
57173e1d3f3SJens Wiklander }
57273e1d3f3SJens Wiklander #endif /*CFG_CORE_SEL1_SPMC*/
57373e1d3f3SJens Wiklander
mobj_ffa_unregister_by_cookie(uint64_t cookie)574fb19e98eSJens Wiklander TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie)
575fb19e98eSJens Wiklander {
576fb19e98eSJens Wiklander TEE_Result res = TEE_SUCCESS;
577fb19e98eSJens Wiklander struct mobj_ffa *mf = NULL;
578fb19e98eSJens Wiklander uint32_t exceptions = 0;
579fb19e98eSJens Wiklander
580fb19e98eSJens Wiklander assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
581fb19e98eSJens Wiklander exceptions = cpu_spin_lock_xsave(&shm_lock);
582fb19e98eSJens Wiklander mf = find_in_list(&shm_head, cmp_cookie, cookie);
583fb19e98eSJens Wiklander /*
584fb19e98eSJens Wiklander * If the mobj is found here it's still active and cannot be
585fb19e98eSJens Wiklander * unregistered.
586fb19e98eSJens Wiklander */
587fb19e98eSJens Wiklander if (mf) {
588287e68f4SJens Wiklander EMSG("cookie %#"PRIx64" busy refc %u:%u",
589287e68f4SJens Wiklander cookie, refcount_val(&mf->mobj.refc), mf->inactive_refs);
590fb19e98eSJens Wiklander res = TEE_ERROR_BUSY;
591fb19e98eSJens Wiklander goto out;
592fb19e98eSJens Wiklander }
593fb19e98eSJens Wiklander mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
594fb19e98eSJens Wiklander /*
595fb19e98eSJens Wiklander * If the mobj isn't found or if it already has been unregistered.
596fb19e98eSJens Wiklander */
597287e68f4SJens Wiklander if (!mf) {
598287e68f4SJens Wiklander EMSG("cookie %#"PRIx64" not found", cookie);
599fb19e98eSJens Wiklander res = TEE_ERROR_ITEM_NOT_FOUND;
600fb19e98eSJens Wiklander goto out;
601fb19e98eSJens Wiklander }
602287e68f4SJens Wiklander #if defined(CFG_CORE_SEL1_SPMC)
603287e68f4SJens Wiklander if (!mf->registered_by_cookie) {
604bf2b1c94SJens Wiklander /*
605bf2b1c94SJens Wiklander * This is expected behaviour if the normal world has
606bf2b1c94SJens Wiklander * registered the memory but OP-TEE has not yet used the
607bf2b1c94SJens Wiklander * corresponding cookie with mobj_ffa_get_by_cookie(). It
608bf2b1c94SJens Wiklander * can be non-trivial for the normal world to predict if
609bf2b1c94SJens Wiklander * the cookie really has been used or not. So even if we
610bf2b1c94SJens Wiklander * return it as an error it will be ignored by
611bf2b1c94SJens Wiklander * handle_unregister_shm().
612bf2b1c94SJens Wiklander */
613bf2b1c94SJens Wiklander EMSG("cookie %#"PRIx64" not registered refs %u:%u",
614bf2b1c94SJens Wiklander cookie, refcount_val(&mf->mobj.refc), mf->inactive_refs);
615e26b8354SJens Wiklander res = TEE_ERROR_ITEM_NOT_FOUND;
616e26b8354SJens Wiklander goto out;
617e26b8354SJens Wiklander }
618287e68f4SJens Wiklander assert(mf->inactive_refs);
619287e68f4SJens Wiklander mf->inactive_refs--;
620287e68f4SJens Wiklander mf->registered_by_cookie = false;
621287e68f4SJens Wiklander #else
622287e68f4SJens Wiklander if (mf->inactive_refs) {
623287e68f4SJens Wiklander EMSG("cookie %#"PRIx64" busy refc %u:%u",
624287e68f4SJens Wiklander cookie, refcount_val(&mf->mobj.refc), mf->inactive_refs);
625287e68f4SJens Wiklander res = TEE_ERROR_BUSY;
626287e68f4SJens Wiklander goto out;
627287e68f4SJens Wiklander }
628e26b8354SJens Wiklander mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
629e26b8354SJens Wiklander mobj_ffa_spmc_delete(mf);
630e26b8354SJens Wiklander thread_spmc_relinquish(cookie);
631fb19e98eSJens Wiklander #endif
632fb19e98eSJens Wiklander res = TEE_SUCCESS;
633fb19e98eSJens Wiklander
634fb19e98eSJens Wiklander out:
635fb19e98eSJens Wiklander cpu_spin_unlock_xrestore(&shm_lock, exceptions);
636fb19e98eSJens Wiklander return res;
637fb19e98eSJens Wiklander }
638fb19e98eSJens Wiklander
mobj_ffa_get_by_cookie(uint64_t cookie,unsigned int internal_offs)639fb19e98eSJens Wiklander struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie,
640fb19e98eSJens Wiklander unsigned int internal_offs)
64173e1d3f3SJens Wiklander {
64246195e2fSJens Wiklander struct mobj_ffa_shm *mfs = NULL;
64373e1d3f3SJens Wiklander struct mobj_ffa *mf = NULL;
64473e1d3f3SJens Wiklander uint32_t exceptions = 0;
64546195e2fSJens Wiklander uint16_t offs = 0;
64673e1d3f3SJens Wiklander
64773e1d3f3SJens Wiklander if (internal_offs >= SMALL_PAGE_SIZE)
64873e1d3f3SJens Wiklander return NULL;
64973e1d3f3SJens Wiklander exceptions = cpu_spin_lock_xsave(&shm_lock);
65073e1d3f3SJens Wiklander mf = find_in_list(&shm_head, cmp_cookie, cookie);
65173e1d3f3SJens Wiklander if (mf) {
652*00338334SJens Wiklander if (is_mobj_ffa_shm(&mf->mobj))
653*00338334SJens Wiklander offs = to_mobj_ffa_shm(&mf->mobj)->page_offset;
654*00338334SJens Wiklander else
655*00338334SJens Wiklander offs = 0;
65646195e2fSJens Wiklander if (offs == internal_offs) {
65773e1d3f3SJens Wiklander if (!refcount_inc(&mf->mobj.refc)) {
65873e1d3f3SJens Wiklander /*
65973e1d3f3SJens Wiklander * If refcount is 0 some other thread has
66073e1d3f3SJens Wiklander * called mobj_put() on this reached 0 and
66146195e2fSJens Wiklander * before ffa_shm_inactivate() got the lock
66246195e2fSJens Wiklander * we found it. Let's reinitialize it.
66373e1d3f3SJens Wiklander */
66473e1d3f3SJens Wiklander refcount_set(&mf->mobj.refc, 1);
665287e68f4SJens Wiklander mf->inactive_refs++;
66673e1d3f3SJens Wiklander }
667287e68f4SJens Wiklander DMSG("cookie %#"PRIx64" active: refc %u:%u",
668287e68f4SJens Wiklander cookie, refcount_val(&mf->mobj.refc),
669287e68f4SJens Wiklander mf->inactive_refs);
67073e1d3f3SJens Wiklander } else {
67173e1d3f3SJens Wiklander EMSG("cookie %#"PRIx64" mismatching internal_offs got %#"PRIx16" expected %#x",
67246195e2fSJens Wiklander cookie, offs, internal_offs);
67373e1d3f3SJens Wiklander mf = NULL;
67473e1d3f3SJens Wiklander }
67573e1d3f3SJens Wiklander } else {
67673e1d3f3SJens Wiklander mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
677e26b8354SJens Wiklander #if !defined(CFG_CORE_SEL1_SPMC)
678fb19e98eSJens Wiklander /* Try to retrieve it from the SPM at S-EL2 */
67973e1d3f3SJens Wiklander if (mf) {
680fb19e98eSJens Wiklander DMSG("cookie %#"PRIx64" resurrecting", cookie);
681fb19e98eSJens Wiklander } else {
682*00338334SJens Wiklander enum mobj_use_case uc = MOBJ_USE_CASE_NS_SHM;
683*00338334SJens Wiklander
684287e68f4SJens Wiklander DMSG("Populating mobj from rx buffer, cookie %#"PRIx64,
685fb19e98eSJens Wiklander cookie);
686*00338334SJens Wiklander mf = thread_spmc_populate_mobj_from_rx(cookie, uc);
687fb19e98eSJens Wiklander }
688fb19e98eSJens Wiklander #endif
689fb19e98eSJens Wiklander if (mf) {
690fb19e98eSJens Wiklander #if defined(CFG_CORE_SEL1_SPMC)
691287e68f4SJens Wiklander if (!mf->registered_by_cookie) {
692287e68f4SJens Wiklander mf->inactive_refs++;
69373e1d3f3SJens Wiklander mf->registered_by_cookie = true;
694287e68f4SJens Wiklander }
695fb19e98eSJens Wiklander #endif
69673e1d3f3SJens Wiklander assert(refcount_val(&mf->mobj.refc) == 0);
69773e1d3f3SJens Wiklander refcount_set(&mf->mobj.refc, 1);
698287e68f4SJens Wiklander mf->inactive_refs++;
699*00338334SJens Wiklander if (is_mobj_ffa_shm(&mf->mobj)) {
70046195e2fSJens Wiklander mfs = to_mobj_ffa_shm(&mf->mobj);
70146195e2fSJens Wiklander refcount_set(&mfs->mapcount, 0);
70260d883c8SJens Wiklander
70360d883c8SJens Wiklander /*
704*00338334SJens Wiklander * mfs->page_offset is offset into the
705*00338334SJens Wiklander * first page. This offset is assigned
706*00338334SJens Wiklander * from the internal_offs parameter to this
707*00338334SJens Wiklander * function.
70860d883c8SJens Wiklander *
709*00338334SJens Wiklander * While a mobj_ffa is active (ref_count >
710*00338334SJens Wiklander * 0) this will not change, but when being
711*00338334SJens Wiklander * pushed to the inactive list it can be
712*00338334SJens Wiklander * changed again.
71360d883c8SJens Wiklander *
71460d883c8SJens Wiklander * So below we're backing out the old
715*00338334SJens Wiklander * mfs->page_offset and then assigning a
716*00338334SJens Wiklander * new from internal_offset.
71760d883c8SJens Wiklander */
71846195e2fSJens Wiklander mf->mobj.size += mfs->page_offset;
71973e1d3f3SJens Wiklander assert(!(mf->mobj.size & SMALL_PAGE_MASK));
72073e1d3f3SJens Wiklander mf->mobj.size -= internal_offs;
72146195e2fSJens Wiklander mfs->page_offset = internal_offs;
722*00338334SJens Wiklander } else if (is_mobj_ffa_prm(&mf->mobj) &&
723*00338334SJens Wiklander internal_offs) {
724*00338334SJens Wiklander mf = NULL;
725*00338334SJens Wiklander }
72660d883c8SJens Wiklander
72773e1d3f3SJens Wiklander SLIST_INSERT_HEAD(&shm_head, mf, link);
72873e1d3f3SJens Wiklander }
72973e1d3f3SJens Wiklander }
73073e1d3f3SJens Wiklander
73173e1d3f3SJens Wiklander cpu_spin_unlock_xrestore(&shm_lock, exceptions);
73273e1d3f3SJens Wiklander
73373e1d3f3SJens Wiklander if (!mf) {
73473e1d3f3SJens Wiklander EMSG("Failed to get cookie %#"PRIx64" internal_offs %#x",
73573e1d3f3SJens Wiklander cookie, internal_offs);
73673e1d3f3SJens Wiklander return NULL;
73773e1d3f3SJens Wiklander }
73873e1d3f3SJens Wiklander return &mf->mobj;
73973e1d3f3SJens Wiklander }
74073e1d3f3SJens Wiklander
ffa_shm_get_pa(struct mobj * mobj,size_t offset,size_t granule,paddr_t * pa)74146195e2fSJens Wiklander static TEE_Result ffa_shm_get_pa(struct mobj *mobj, size_t offset,
74273e1d3f3SJens Wiklander size_t granule, paddr_t *pa)
74373e1d3f3SJens Wiklander {
74446195e2fSJens Wiklander struct mobj_ffa_shm *m = to_mobj_ffa_shm(mobj);
74573e1d3f3SJens Wiklander size_t full_offset = 0;
74673e1d3f3SJens Wiklander paddr_t p = 0;
74773e1d3f3SJens Wiklander
74873e1d3f3SJens Wiklander if (!pa)
74973e1d3f3SJens Wiklander return TEE_ERROR_GENERIC;
75073e1d3f3SJens Wiklander
75173e1d3f3SJens Wiklander if (offset >= mobj->size)
75273e1d3f3SJens Wiklander return TEE_ERROR_GENERIC;
75373e1d3f3SJens Wiklander
75446195e2fSJens Wiklander full_offset = offset + m->page_offset;
75573e1d3f3SJens Wiklander switch (granule) {
75673e1d3f3SJens Wiklander case 0:
75746195e2fSJens Wiklander p = m->pages[full_offset / SMALL_PAGE_SIZE] +
75873e1d3f3SJens Wiklander (full_offset & SMALL_PAGE_MASK);
75973e1d3f3SJens Wiklander break;
76073e1d3f3SJens Wiklander case SMALL_PAGE_SIZE:
76146195e2fSJens Wiklander p = m->pages[full_offset / SMALL_PAGE_SIZE];
76273e1d3f3SJens Wiklander break;
76373e1d3f3SJens Wiklander default:
76473e1d3f3SJens Wiklander return TEE_ERROR_GENERIC;
76573e1d3f3SJens Wiklander }
76673e1d3f3SJens Wiklander *pa = p;
76773e1d3f3SJens Wiklander
76873e1d3f3SJens Wiklander return TEE_SUCCESS;
76973e1d3f3SJens Wiklander }
77073e1d3f3SJens Wiklander
/* Offset of the shared buffer within its first small page. */
static size_t ffa_shm_get_phys_offs(struct mobj *mobj,
				    size_t granule __maybe_unused)
{
	struct mobj_ffa_shm *shm = to_mobj_ffa_shm(mobj);

	assert(granule >= mobj->phys_granule);

	return shm->page_offset;
}
77873e1d3f3SJens Wiklander
ffa_shm_get_va(struct mobj * mobj,size_t offset,size_t len)77946195e2fSJens Wiklander static void *ffa_shm_get_va(struct mobj *mobj, size_t offset, size_t len)
78073e1d3f3SJens Wiklander {
78146195e2fSJens Wiklander struct mobj_ffa_shm *m = to_mobj_ffa_shm(mobj);
78273e1d3f3SJens Wiklander
78346195e2fSJens Wiklander if (!m->mm || !mobj_check_offset_and_len(mobj, offset, len))
78473e1d3f3SJens Wiklander return NULL;
78573e1d3f3SJens Wiklander
78646195e2fSJens Wiklander return (void *)(tee_mm_get_smem(m->mm) + offset + m->page_offset);
78773e1d3f3SJens Wiklander }
78873e1d3f3SJens Wiklander
/*
 * Deactivate @mf once its mobj refcount has dropped to 0: move it from
 * the active list back to the inactive list and release one inactive
 * reference. Shared by the shm and prm free() hooks.
 */
static void ffa_inactivate(struct mobj_ffa *mf)
{
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	/*
	 * If refcount isn't 0 some other thread has found this mobj in
	 * shm_head after the mobj_put() that put us here and before we got
	 * the lock.
	 */
	if (refcount_val(&mf->mobj.refc)) {
		DMSG("cookie %#"PRIx64" was resurrected", mf->cookie);
		goto out;
	}

	/*
	 * pop_from_list() can fail to find the mobj if we had just
	 * decreased the refcount to 0 in mobj_put() and was going to
	 * acquire the shm_lock but another thread found this mobj and
	 * reinitialized the refcount to 1. Then before we got cpu time the
	 * other thread called mobj_put() and deactivated the mobj again.
	 *
	 * However, we still have the inactive count that guarantees
	 * that the mobj can't be freed until it reaches 0.
	 * At this point the mobj is in the inactive list.
	 */
	if (pop_from_list(&shm_head, cmp_ptr, (vaddr_t)mf)) {
		/* Only shared memory objects carry a mapping to tear down. */
		if (is_mobj_ffa_shm(&mf->mobj))
			unmap_helper(to_mobj_ffa_shm(&mf->mobj));
		SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
	}
out:
	/* Each activation took an inactive reference; drop it now. */
	if (!mf->inactive_refs)
		panic();
	mf->inactive_refs--;
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
}
82673e1d3f3SJens Wiklander
ffa_shm_inactivate(struct mobj * mobj)827*00338334SJens Wiklander static void ffa_shm_inactivate(struct mobj *mobj)
828*00338334SJens Wiklander {
829*00338334SJens Wiklander ffa_inactivate(&to_mobj_ffa_shm(mobj)->mf);
830*00338334SJens Wiklander }
831*00338334SJens Wiklander
ffa_shm_get_mem_type(struct mobj * mobj __unused,uint32_t * mt)83246195e2fSJens Wiklander static TEE_Result ffa_shm_get_mem_type(struct mobj *mobj __unused, uint32_t *mt)
83373e1d3f3SJens Wiklander {
8348afe7a7cSJens Wiklander if (!mt)
83573e1d3f3SJens Wiklander return TEE_ERROR_GENERIC;
83673e1d3f3SJens Wiklander
8378afe7a7cSJens Wiklander *mt = TEE_MATTR_MEM_TYPE_CACHED;
83873e1d3f3SJens Wiklander
83973e1d3f3SJens Wiklander return TEE_SUCCESS;
84073e1d3f3SJens Wiklander }
84173e1d3f3SJens Wiklander
/* Shared memory objects match the non-secure memory attributes. */
static bool ffa_shm_matches(struct mobj *mobj __maybe_unused,
			    enum buf_is_attr attr)
{
	assert(is_mobj_ffa_shm(mobj));

	switch (attr) {
	case CORE_MEM_NON_SEC:
	case CORE_MEM_REG_SHM:
		return true;
	default:
		return false;
	}
}
84973e1d3f3SJens Wiklander
ffa_shm_get_cookie(struct mobj * mobj)85046195e2fSJens Wiklander static uint64_t ffa_shm_get_cookie(struct mobj *mobj)
85173e1d3f3SJens Wiklander {
85246195e2fSJens Wiklander return to_mobj_ffa_shm(mobj)->mf.cookie;
85373e1d3f3SJens Wiklander }
85473e1d3f3SJens Wiklander
/*
 * Map the shared memory object into OP-TEE's virtual address space, or
 * take another reference on an already existing mapping.
 *
 * Returns TEE_SUCCESS on success or TEE_ERROR_OUT_OF_MEMORY if no
 * virtual address range can be reserved.
 */
static TEE_Result ffa_shm_inc_map(struct mobj *mobj)
{
	struct mobj_ffa_shm *m = to_mobj_ffa_shm(mobj);
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;
	size_t sz = 0;

	while (true) {
		/* Fast path: already mapped, just count the new user. */
		if (refcount_inc(&m->mapcount))
			return TEE_SUCCESS;

		exceptions = cpu_spin_lock_xsave(&shm_lock);

		if (!refcount_val(&m->mapcount))
			break; /* continue to reinitialize */
		/*
		 * If another thread beat us to initialize mapcount,
		 * restart to make sure we still increase it.
		 */
		cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	}

	/*
	 * If we have beated another thread calling ffa_dec_map()
	 * to get the lock we need only to reinitialize mapcount to 1.
	 */
	if (!m->mm) {
		/* The mapping covers the offset into the first page too. */
		sz = ROUNDUP(mobj->size + m->page_offset, SMALL_PAGE_SIZE);
		m->mm = tee_mm_alloc(&core_virt_shm_pool, sz);
		if (!m->mm) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto out;
		}

		res = core_mmu_map_pages(tee_mm_get_smem(m->mm), m->pages,
					 sz / SMALL_PAGE_SIZE,
					 MEM_AREA_NSEC_SHM);
		if (res) {
			/* Undo the reservation; mapcount stays 0. */
			tee_mm_free(m->mm);
			m->mm = NULL;
			goto out;
		}
	}

	refcount_set(&m->mapcount, 1);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return res;
}
90573e1d3f3SJens Wiklander
ffa_shm_dec_map(struct mobj * mobj)90646195e2fSJens Wiklander static TEE_Result ffa_shm_dec_map(struct mobj *mobj)
90773e1d3f3SJens Wiklander {
90846195e2fSJens Wiklander struct mobj_ffa_shm *m = to_mobj_ffa_shm(mobj);
90973e1d3f3SJens Wiklander uint32_t exceptions = 0;
91073e1d3f3SJens Wiklander
91146195e2fSJens Wiklander if (!refcount_dec(&m->mapcount))
91273e1d3f3SJens Wiklander return TEE_SUCCESS;
91373e1d3f3SJens Wiklander
91473e1d3f3SJens Wiklander exceptions = cpu_spin_lock_xsave(&shm_lock);
91546195e2fSJens Wiklander if (!refcount_val(&m->mapcount))
91646195e2fSJens Wiklander unmap_helper(m);
91773e1d3f3SJens Wiklander cpu_spin_unlock_xrestore(&shm_lock, exceptions);
91873e1d3f3SJens Wiklander
91973e1d3f3SJens Wiklander return TEE_SUCCESS;
92073e1d3f3SJens Wiklander }
92173e1d3f3SJens Wiklander
mapped_shm_init(void)92273e1d3f3SJens Wiklander static TEE_Result mapped_shm_init(void)
92373e1d3f3SJens Wiklander {
92473e1d3f3SJens Wiklander vaddr_t pool_start = 0;
92573e1d3f3SJens Wiklander vaddr_t pool_end = 0;
92673e1d3f3SJens Wiklander
92773e1d3f3SJens Wiklander core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
92873e1d3f3SJens Wiklander if (!pool_start || !pool_end)
92973e1d3f3SJens Wiklander panic("Can't find region for shmem pool");
93073e1d3f3SJens Wiklander
931fdf696b7SJens Wiklander if (!tee_mm_init(&core_virt_shm_pool, pool_start, pool_end - pool_start,
932d9f0ee43Sjames.jiang SMALL_PAGE_SHIFT,
93373e1d3f3SJens Wiklander TEE_MM_POOL_NO_FLAGS))
93473e1d3f3SJens Wiklander panic("Could not create shmem pool");
93573e1d3f3SJens Wiklander
93673e1d3f3SJens Wiklander DMSG("Shared memory address range: %#"PRIxVA", %#"PRIxVA,
93773e1d3f3SJens Wiklander pool_start, pool_end);
93873e1d3f3SJens Wiklander return TEE_SUCCESS;
93973e1d3f3SJens Wiklander }
94073e1d3f3SJens Wiklander
94146195e2fSJens Wiklander static const struct mobj_ops mobj_ffa_shm_ops = {
94246195e2fSJens Wiklander .get_pa = ffa_shm_get_pa,
94346195e2fSJens Wiklander .get_phys_offs = ffa_shm_get_phys_offs,
94446195e2fSJens Wiklander .get_va = ffa_shm_get_va,
94546195e2fSJens Wiklander .get_mem_type = ffa_shm_get_mem_type,
94646195e2fSJens Wiklander .matches = ffa_shm_matches,
94746195e2fSJens Wiklander .free = ffa_shm_inactivate,
94846195e2fSJens Wiklander .get_cookie = ffa_shm_get_cookie,
94946195e2fSJens Wiklander .inc_map = ffa_shm_inc_map,
95046195e2fSJens Wiklander .dec_map = ffa_shm_dec_map,
951af5e7dc7SJens Wiklander };
952af5e7dc7SJens Wiklander
9538f97fe77SJens Wiklander preinit(mapped_shm_init);
954*00338334SJens Wiklander
955*00338334SJens Wiklander #ifdef CFG_CORE_DYN_PROTMEM
ffa_prm_get_pa(struct mobj * mobj,size_t offset,size_t granule,paddr_t * pa)956*00338334SJens Wiklander static TEE_Result ffa_prm_get_pa(struct mobj *mobj, size_t offset,
957*00338334SJens Wiklander size_t granule, paddr_t *pa)
958*00338334SJens Wiklander {
959*00338334SJens Wiklander struct mobj_ffa_prm *m = to_mobj_ffa_prm(mobj);
960*00338334SJens Wiklander paddr_t p;
961*00338334SJens Wiklander
962*00338334SJens Wiklander if (!pa || offset >= mobj->size)
963*00338334SJens Wiklander return TEE_ERROR_GENERIC;
964*00338334SJens Wiklander
965*00338334SJens Wiklander p = m->pa + offset;
966*00338334SJens Wiklander
967*00338334SJens Wiklander if (granule) {
968*00338334SJens Wiklander if (granule != SMALL_PAGE_SIZE &&
969*00338334SJens Wiklander granule != CORE_MMU_PGDIR_SIZE)
970*00338334SJens Wiklander return TEE_ERROR_GENERIC;
971*00338334SJens Wiklander p &= ~(granule - 1);
972*00338334SJens Wiklander }
973*00338334SJens Wiklander
974*00338334SJens Wiklander *pa = p;
975*00338334SJens Wiklander return TEE_SUCCESS;
976*00338334SJens Wiklander }
977*00338334SJens Wiklander
ffa_prm_get_mem_type(struct mobj * mobj __maybe_unused,uint32_t * mt)978*00338334SJens Wiklander static TEE_Result ffa_prm_get_mem_type(struct mobj *mobj __maybe_unused,
979*00338334SJens Wiklander uint32_t *mt)
980*00338334SJens Wiklander {
981*00338334SJens Wiklander assert(is_mobj_ffa_prm(mobj));
982*00338334SJens Wiklander
983*00338334SJens Wiklander if (!mt)
984*00338334SJens Wiklander return TEE_ERROR_GENERIC;
985*00338334SJens Wiklander
986*00338334SJens Wiklander *mt = TEE_MATTR_MEM_TYPE_CACHED;
987*00338334SJens Wiklander
988*00338334SJens Wiklander return TEE_SUCCESS;
989*00338334SJens Wiklander }
990*00338334SJens Wiklander
/* Protected memory objects match the secure memory attributes. */
static bool ffa_prm_matches(struct mobj *mobj __maybe_unused,
			    enum buf_is_attr attr)
{
	assert(is_mobj_ffa_prm(mobj));

	return attr == CORE_MEM_SDP_MEM || attr == CORE_MEM_SEC;
}
998*00338334SJens Wiklander
ffa_prm_inactivate(struct mobj * mobj)999*00338334SJens Wiklander static void ffa_prm_inactivate(struct mobj *mobj)
1000*00338334SJens Wiklander {
1001*00338334SJens Wiklander ffa_inactivate(&to_mobj_ffa_prm(mobj)->mf);
1002*00338334SJens Wiklander }
1003*00338334SJens Wiklander
ffa_prm_get_cookie(struct mobj * mobj)1004*00338334SJens Wiklander static uint64_t ffa_prm_get_cookie(struct mobj *mobj)
1005*00338334SJens Wiklander {
1006*00338334SJens Wiklander return to_mobj_ffa_prm(mobj)->mf.cookie;
1007*00338334SJens Wiklander }
1008*00338334SJens Wiklander
ffa_prm_no_map(struct mobj * mobj __maybe_unused)1009*00338334SJens Wiklander static TEE_Result ffa_prm_no_map(struct mobj *mobj __maybe_unused)
1010*00338334SJens Wiklander {
1011*00338334SJens Wiklander assert(is_mobj_ffa_prm(mobj));
1012*00338334SJens Wiklander
1013*00338334SJens Wiklander return TEE_ERROR_GENERIC;
1014*00338334SJens Wiklander }
1015*00338334SJens Wiklander
1016*00338334SJens Wiklander static const struct mobj_ops mobj_ffa_prm_ops = {
1017*00338334SJens Wiklander .get_pa = ffa_prm_get_pa,
1018*00338334SJens Wiklander .get_mem_type = ffa_prm_get_mem_type,
1019*00338334SJens Wiklander .matches = ffa_prm_matches,
1020*00338334SJens Wiklander .free = ffa_prm_inactivate,
1021*00338334SJens Wiklander .get_cookie = ffa_prm_get_cookie,
1022*00338334SJens Wiklander .inc_map = ffa_prm_no_map,
1023*00338334SJens Wiklander .dec_map = ffa_prm_no_map,
1024*00338334SJens Wiklander };
1025*00338334SJens Wiklander
cmp_protmem_pa(struct mobj_ffa * mf,uint64_t pa)1026*00338334SJens Wiklander static bool cmp_protmem_pa(struct mobj_ffa *mf, uint64_t pa)
1027*00338334SJens Wiklander {
1028*00338334SJens Wiklander struct mobj_ffa_prm *m = NULL;
1029*00338334SJens Wiklander
1030*00338334SJens Wiklander if (!is_mobj_ffa_prm(&mf->mobj))
1031*00338334SJens Wiklander return false;
1032*00338334SJens Wiklander
1033*00338334SJens Wiklander m = to_mobj_ffa_prm(&mf->mobj);
1034*00338334SJens Wiklander return pa >= m->pa && pa < m->pa + m->mf.mobj.size;
1035*00338334SJens Wiklander }
1036*00338334SJens Wiklander
/*
 * Find the active protected memory object whose physical range fully
 * contains [pa, pa + size) and return it with a reference taken, or
 * NULL if there's no such object. A zero @size is treated as a
 * single-byte probe.
 */
struct mobj *mobj_ffa_protmem_get_by_pa(paddr_t pa, paddr_size_t size)
{
	struct mobj_ffa_prm *prm = NULL;
	struct mobj_ffa *mf = NULL;
	struct mobj *ret = NULL;
	uint32_t exceptions = 0;

	if (!size)
		size = 1;

	exceptions = cpu_spin_lock_xsave(&shm_lock);

	mf = find_in_list(&shm_head, cmp_protmem_pa, pa);
	if (mf) {
		prm = to_mobj_ffa_prm(&mf->mobj);
		/* Require the whole requested range, not just @pa. */
		if (core_is_buffer_inside(pa, size, prm->pa,
					  prm->mf.mobj.size))
			ret = mobj_get(&mf->mobj);
	}

	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	return ret;
}
1059*00338334SJens Wiklander
/*
 * Assign @use_case to the inactive protected memory object identified
 * by @cookie. Without CFG_CORE_SEL1_SPMC, if the cookie isn't known yet
 * the memory descriptor is retrieved from the SPM at S-EL2 with that
 * use-case.
 *
 * Returns TEE_SUCCESS on success, TEE_ERROR_ITEM_NOT_FOUND if the
 * cookie doesn't identify a protected memory object,
 * TEE_ERROR_BUSY if a use-case is already assigned or the object is
 * active, and TEE_ERROR_BAD_PARAMETERS on use-case mismatch.
 */
TEE_Result mobj_ffa_assign_protmem(uint64_t cookie, enum mobj_use_case use_case)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa_prm *m = NULL;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	if (mf) {
		if (!is_mobj_ffa_prm(&mf->mobj)) {
			res = TEE_ERROR_ITEM_NOT_FOUND;
			goto out;
		}
		m = to_mobj_ffa_prm(&mf->mobj);
		/* A use-case can only be assigned once. */
		if (m->assigned_use_case) {
			res = TEE_ERROR_BUSY;
			goto out;
		}
		if (m->use_case != use_case) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto out;
		}
		m->assigned_use_case = true;
		goto out;
	}
	/* Active objects can't have a use-case assigned. */
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	if (mf) {
		if (!is_mobj_ffa_prm(&mf->mobj))
			res = TEE_ERROR_BUSY;
		else
			res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
#if !defined(CFG_CORE_SEL1_SPMC)
	/* Try to retrieve it from the SPM at S-EL2 */
	DMSG("Populating mobj from rx buffer, cookie %#"PRIx64" use-case %d",
	     cookie, use_case);
	mf = thread_spmc_populate_mobj_from_rx(cookie, use_case);
	if (mf) {
		SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
	} else {
		EMSG("Failed to assign use-case %d to cookie %#"PRIx64"",
		     use_case, cookie);
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
#endif
	/*
	 * NOTE(review): with CFG_CORE_SEL1_SPMC and no matching cookie
	 * in either list, res remains TEE_SUCCESS here — confirm that's
	 * intended rather than TEE_ERROR_ITEM_NOT_FOUND.
	 */
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	return res;
}
1112*00338334SJens Wiklander #endif /*CFG_CORE_DYN_PROTMEM*/
1113