15c1143a8SJelle Sels // SPDX-License-Identifier: BSD-2-Clause
25c1143a8SJelle Sels /*
36f3a5646SJelle Sels * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
45c1143a8SJelle Sels */
55c1143a8SJelle Sels #include <assert.h>
65c1143a8SJelle Sels #include <bitstring.h>
75c1143a8SJelle Sels #include <ffa.h>
85c1143a8SJelle Sels #include <kernel/spinlock.h>
95c1143a8SJelle Sels #include <mm/mobj.h>
105c1143a8SJelle Sels #include <mm/sp_mem.h>
115c1143a8SJelle Sels
125c1143a8SJelle Sels #define NUM_SHARES 64
135c1143a8SJelle Sels
145c1143a8SJelle Sels static bitstr_t bit_decl(share_bits, NUM_SHARES);
155c1143a8SJelle Sels static unsigned int sp_mem_lock = SPINLOCK_UNLOCK;
165c1143a8SJelle Sels
175c1143a8SJelle Sels /* mem_shares stores all active FF-A shares. */
185c1143a8SJelle Sels SLIST_HEAD(sp_mem_head, sp_mem);
195c1143a8SJelle Sels static struct sp_mem_head mem_shares = SLIST_HEAD_INITIALIZER(sp_mem_head);
20593b94eeSJens Wiklander static const struct mobj_ops mobj_sp_ops;
216a1b230cSJelle Sels
226a1b230cSJelle Sels struct mobj_sp {
236a1b230cSJelle Sels struct mobj mobj;
246f3a5646SJelle Sels uint32_t mem_type;
25036559a5SJelle Sels bool is_secure;
266a1b230cSJelle Sels paddr_t pages[];
276a1b230cSJelle Sels };
286a1b230cSJelle Sels
to_mobj_sp(struct mobj * mobj)296a1b230cSJelle Sels static struct mobj_sp *to_mobj_sp(struct mobj *mobj)
306a1b230cSJelle Sels {
316a1b230cSJelle Sels assert(mobj->ops == &mobj_sp_ops);
326a1b230cSJelle Sels return container_of(mobj, struct mobj_sp, mobj);
336a1b230cSJelle Sels }
346a1b230cSJelle Sels
/*
 * Bytes needed for a mobj_sp tracking @num_pages pages, including the
 * trailing paddr_t array. Returns 0 if the size overflows size_t.
 */
static size_t mobj_sp_size(size_t num_pages)
{
	size_t bytes = 0;

	if (MUL_OVERFLOW(num_pages, sizeof(paddr_t), &bytes) ||
	    ADD_OVERFLOW(sizeof(struct mobj_sp), bytes, &bytes))
		return 0;

	return bytes;
}
456a1b230cSJelle Sels
/*
 * Allocate a mobj covering @pages small pages of the given memory type.
 * The physical pages are added afterwards with sp_mem_add_pages().
 * Returns NULL on overflow or out of memory.
 */
struct mobj *sp_mem_new_mobj(uint64_t pages, uint32_t mem_type, bool is_secure)
{
	struct mobj_sp *m = NULL;
	size_t size = 0;
	size_t s = 0;

	/*
	 * @pages is 64-bit but mobj sizes are size_t: the unchecked
	 * pages * SMALL_PAGE_SIZE (and the implicit truncation of @pages
	 * in the mobj_sp_size() call) could silently wrap on 32-bit
	 * builds, so reject unrepresentable page counts up front.
	 */
	if (MUL_OVERFLOW(pages, SMALL_PAGE_SIZE, &size))
		return NULL;

	s = mobj_sp_size(pages);
	if (!s)
		return NULL;

	m = calloc(1, s);
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_sp_ops;
	m->mobj.size = size;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;

	m->mem_type = mem_type;
	m->is_secure = is_secure;

	refcount_set(&m->mobj.refc, 1);

	return &m->mobj;
}
696a1b230cSJelle Sels
get_page_count(struct mobj_sp * ms)706a1b230cSJelle Sels static size_t get_page_count(struct mobj_sp *ms)
716a1b230cSJelle Sels {
72*04e46975SEtienne Carriere return ROUNDUP_DIV(ms->mobj.size, SMALL_PAGE_SIZE);
736a1b230cSJelle Sels }
746a1b230cSJelle Sels
756a1b230cSJelle Sels /* Add some physical pages to the mobj object. */
sp_mem_add_pages(struct mobj * mobj,unsigned int * idx,paddr_t pa,unsigned int num_pages)766a1b230cSJelle Sels int sp_mem_add_pages(struct mobj *mobj, unsigned int *idx,
776a1b230cSJelle Sels paddr_t pa, unsigned int num_pages)
786a1b230cSJelle Sels {
796a1b230cSJelle Sels struct mobj_sp *ms = to_mobj_sp(mobj);
806a1b230cSJelle Sels unsigned int n = 0;
816a1b230cSJelle Sels size_t tot_page_count = get_page_count(ms);
826a1b230cSJelle Sels
836a1b230cSJelle Sels if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
846a1b230cSJelle Sels return TEE_ERROR_BAD_PARAMETERS;
856a1b230cSJelle Sels
86036559a5SJelle Sels /* Don't check for device memory */
87036559a5SJelle Sels if (ms->mem_type == TEE_MATTR_MEM_TYPE_CACHED) {
88036559a5SJelle Sels if (ms->is_secure) {
89036559a5SJelle Sels if (!tee_pbuf_is_sec(pa, num_pages * SMALL_PAGE_SIZE))
906a1b230cSJelle Sels return TEE_ERROR_BAD_PARAMETERS;
91036559a5SJelle Sels } else {
92036559a5SJelle Sels if (!tee_pbuf_is_non_sec(pa,
93036559a5SJelle Sels num_pages * SMALL_PAGE_SIZE))
94036559a5SJelle Sels return TEE_ERROR_BAD_PARAMETERS;
95036559a5SJelle Sels }
96036559a5SJelle Sels }
976a1b230cSJelle Sels
986a1b230cSJelle Sels for (n = 0; n < num_pages; n++)
996a1b230cSJelle Sels ms->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;
1006a1b230cSJelle Sels
1016a1b230cSJelle Sels *idx += n;
1026a1b230cSJelle Sels return TEE_SUCCESS;
1036a1b230cSJelle Sels }
1046a1b230cSJelle Sels
get_mem_type(struct mobj * mobj,uint32_t * mt)105c0bb2059SBalint Dobszay static TEE_Result get_mem_type(struct mobj *mobj, uint32_t *mt)
1066a1b230cSJelle Sels {
1076f3a5646SJelle Sels struct mobj_sp *m = to_mobj_sp(mobj);
1086f3a5646SJelle Sels
109c0bb2059SBalint Dobszay *mt = m->mem_type;
1106a1b230cSJelle Sels
1116a1b230cSJelle Sels return TEE_SUCCESS;
1126a1b230cSJelle Sels }
1136a1b230cSJelle Sels
mobj_sp_matches(struct mobj * mobj,enum buf_is_attr attr)114036559a5SJelle Sels static bool mobj_sp_matches(struct mobj *mobj, enum buf_is_attr attr)
1156a1b230cSJelle Sels {
116036559a5SJelle Sels struct mobj_sp *m = to_mobj_sp(mobj);
1176a1b230cSJelle Sels
118036559a5SJelle Sels if (m->is_secure)
119036559a5SJelle Sels return attr == CORE_MEM_SEC;
120036559a5SJelle Sels else
1216a1b230cSJelle Sels return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
1226a1b230cSJelle Sels }
1236a1b230cSJelle Sels
get_pa(struct mobj * mobj,size_t offset,size_t granule,paddr_t * pa)1246a1b230cSJelle Sels static TEE_Result get_pa(struct mobj *mobj, size_t offset,
1256a1b230cSJelle Sels size_t granule, paddr_t *pa)
1266a1b230cSJelle Sels {
1276a1b230cSJelle Sels struct mobj_sp *ms = to_mobj_sp(mobj);
1286a1b230cSJelle Sels paddr_t p = 0;
1296a1b230cSJelle Sels
1306a1b230cSJelle Sels if (!pa)
1316a1b230cSJelle Sels return TEE_ERROR_GENERIC;
1326a1b230cSJelle Sels
1336a1b230cSJelle Sels if (offset >= mobj->size)
1346a1b230cSJelle Sels return TEE_ERROR_GENERIC;
1356a1b230cSJelle Sels
1366a1b230cSJelle Sels switch (granule) {
1376a1b230cSJelle Sels case 0:
1386a1b230cSJelle Sels p = ms->pages[offset / SMALL_PAGE_SIZE] +
1396a1b230cSJelle Sels (offset & SMALL_PAGE_MASK);
1406a1b230cSJelle Sels break;
1416a1b230cSJelle Sels case SMALL_PAGE_SIZE:
1426a1b230cSJelle Sels p = ms->pages[offset / SMALL_PAGE_SIZE];
1436a1b230cSJelle Sels break;
1446a1b230cSJelle Sels default:
1456a1b230cSJelle Sels return TEE_ERROR_GENERIC;
1466a1b230cSJelle Sels }
1476a1b230cSJelle Sels *pa = p;
1486a1b230cSJelle Sels
1496a1b230cSJelle Sels return TEE_SUCCESS;
1506a1b230cSJelle Sels }
1516a1b230cSJelle Sels DECLARE_KEEP_PAGER(get_pa);
1526a1b230cSJelle Sels
get_phys_offs(struct mobj * mobj __maybe_unused,size_t granule __maybe_unused)1536a1b230cSJelle Sels static size_t get_phys_offs(struct mobj *mobj __maybe_unused,
1546a1b230cSJelle Sels size_t granule __maybe_unused)
1556a1b230cSJelle Sels {
1566a1b230cSJelle Sels return 0;
1576a1b230cSJelle Sels }
1586a1b230cSJelle Sels
inactivate(struct mobj * mobj)1596a1b230cSJelle Sels static void inactivate(struct mobj *mobj)
1606a1b230cSJelle Sels {
1616a1b230cSJelle Sels struct mobj_sp *ms = to_mobj_sp(mobj);
1626a1b230cSJelle Sels uint32_t exceptions = 0;
1636a1b230cSJelle Sels
1646a1b230cSJelle Sels exceptions = cpu_spin_lock_xsave(&sp_mem_lock);
1656a1b230cSJelle Sels /*
1666a1b230cSJelle Sels * If refcount isn't 0 some other thread has found this mobj in
1676a1b230cSJelle Sels * shm_head after the mobj_put() that put us here and before we got
1686a1b230cSJelle Sels * the lock.
1696a1b230cSJelle Sels */
1706a1b230cSJelle Sels if (!refcount_val(&mobj->refc))
1716a1b230cSJelle Sels free(ms);
1726a1b230cSJelle Sels
1736a1b230cSJelle Sels cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
1746a1b230cSJelle Sels }
1756a1b230cSJelle Sels
176593b94eeSJens Wiklander static const struct mobj_ops mobj_sp_ops = {
1776a1b230cSJelle Sels .get_pa = get_pa,
1786a1b230cSJelle Sels .get_phys_offs = get_phys_offs,
179c0bb2059SBalint Dobszay .get_mem_type = get_mem_type,
1806a1b230cSJelle Sels .matches = mobj_sp_matches,
1816a1b230cSJelle Sels .free = inactivate,
1826a1b230cSJelle Sels };
1835c1143a8SJelle Sels
/* Look up the receiver entry for endpoint @s_id, or NULL if not found */
struct sp_mem_receiver *sp_mem_get_receiver(uint32_t s_id, struct sp_mem *smem)
{
	struct sp_mem_receiver *receiver = NULL;

	SLIST_FOREACH(receiver, &smem->receivers, link)
		if (receiver->perm.endpoint_id == s_id)
			return receiver;

	return NULL;
}
194de66193dSJelle Sels
/* Find the active share with the given global @handle, or NULL */
struct sp_mem *sp_mem_get(uint64_t handle)
{
	struct sp_mem *smem = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	SLIST_FOREACH(smem, &mem_shares, link)
		if (smem->global_handle == handle)
			break;

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);

	return smem;
}
208de66193dSJelle Sels
/*
 * Return the VA where (@mobj, @offset) is mapped in @uctx, or NULL if
 * no matching VM region exists.
 */
void *sp_mem_get_va(const struct user_mode_ctx *uctx, size_t offset,
		    struct mobj *mobj)
{
	struct vm_region *reg = NULL;

	TAILQ_FOREACH(reg, &uctx->vm_info.regions, link)
		if (reg->mobj == mobj && reg->offset == offset)
			return (void *)reg->va;

	return NULL;
}
220de66193dSJelle Sels
sp_mem_new(void)2215c1143a8SJelle Sels struct sp_mem *sp_mem_new(void)
2225c1143a8SJelle Sels {
2235c1143a8SJelle Sels struct sp_mem *smem = NULL;
2245c1143a8SJelle Sels uint32_t exceptions = 0;
2255c1143a8SJelle Sels int i = 0;
2265c1143a8SJelle Sels
227b70970feSEtienne Carriere smem = calloc(1, sizeof(*smem));
2285c1143a8SJelle Sels if (!smem)
2295c1143a8SJelle Sels return NULL;
2305c1143a8SJelle Sels
2315c1143a8SJelle Sels exceptions = cpu_spin_lock_xsave(&sp_mem_lock);
2325c1143a8SJelle Sels
2335c1143a8SJelle Sels bit_ffc(share_bits, NUM_SHARES, &i);
2345c1143a8SJelle Sels if (i == -1) {
2355c1143a8SJelle Sels cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
2365c1143a8SJelle Sels free(smem);
2375c1143a8SJelle Sels return NULL;
2385c1143a8SJelle Sels }
2395c1143a8SJelle Sels
2405c1143a8SJelle Sels bit_set(share_bits, i);
2415c1143a8SJelle Sels /*
2425c1143a8SJelle Sels * OP-TEE SHAREs use bit 44 use bit 45 instead.
2435c1143a8SJelle Sels */
2445c1143a8SJelle Sels smem->global_handle = i | FFA_MEMORY_HANDLE_SECURE_BIT;
2455c1143a8SJelle Sels SLIST_INIT(&smem->regions);
2465c1143a8SJelle Sels SLIST_INIT(&smem->receivers);
2475c1143a8SJelle Sels
2485c1143a8SJelle Sels cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
2495c1143a8SJelle Sels
2505c1143a8SJelle Sels return smem;
2515c1143a8SJelle Sels }
2525c1143a8SJelle Sels
sp_mem_add(struct sp_mem * smem)2535c1143a8SJelle Sels void sp_mem_add(struct sp_mem *smem)
2545c1143a8SJelle Sels {
2555c1143a8SJelle Sels uint32_t exceptions = cpu_spin_lock_xsave(&sp_mem_lock);
2565c1143a8SJelle Sels
2575c1143a8SJelle Sels SLIST_INSERT_HEAD(&mem_shares, smem, link);
2585c1143a8SJelle Sels
2595c1143a8SJelle Sels cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
2605c1143a8SJelle Sels }
2615c1143a8SJelle Sels
sp_mem_is_shared(struct sp_mem_map_region * new_reg)26272ede99eSJelle Sels bool sp_mem_is_shared(struct sp_mem_map_region *new_reg)
26372ede99eSJelle Sels {
26472ede99eSJelle Sels struct sp_mem *smem = NULL;
26572ede99eSJelle Sels uint32_t exceptions = cpu_spin_lock_xsave(&sp_mem_lock);
26672ede99eSJelle Sels uint64_t new_reg_end = new_reg->page_offset +
26772ede99eSJelle Sels (new_reg->page_count * SMALL_PAGE_SIZE);
26872ede99eSJelle Sels
26972ede99eSJelle Sels SLIST_FOREACH(smem, &mem_shares, link) {
27072ede99eSJelle Sels struct sp_mem_map_region *reg = NULL;
27172ede99eSJelle Sels
27272ede99eSJelle Sels SLIST_FOREACH(reg, &smem->regions, link) {
27372ede99eSJelle Sels if (new_reg->mobj == reg->mobj) {
27472ede99eSJelle Sels uint64_t reg_end = 0;
27572ede99eSJelle Sels
27672ede99eSJelle Sels reg_end = reg->page_offset +
27772ede99eSJelle Sels (reg->page_count * SMALL_PAGE_SIZE);
27872ede99eSJelle Sels
27972ede99eSJelle Sels if (new_reg->page_offset < reg_end &&
28072ede99eSJelle Sels new_reg_end > reg->page_offset) {
28172ede99eSJelle Sels cpu_spin_unlock_xrestore(&sp_mem_lock,
28272ede99eSJelle Sels exceptions);
28372ede99eSJelle Sels return true;
28472ede99eSJelle Sels }
28572ede99eSJelle Sels }
28672ede99eSJelle Sels }
28772ede99eSJelle Sels }
28872ede99eSJelle Sels
28972ede99eSJelle Sels cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
29072ede99eSJelle Sels return false;
29172ede99eSJelle Sels }
29272ede99eSJelle Sels
/*
 * Tear down a share: free all receiver and region bookkeeping, release
 * the handle bit and unlink the share from mem_shares.
 *
 * NOTE(review): receivers and regions are freed before sp_mem_lock is
 * taken, so callers appear to need exclusive access to @smem at this
 * point — confirm against the FF-A reclaim path.
 */
void sp_mem_remove(struct sp_mem *smem)
{
	uint32_t exceptions = 0;
	int i = 0;
	struct sp_mem *tsmem = NULL;

	if (!smem)
		return;

	/* Remove all receivers */
	while (!SLIST_EMPTY(&smem->receivers)) {
		struct sp_mem_receiver *receiver = NULL;

		receiver = SLIST_FIRST(&smem->receivers);
		SLIST_REMOVE_HEAD(&smem->receivers, link);
		free(receiver);
	}
	/* Remove all regions */
	while (!SLIST_EMPTY(&smem->regions)) {
		struct sp_mem_map_region *region = SLIST_FIRST(&smem->regions);

		/* Drop the reference held on the backing mobj */
		mobj_put(region->mobj);

		SLIST_REMOVE_HEAD(&smem->regions, link);
		free(region);
	}

	exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	/* Recover the bitmap index allocated in sp_mem_new() */
	i = smem->global_handle & ~FFA_MEMORY_HANDLE_SECURE_BIT;
	assert(i < NUM_SHARES);

	bit_clear(share_bits, i);

	/* Unlink from mem_shares if it was published with sp_mem_add() */
	SLIST_FOREACH(tsmem, &mem_shares, link) {
		if (tsmem == smem) {
			SLIST_REMOVE(&mem_shares, smem, sp_mem, link);
			break;
		}
	}

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);

	free(smem);
}
338