xref: /optee_os/core/mm/mobj_dyn_shm.c (revision 003383344c26be3589383acc87c1ebb2860e9317)
1e92be0c6SAlvin Chang // SPDX-License-Identifier: BSD-2-Clause
2e92be0c6SAlvin Chang /*
3*00338334SJens Wiklander  * Copyright (c) 2016-2024, Linaro Limited
4e92be0c6SAlvin Chang  */
5e92be0c6SAlvin Chang 
6e92be0c6SAlvin Chang #include <assert.h>
7e92be0c6SAlvin Chang #include <initcall.h>
8e92be0c6SAlvin Chang #include <keep.h>
9e92be0c6SAlvin Chang #include <kernel/linker.h>
10e92be0c6SAlvin Chang #include <kernel/mutex.h>
11e92be0c6SAlvin Chang #include <kernel/panic.h>
12e92be0c6SAlvin Chang #include <kernel/refcount.h>
13e92be0c6SAlvin Chang #include <kernel/spinlock.h>
14*00338334SJens Wiklander #include <kernel/tee_misc.h>
15e92be0c6SAlvin Chang #include <mm/core_mmu.h>
16e92be0c6SAlvin Chang #include <mm/mobj.h>
17e92be0c6SAlvin Chang #include <mm/tee_pager.h>
18e92be0c6SAlvin Chang #include <optee_msg.h>
19e92be0c6SAlvin Chang #include <stdlib.h>
20e92be0c6SAlvin Chang #include <tee_api_types.h>
21e92be0c6SAlvin Chang #include <types_ext.h>
22e92be0c6SAlvin Chang #include <util.h>
23e92be0c6SAlvin Chang 
24e92be0c6SAlvin Chang static struct mutex shm_mu = MUTEX_INITIALIZER;
25e92be0c6SAlvin Chang static struct condvar shm_cv = CONDVAR_INITIALIZER;
26e92be0c6SAlvin Chang static size_t shm_release_waiters;
27e92be0c6SAlvin Chang 
28e92be0c6SAlvin Chang /*
29e92be0c6SAlvin Chang  * mobj_reg_shm implementation. Describes shared memory provided by normal world
30e92be0c6SAlvin Chang  */
31e92be0c6SAlvin Chang 
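/*
 * Field overview, inferred from how the struct is used below:
 * @mobj:          base mobj, ops set to mobj_reg_shm_ops
 * @cookie:        identifier supplied by normal world for this buffer
 * @mm:            core virtual address range while the buffer is mapped
 * @page_offset:   offset of the buffer start within the first page
 * @mapcount:      number of active mappings, the mapping is removed when
 *                 it drops to zero
 * @guarded:       true if the mobj may only be freed via mobj_put()
 * @releasing:     true once mobj_reg_shm_release_by_cookie() has claimed
 *                 the release of this mobj
 * @release_frees: true when the final mobj_put() defers the actual free
 *                 to the releasing thread
 * @pages:         physical addresses of the normal world pages
 */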
32e92be0c6SAlvin Chang struct mobj_reg_shm {
33e92be0c6SAlvin Chang 	struct mobj mobj;
34e92be0c6SAlvin Chang 	SLIST_ENTRY(mobj_reg_shm) next;
35e92be0c6SAlvin Chang 	uint64_t cookie;
36e92be0c6SAlvin Chang 	tee_mm_entry_t *mm;
37e92be0c6SAlvin Chang 	paddr_t page_offset;
38e92be0c6SAlvin Chang 	struct refcount mapcount;
39e92be0c6SAlvin Chang 	bool guarded;
40e92be0c6SAlvin Chang 	bool releasing;
41e92be0c6SAlvin Chang 	bool release_frees;
42e92be0c6SAlvin Chang 	paddr_t pages[];
43e92be0c6SAlvin Chang };
44e92be0c6SAlvin Chang 
45*00338334SJens Wiklander /*
46*00338334SJens Wiklander  * struct mobj_protmem - describes protected memory lent by normal world
47*00338334SJens Wiklander  */
48*00338334SJens Wiklander struct mobj_protmem {
49*00338334SJens Wiklander 	struct mobj mobj;
50*00338334SJens Wiklander 	SLIST_ENTRY(mobj_protmem) next;
51*00338334SJens Wiklander 	uint64_t cookie;
52*00338334SJens Wiklander 	paddr_t pa;
53*00338334SJens Wiklander 	enum mobj_use_case use_case;
54*00338334SJens Wiklander 	bool releasing;
55*00338334SJens Wiklander 	bool release_frees;
56*00338334SJens Wiklander };
57*00338334SJens Wiklander 
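/*
 * Returns the allocation size needed for a struct mobj_reg_shm carrying
 * nr_pages page addresses in its flexible array member, or 0 if the
 * computation overflows.
 */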
58e92be0c6SAlvin Chang static size_t mobj_reg_shm_size(size_t nr_pages)
59e92be0c6SAlvin Chang {
60e92be0c6SAlvin Chang 	size_t s = 0;
61e92be0c6SAlvin Chang 
62e92be0c6SAlvin Chang 	if (MUL_OVERFLOW(sizeof(paddr_t), nr_pages, &s))
63e92be0c6SAlvin Chang 		return 0;
64e92be0c6SAlvin Chang 	if (ADD_OVERFLOW(sizeof(struct mobj_reg_shm), s, &s))
65e92be0c6SAlvin Chang 		return 0;
66e92be0c6SAlvin Chang 	return s;
67e92be0c6SAlvin Chang }
68e92be0c6SAlvin Chang 
69e92be0c6SAlvin Chang static SLIST_HEAD(reg_shm_head, mobj_reg_shm) reg_shm_list =
70e92be0c6SAlvin Chang 	SLIST_HEAD_INITIALIZER(reg_shm_head);
71e92be0c6SAlvin Chang 
72e92be0c6SAlvin Chang static unsigned int reg_shm_slist_lock = SPINLOCK_UNLOCK;
73e92be0c6SAlvin Chang static unsigned int reg_shm_map_lock = SPINLOCK_UNLOCK;
74e92be0c6SAlvin Chang 
75*00338334SJens Wiklander /* Access is serialized with reg_shm_slist_lock */
76*00338334SJens Wiklander static SLIST_HEAD(protmem_head, mobj_protmem) protmem_list =
77*00338334SJens Wiklander 	SLIST_HEAD_INITIALIZER(protmem_head);
78*00338334SJens Wiklander 
79e92be0c6SAlvin Chang static struct mobj_reg_shm *to_mobj_reg_shm(struct mobj *mobj);
80e92be0c6SAlvin Chang 
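/*
 * Returns the physical address at @offst into the shared buffer. With a
 * granule of 0 the exact address (including the in-page offset) is
 * returned, with a granule of SMALL_PAGE_SIZE the address of the
 * containing page is returned.
 */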
81e92be0c6SAlvin Chang static TEE_Result mobj_reg_shm_get_pa(struct mobj *mobj, size_t offst,
82e92be0c6SAlvin Chang 				      size_t granule, paddr_t *pa)
83e92be0c6SAlvin Chang {
84e92be0c6SAlvin Chang 	struct mobj_reg_shm *mobj_reg_shm = to_mobj_reg_shm(mobj);
85e92be0c6SAlvin Chang 	size_t full_offset = 0;
86e92be0c6SAlvin Chang 	paddr_t p = 0;
87e92be0c6SAlvin Chang 
88e92be0c6SAlvin Chang 	if (!pa)
89e92be0c6SAlvin Chang 		return TEE_ERROR_GENERIC;
90e92be0c6SAlvin Chang 
91e92be0c6SAlvin Chang 	if (offst >= mobj->size)
92e92be0c6SAlvin Chang 		return TEE_ERROR_GENERIC;
93e92be0c6SAlvin Chang 
94e92be0c6SAlvin Chang 	full_offset = offst + mobj_reg_shm->page_offset;
95e92be0c6SAlvin Chang 	switch (granule) {
96e92be0c6SAlvin Chang 	case 0:
97e92be0c6SAlvin Chang 		p = mobj_reg_shm->pages[full_offset / SMALL_PAGE_SIZE] +
98e92be0c6SAlvin Chang 			(full_offset & SMALL_PAGE_MASK);
99e92be0c6SAlvin Chang 		break;
100e92be0c6SAlvin Chang 	case SMALL_PAGE_SIZE:
101e92be0c6SAlvin Chang 		p = mobj_reg_shm->pages[full_offset / SMALL_PAGE_SIZE];
102e92be0c6SAlvin Chang 		break;
103e92be0c6SAlvin Chang 	default:
104e92be0c6SAlvin Chang 		return TEE_ERROR_GENERIC;
105e92be0c6SAlvin Chang 	}
106e92be0c6SAlvin Chang 	*pa = p;
107e92be0c6SAlvin Chang 
108e92be0c6SAlvin Chang 	return TEE_SUCCESS;
109e92be0c6SAlvin Chang }
110e92be0c6SAlvin Chang DECLARE_KEEP_PAGER(mobj_reg_shm_get_pa);
111e92be0c6SAlvin Chang 
112e92be0c6SAlvin Chang static size_t mobj_reg_shm_get_phys_offs(struct mobj *mobj,
113e92be0c6SAlvin Chang 					 size_t granule __maybe_unused)
114e92be0c6SAlvin Chang {
115e92be0c6SAlvin Chang 	assert(granule >= mobj->phys_granule);
116e92be0c6SAlvin Chang 	return to_mobj_reg_shm(mobj)->page_offset;
117e92be0c6SAlvin Chang }
118e92be0c6SAlvin Chang 
119e92be0c6SAlvin Chang static void *mobj_reg_shm_get_va(struct mobj *mobj, size_t offst, size_t len)
120e92be0c6SAlvin Chang {
121e92be0c6SAlvin Chang 	struct mobj_reg_shm *mrs = to_mobj_reg_shm(mobj);
122e92be0c6SAlvin Chang 
123e92be0c6SAlvin Chang 	if (!mrs->mm || !mobj_check_offset_and_len(mobj, offst, len))
124e92be0c6SAlvin Chang 		return NULL;
125e92be0c6SAlvin Chang 
126e92be0c6SAlvin Chang 	return (void *)(vaddr_t)(tee_mm_get_smem(mrs->mm) + offst +
127e92be0c6SAlvin Chang 				 mrs->page_offset);
128e92be0c6SAlvin Chang }
129e92be0c6SAlvin Chang 
130e92be0c6SAlvin Chang static void reg_shm_unmap_helper(struct mobj_reg_shm *r)
131e92be0c6SAlvin Chang {
132e92be0c6SAlvin Chang 	assert(r->mm);
133e92be0c6SAlvin Chang 	assert(r->mm->pool->shift == SMALL_PAGE_SHIFT);
134e92be0c6SAlvin Chang 	core_mmu_unmap_pages(tee_mm_get_smem(r->mm), r->mm->size);
135e92be0c6SAlvin Chang 	tee_mm_free(r->mm);
136e92be0c6SAlvin Chang 	r->mm = NULL;
137e92be0c6SAlvin Chang }
138e92be0c6SAlvin Chang 
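/*
 * Unmaps the buffer if it is mapped, unlinks it from reg_shm_list and
 * frees it. The caller must hold reg_shm_slist_lock.
 */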
139e92be0c6SAlvin Chang static void reg_shm_free_helper(struct mobj_reg_shm *mobj_reg_shm)
140e92be0c6SAlvin Chang {
141e92be0c6SAlvin Chang 	uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);
142e92be0c6SAlvin Chang 
143e92be0c6SAlvin Chang 	if (mobj_reg_shm->mm)
144e92be0c6SAlvin Chang 		reg_shm_unmap_helper(mobj_reg_shm);
145e92be0c6SAlvin Chang 
146e92be0c6SAlvin Chang 	cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);
147e92be0c6SAlvin Chang 
148e92be0c6SAlvin Chang 	SLIST_REMOVE(&reg_shm_list, mobj_reg_shm, mobj_reg_shm, next);
149e92be0c6SAlvin Chang 	free(mobj_reg_shm);
150e92be0c6SAlvin Chang }
151e92be0c6SAlvin Chang 
152e92be0c6SAlvin Chang static void mobj_reg_shm_free(struct mobj *mobj)
153e92be0c6SAlvin Chang {
154e92be0c6SAlvin Chang 	struct mobj_reg_shm *r = to_mobj_reg_shm(mobj);
155e92be0c6SAlvin Chang 	uint32_t exceptions = 0;
156e92be0c6SAlvin Chang 
157e92be0c6SAlvin Chang 	if (r->guarded && !r->releasing) {
158e92be0c6SAlvin Chang 		/*
159e92be0c6SAlvin Chang 		 * Guarded registered shared memory can't be released
160e92be0c6SAlvin Chang 		 * by cookie, only by mobj_put(). However, unguarded
161e92be0c6SAlvin Chang 		 * registered shared memory can also be freed by mobj_put()
162e92be0c6SAlvin Chang 		 * unless mobj_reg_shm_release_by_cookie() is waiting for
163e92be0c6SAlvin Chang 		 * the mobj to be released.
164e92be0c6SAlvin Chang 		 */
165e92be0c6SAlvin Chang 		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
166e92be0c6SAlvin Chang 		reg_shm_free_helper(r);
167e92be0c6SAlvin Chang 		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
168e92be0c6SAlvin Chang 	} else {
169e92be0c6SAlvin Chang 		/*
170e92be0c6SAlvin Chang 		 * We've reached the point where an unguarded reg shm can
171e92be0c6SAlvin Chang 		 * be released by cookie. Notify eventual waiters.
172e92be0c6SAlvin Chang 		 */
173e92be0c6SAlvin Chang 		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
174e92be0c6SAlvin Chang 		r->release_frees = true;
175e92be0c6SAlvin Chang 		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
176e92be0c6SAlvin Chang 
177e92be0c6SAlvin Chang 		mutex_lock(&shm_mu);
178e92be0c6SAlvin Chang 		if (shm_release_waiters)
179e92be0c6SAlvin Chang 			condvar_broadcast(&shm_cv);
180e92be0c6SAlvin Chang 		mutex_unlock(&shm_mu);
181e92be0c6SAlvin Chang 	}
182e92be0c6SAlvin Chang }
183e92be0c6SAlvin Chang 
184e92be0c6SAlvin Chang static TEE_Result mobj_reg_shm_get_mem_type(struct mobj *mobj __unused,
185e92be0c6SAlvin Chang 					    uint32_t *mt)
186e92be0c6SAlvin Chang {
187e92be0c6SAlvin Chang 	if (!mt)
188e92be0c6SAlvin Chang 		return TEE_ERROR_GENERIC;
189e92be0c6SAlvin Chang 
190e92be0c6SAlvin Chang 	*mt = TEE_MATTR_MEM_TYPE_CACHED;
191e92be0c6SAlvin Chang 
192e92be0c6SAlvin Chang 	return TEE_SUCCESS;
193e92be0c6SAlvin Chang }
194e92be0c6SAlvin Chang 
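/*
 * Maps the registered buffer into the core virtual shared memory pool on
 * the first call (mapcount 0 -> 1); later calls only increase mapcount.
 * The spinlock dance below handles races with mobj_reg_shm_dec_map().
 */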
195e92be0c6SAlvin Chang static TEE_Result mobj_reg_shm_inc_map(struct mobj *mobj)
196e92be0c6SAlvin Chang {
197e92be0c6SAlvin Chang 	TEE_Result res = TEE_SUCCESS;
198e92be0c6SAlvin Chang 	struct mobj_reg_shm *r = to_mobj_reg_shm(mobj);
199e92be0c6SAlvin Chang 	uint32_t exceptions = 0;
200e92be0c6SAlvin Chang 	size_t sz = 0;
201e92be0c6SAlvin Chang 
202e92be0c6SAlvin Chang 	while (true) {
203e92be0c6SAlvin Chang 		if (refcount_inc(&r->mapcount))
204e92be0c6SAlvin Chang 			return TEE_SUCCESS;
205e92be0c6SAlvin Chang 
206e92be0c6SAlvin Chang 		exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);
207e92be0c6SAlvin Chang 
208e92be0c6SAlvin Chang 		if (!refcount_val(&r->mapcount))
209e92be0c6SAlvin Chang 			break; /* continue to reinitialize */
210e92be0c6SAlvin Chang 		/*
211e92be0c6SAlvin Chang 		 * If another thread beat us to initialize mapcount,
212e92be0c6SAlvin Chang 		 * restart to make sure we still increase it.
213e92be0c6SAlvin Chang 		 */
214e92be0c6SAlvin Chang 		cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);
215e92be0c6SAlvin Chang 	}
216e92be0c6SAlvin Chang 
217e92be0c6SAlvin Chang 	/*
218e92be0c6SAlvin Chang 	 * If we have beaten another thread calling mobj_reg_shm_dec_map()
219e92be0c6SAlvin Chang 	 * to get the lock we need only to reinitialize mapcount to 1.
220e92be0c6SAlvin Chang 	 */
221e92be0c6SAlvin Chang 	if (!r->mm) {
222e92be0c6SAlvin Chang 		sz = ROUNDUP(mobj->size + r->page_offset, SMALL_PAGE_SIZE);
223fdf696b7SJens Wiklander 		r->mm = tee_mm_alloc(&core_virt_shm_pool, sz);
224e92be0c6SAlvin Chang 		if (!r->mm) {
225e92be0c6SAlvin Chang 			res = TEE_ERROR_OUT_OF_MEMORY;
226e92be0c6SAlvin Chang 			goto out;
227e92be0c6SAlvin Chang 		}
228e92be0c6SAlvin Chang 
229e92be0c6SAlvin Chang 		res = core_mmu_map_pages(tee_mm_get_smem(r->mm), r->pages,
230e92be0c6SAlvin Chang 					 sz / SMALL_PAGE_SIZE,
231e92be0c6SAlvin Chang 					 MEM_AREA_NSEC_SHM);
232e92be0c6SAlvin Chang 		if (res) {
233e92be0c6SAlvin Chang 			tee_mm_free(r->mm);
234e92be0c6SAlvin Chang 			r->mm = NULL;
235e92be0c6SAlvin Chang 			goto out;
236e92be0c6SAlvin Chang 		}
237e92be0c6SAlvin Chang 	}
238e92be0c6SAlvin Chang 
239e92be0c6SAlvin Chang 	refcount_set(&r->mapcount, 1);
240e92be0c6SAlvin Chang out:
241e92be0c6SAlvin Chang 	cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);
242e92be0c6SAlvin Chang 
243e92be0c6SAlvin Chang 	return res;
244e92be0c6SAlvin Chang }
245e92be0c6SAlvin Chang 
246e92be0c6SAlvin Chang static TEE_Result mobj_reg_shm_dec_map(struct mobj *mobj)
247e92be0c6SAlvin Chang {
248e92be0c6SAlvin Chang 	struct mobj_reg_shm *r = to_mobj_reg_shm(mobj);
249e92be0c6SAlvin Chang 	uint32_t exceptions = 0;
250e92be0c6SAlvin Chang 
251e92be0c6SAlvin Chang 	if (!refcount_dec(&r->mapcount))
252e92be0c6SAlvin Chang 		return TEE_SUCCESS;
253e92be0c6SAlvin Chang 
254e92be0c6SAlvin Chang 	exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);
255e92be0c6SAlvin Chang 
256e92be0c6SAlvin Chang 	/*
257e92be0c6SAlvin Chang 	 * Check that another thread hasn't been able to:
258e92be0c6SAlvin Chang 	 * - increase the mapcount
259e92be0c6SAlvin Chang 	 * - or, increase the mapcount, decrease it again, and set r->mm to
260e92be0c6SAlvin Chang 	 *   NULL
261e92be0c6SAlvin Chang 	 * before we acquired the spinlock
262e92be0c6SAlvin Chang 	 */
263e92be0c6SAlvin Chang 	if (!refcount_val(&r->mapcount) && r->mm)
264e92be0c6SAlvin Chang 		reg_shm_unmap_helper(r);
265e92be0c6SAlvin Chang 
266e92be0c6SAlvin Chang 	cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);
267e92be0c6SAlvin Chang 
268e92be0c6SAlvin Chang 	return TEE_SUCCESS;
269e92be0c6SAlvin Chang }
270e92be0c6SAlvin Chang 
271e92be0c6SAlvin Chang static bool mobj_reg_shm_matches(struct mobj *mobj, enum buf_is_attr attr);
272e92be0c6SAlvin Chang 
273e92be0c6SAlvin Chang static uint64_t mobj_reg_shm_get_cookie(struct mobj *mobj)
274e92be0c6SAlvin Chang {
275e92be0c6SAlvin Chang 	return to_mobj_reg_shm(mobj)->cookie;
276e92be0c6SAlvin Chang }
277e92be0c6SAlvin Chang 
278e92be0c6SAlvin Chang /*
279e92be0c6SAlvin Chang  * When CFG_PREALLOC_RPC_CACHE is disabled, this variable is weak just
280e92be0c6SAlvin Chang  * to ease breaking its dependency chain when added to the unpaged area.
281e92be0c6SAlvin Chang  * When CFG_PREALLOC_RPC_CACHE is enabled, releasing RPC preallocated
282e92be0c6SAlvin Chang  * shm mandates these resources to be unpaged.
283e92be0c6SAlvin Chang  */
284e92be0c6SAlvin Chang const struct mobj_ops mobj_reg_shm_ops
285e92be0c6SAlvin Chang __weak __relrodata_unpaged("mobj_reg_shm_ops") = {
286e92be0c6SAlvin Chang 	.get_pa = mobj_reg_shm_get_pa,
287e92be0c6SAlvin Chang 	.get_phys_offs = mobj_reg_shm_get_phys_offs,
288e92be0c6SAlvin Chang 	.get_va = mobj_reg_shm_get_va,
289e92be0c6SAlvin Chang 	.get_mem_type = mobj_reg_shm_get_mem_type,
290e92be0c6SAlvin Chang 	.matches = mobj_reg_shm_matches,
291e92be0c6SAlvin Chang 	.free = mobj_reg_shm_free,
292e92be0c6SAlvin Chang 	.get_cookie = mobj_reg_shm_get_cookie,
293e92be0c6SAlvin Chang 	.inc_map = mobj_reg_shm_inc_map,
294e92be0c6SAlvin Chang 	.dec_map = mobj_reg_shm_dec_map,
295e92be0c6SAlvin Chang };
296e92be0c6SAlvin Chang 
297e92be0c6SAlvin Chang #ifdef CFG_PREALLOC_RPC_CACHE
298e92be0c6SAlvin Chang /* Releasing RPC preallocated shm mandates a few resources to be unpaged */
299e92be0c6SAlvin Chang DECLARE_KEEP_PAGER(mobj_reg_shm_get_cookie);
300e92be0c6SAlvin Chang DECLARE_KEEP_PAGER(mobj_reg_shm_matches);
301e92be0c6SAlvin Chang DECLARE_KEEP_PAGER(mobj_reg_shm_free);
302e92be0c6SAlvin Chang #endif
303e92be0c6SAlvin Chang 
304e92be0c6SAlvin Chang static bool mobj_reg_shm_matches(struct mobj *mobj __maybe_unused,
305e92be0c6SAlvin Chang 				   enum buf_is_attr attr)
306e92be0c6SAlvin Chang {
307e92be0c6SAlvin Chang 	assert(mobj->ops == &mobj_reg_shm_ops);
308e92be0c6SAlvin Chang 
309e92be0c6SAlvin Chang 	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
310e92be0c6SAlvin Chang }
311e92be0c6SAlvin Chang 
312e92be0c6SAlvin Chang static struct mobj_reg_shm *to_mobj_reg_shm(struct mobj *mobj)
313e92be0c6SAlvin Chang {
314e92be0c6SAlvin Chang 	assert(mobj->ops == &mobj_reg_shm_ops);
315e92be0c6SAlvin Chang 	return container_of(mobj, struct mobj_reg_shm, mobj);
316e92be0c6SAlvin Chang }
317e92be0c6SAlvin Chang 
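/*
 * Returns TEE_ERROR_BAD_PARAMETERS if any page of the registered shared
 * memory object @r intersects the physical range [@pa, @pa + @size).
 */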
318*00338334SJens Wiklander static TEE_Result check_reg_shm_conflict(struct mobj_reg_shm *r, paddr_t pa,
319*00338334SJens Wiklander 					 paddr_size_t size)
320*00338334SJens Wiklander {
321*00338334SJens Wiklander 	size_t n = 0;
322*00338334SJens Wiklander 
323*00338334SJens Wiklander 	for (n = 0; n < r->mobj.size / SMALL_PAGE_SIZE; n++)
324*00338334SJens Wiklander 		if (core_is_buffer_intersect(pa, size, r->pages[n],
325*00338334SJens Wiklander 					     SMALL_PAGE_SIZE))
326*00338334SJens Wiklander 			return TEE_ERROR_BAD_PARAMETERS;
327*00338334SJens Wiklander 
328*00338334SJens Wiklander 	return TEE_SUCCESS;
329*00338334SJens Wiklander }
330*00338334SJens Wiklander 
331*00338334SJens Wiklander static TEE_Result check_protmem_conflict(struct mobj_reg_shm *r)
332*00338334SJens Wiklander {
333*00338334SJens Wiklander 	struct mobj_protmem *m = NULL;
334*00338334SJens Wiklander 	TEE_Result res = TEE_SUCCESS;
335*00338334SJens Wiklander 
336*00338334SJens Wiklander 	SLIST_FOREACH(m, &protmem_list, next) {
337*00338334SJens Wiklander 		res = check_reg_shm_conflict(r, m->pa, m->mobj.size);
338*00338334SJens Wiklander 		if (res)
339*00338334SJens Wiklander 			break;
340*00338334SJens Wiklander 	}
341*00338334SJens Wiklander 
342*00338334SJens Wiklander 	return res;
343*00338334SJens Wiklander }
344*00338334SJens Wiklander 
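/*
 * Creates a mobj covering @num_pages normal world pages starting at
 * @page_offset into the first page. All pages must be page aligned and
 * non-secure, and the range must not overlap any lent protected memory.
 * The returned mobj starts out guarded, see mobj_reg_shm_unguard().
 */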
345e92be0c6SAlvin Chang struct mobj *mobj_reg_shm_alloc(paddr_t *pages, size_t num_pages,
346e92be0c6SAlvin Chang 				paddr_t page_offset, uint64_t cookie)
347e92be0c6SAlvin Chang {
348e92be0c6SAlvin Chang 	struct mobj_reg_shm *mobj_reg_shm = NULL;
349*00338334SJens Wiklander 	TEE_Result res = TEE_SUCCESS;
350e92be0c6SAlvin Chang 	size_t i = 0;
351e92be0c6SAlvin Chang 	uint32_t exceptions = 0;
352e92be0c6SAlvin Chang 	size_t s = 0;
353e92be0c6SAlvin Chang 
354e92be0c6SAlvin Chang 	if (!num_pages || page_offset >= SMALL_PAGE_SIZE)
355e92be0c6SAlvin Chang 		return NULL;
356e92be0c6SAlvin Chang 
357e92be0c6SAlvin Chang 	s = mobj_reg_shm_size(num_pages);
358e92be0c6SAlvin Chang 	if (!s)
359e92be0c6SAlvin Chang 		return NULL;
360e92be0c6SAlvin Chang 	mobj_reg_shm = calloc(1, s);
361e92be0c6SAlvin Chang 	if (!mobj_reg_shm)
362e92be0c6SAlvin Chang 		return NULL;
363e92be0c6SAlvin Chang 
364e92be0c6SAlvin Chang 	mobj_reg_shm->mobj.ops = &mobj_reg_shm_ops;
365e92be0c6SAlvin Chang 	mobj_reg_shm->mobj.size = num_pages * SMALL_PAGE_SIZE - page_offset;
366e92be0c6SAlvin Chang 	mobj_reg_shm->mobj.phys_granule = SMALL_PAGE_SIZE;
367e92be0c6SAlvin Chang 	refcount_set(&mobj_reg_shm->mobj.refc, 1);
368e92be0c6SAlvin Chang 	mobj_reg_shm->cookie = cookie;
369e92be0c6SAlvin Chang 	mobj_reg_shm->guarded = true;
370e92be0c6SAlvin Chang 	mobj_reg_shm->page_offset = page_offset;
371e92be0c6SAlvin Chang 	memcpy(mobj_reg_shm->pages, pages, sizeof(*pages) * num_pages);
372e92be0c6SAlvin Chang 
373e92be0c6SAlvin Chang 	/* Ensure loaded references match format and security constraints */
374e92be0c6SAlvin Chang 	for (i = 0; i < num_pages; i++) {
375e92be0c6SAlvin Chang 		if (mobj_reg_shm->pages[i] & SMALL_PAGE_MASK)
376e92be0c6SAlvin Chang 			goto err;
377e92be0c6SAlvin Chang 
378e92be0c6SAlvin Chang 		/* Only Non-secure memory can be mapped there */
379e92be0c6SAlvin Chang 		if (!core_pbuf_is(CORE_MEM_NON_SEC, mobj_reg_shm->pages[i],
380e92be0c6SAlvin Chang 				  SMALL_PAGE_SIZE))
381e92be0c6SAlvin Chang 			goto err;
382e92be0c6SAlvin Chang 	}
383e92be0c6SAlvin Chang 
384e92be0c6SAlvin Chang 	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
385*00338334SJens Wiklander 	res = check_protmem_conflict(mobj_reg_shm);
386*00338334SJens Wiklander 	if (!res)
387e92be0c6SAlvin Chang 		SLIST_INSERT_HEAD(&reg_shm_list, mobj_reg_shm, next);
388e92be0c6SAlvin Chang 	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
389e92be0c6SAlvin Chang 
390*00338334SJens Wiklander 	if (res)
391*00338334SJens Wiklander 		goto err;
392*00338334SJens Wiklander 
393e92be0c6SAlvin Chang 	return &mobj_reg_shm->mobj;
394e92be0c6SAlvin Chang err:
395e92be0c6SAlvin Chang 	free(mobj_reg_shm);
396e92be0c6SAlvin Chang 	return NULL;
397e92be0c6SAlvin Chang }
398e92be0c6SAlvin Chang 
399e92be0c6SAlvin Chang void mobj_reg_shm_unguard(struct mobj *mobj)
400e92be0c6SAlvin Chang {
401e92be0c6SAlvin Chang 	uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
402e92be0c6SAlvin Chang 
403e92be0c6SAlvin Chang 	to_mobj_reg_shm(mobj)->guarded = false;
404e92be0c6SAlvin Chang 	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
405e92be0c6SAlvin Chang }
406e92be0c6SAlvin Chang 
407e92be0c6SAlvin Chang static struct mobj_reg_shm *reg_shm_find_unlocked(uint64_t cookie)
408e92be0c6SAlvin Chang {
409e92be0c6SAlvin Chang 	struct mobj_reg_shm *mobj_reg_shm = NULL;
410e92be0c6SAlvin Chang 
411e92be0c6SAlvin Chang 	SLIST_FOREACH(mobj_reg_shm, &reg_shm_list, next)
412e92be0c6SAlvin Chang 		if (mobj_reg_shm->cookie == cookie)
413e92be0c6SAlvin Chang 			return mobj_reg_shm;
414e92be0c6SAlvin Chang 
415e92be0c6SAlvin Chang 	return NULL;
416e92be0c6SAlvin Chang }
417e92be0c6SAlvin Chang 
418*00338334SJens Wiklander static struct mobj_protmem *protmem_find_unlocked(uint64_t cookie)
419*00338334SJens Wiklander {
420*00338334SJens Wiklander 	struct mobj_protmem *m = NULL;
421*00338334SJens Wiklander 
422*00338334SJens Wiklander 	SLIST_FOREACH(m, &protmem_list, next)
423*00338334SJens Wiklander 		if (m->cookie == cookie)
424*00338334SJens Wiklander 			return m;
425*00338334SJens Wiklander 
426*00338334SJens Wiklander 	return NULL;
427*00338334SJens Wiklander }
428*00338334SJens Wiklander 
429e92be0c6SAlvin Chang struct mobj *mobj_reg_shm_get_by_cookie(uint64_t cookie)
430e92be0c6SAlvin Chang {
431*00338334SJens Wiklander 	struct mobj_reg_shm *rs = NULL;
432*00338334SJens Wiklander 	struct mobj_protmem *rm = NULL;
4337c04952cSJens Wiklander 	uint32_t exceptions = 0;
4347c04952cSJens Wiklander 	struct mobj *m = NULL;
435e92be0c6SAlvin Chang 
4367c04952cSJens Wiklander 	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
437*00338334SJens Wiklander 	rs = reg_shm_find_unlocked(cookie);
438*00338334SJens Wiklander 	if (rs) {
439*00338334SJens Wiklander 		m = mobj_get(&rs->mobj);
440*00338334SJens Wiklander 		goto out;
441*00338334SJens Wiklander 	}
442*00338334SJens Wiklander 	rm = protmem_find_unlocked(cookie);
443*00338334SJens Wiklander 	if (rm)
444*00338334SJens Wiklander 		m = mobj_get(&rm->mobj);
445*00338334SJens Wiklander 
446*00338334SJens Wiklander out:
447e92be0c6SAlvin Chang 	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
448e92be0c6SAlvin Chang 
4497c04952cSJens Wiklander 	return m;
450e92be0c6SAlvin Chang }
451e92be0c6SAlvin Chang 
452e92be0c6SAlvin Chang TEE_Result mobj_reg_shm_release_by_cookie(uint64_t cookie)
453e92be0c6SAlvin Chang {
454e92be0c6SAlvin Chang 	uint32_t exceptions = 0;
455e92be0c6SAlvin Chang 	struct mobj_reg_shm *r = NULL;
456e92be0c6SAlvin Chang 
457e92be0c6SAlvin Chang 	/*
458e92be0c6SAlvin Chang 	 * Try to find r and check whether it can be released by this
459e92be0c6SAlvin Chang 	 * function; if so, call mobj_put(). Otherwise this function was
460e92be0c6SAlvin Chang 	 * called either with a wrong cookie or perhaps a second time, so
461e92be0c6SAlvin Chang 	 * regardless return TEE_ERROR_BAD_PARAMETERS.
462e92be0c6SAlvin Chang 	 */
463e92be0c6SAlvin Chang 	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
464e92be0c6SAlvin Chang 	r = reg_shm_find_unlocked(cookie);
465e92be0c6SAlvin Chang 	if (!r || r->guarded || r->releasing)
466e92be0c6SAlvin Chang 		r = NULL;
467e92be0c6SAlvin Chang 	else
468e92be0c6SAlvin Chang 		r->releasing = true;
469e92be0c6SAlvin Chang 
470e92be0c6SAlvin Chang 	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
471e92be0c6SAlvin Chang 
472e92be0c6SAlvin Chang 	if (!r)
473e92be0c6SAlvin Chang 		return TEE_ERROR_BAD_PARAMETERS;
474e92be0c6SAlvin Chang 
475e92be0c6SAlvin Chang 	mobj_put(&r->mobj);
476e92be0c6SAlvin Chang 
477e92be0c6SAlvin Chang 	/*
478e92be0c6SAlvin Chang 	 * We've established that this function can release the cookie.
479e92be0c6SAlvin Chang 	 * Now we wait until mobj_reg_shm_free() is called by the last
480e92be0c6SAlvin Chang 	 * mobj_put() needed to free this mobj. Note that the call to
481e92be0c6SAlvin Chang 	 * mobj_put() above could very well be that call.
482e92be0c6SAlvin Chang 	 *
483e92be0c6SAlvin Chang 	 * Once mobj_reg_shm_free() is called it will set r->release_frees
484e92be0c6SAlvin Chang 	 * to true and we can free the mobj here.
485e92be0c6SAlvin Chang 	 */
486e92be0c6SAlvin Chang 	mutex_lock(&shm_mu);
487e92be0c6SAlvin Chang 	shm_release_waiters++;
488e92be0c6SAlvin Chang 	assert(shm_release_waiters);
489e92be0c6SAlvin Chang 
490e92be0c6SAlvin Chang 	while (true) {
491e92be0c6SAlvin Chang 		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
492e92be0c6SAlvin Chang 		if (r->release_frees) {
493e92be0c6SAlvin Chang 			reg_shm_free_helper(r);
494e92be0c6SAlvin Chang 			r = NULL;
495e92be0c6SAlvin Chang 		}
496e92be0c6SAlvin Chang 		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
497e92be0c6SAlvin Chang 
498e92be0c6SAlvin Chang 		if (!r)
499e92be0c6SAlvin Chang 			break;
500e92be0c6SAlvin Chang 		condvar_wait(&shm_cv, &shm_mu);
501e92be0c6SAlvin Chang 	}
502e92be0c6SAlvin Chang 
503e92be0c6SAlvin Chang 	assert(shm_release_waiters);
504e92be0c6SAlvin Chang 	shm_release_waiters--;
505e92be0c6SAlvin Chang 	mutex_unlock(&shm_mu);
506e92be0c6SAlvin Chang 
507e92be0c6SAlvin Chang 	return TEE_SUCCESS;
508e92be0c6SAlvin Chang }
509e92be0c6SAlvin Chang 
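/*
 * Convenience wrapper: registers the pages with mobj_reg_shm_alloc() and
 * immediately maps them with mobj_inc_map().
 *
 * Hypothetical usage sketch (addresses and cookie are examples only):
 *
 *	paddr_t pages[1] = { 0x80100000 };
 *	struct mobj *m = mobj_mapped_shm_alloc(pages, 1, 0, 0x1234);
 *
 *	if (m)
 *		memset(mobj_get_va(m, 0, SMALL_PAGE_SIZE), 0, SMALL_PAGE_SIZE);
 */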
510e92be0c6SAlvin Chang struct mobj *mobj_mapped_shm_alloc(paddr_t *pages, size_t num_pages,
511e92be0c6SAlvin Chang 				  paddr_t page_offset, uint64_t cookie)
512e92be0c6SAlvin Chang {
513e92be0c6SAlvin Chang 	struct mobj *mobj = mobj_reg_shm_alloc(pages, num_pages,
514e92be0c6SAlvin Chang 					       page_offset, cookie);
515e92be0c6SAlvin Chang 
516e92be0c6SAlvin Chang 	if (!mobj)
517e92be0c6SAlvin Chang 		return NULL;
518e92be0c6SAlvin Chang 
519e92be0c6SAlvin Chang 	if (mobj_inc_map(mobj)) {
520e92be0c6SAlvin Chang 		mobj_put(mobj);
521e92be0c6SAlvin Chang 		return NULL;
522e92be0c6SAlvin Chang 	}
523e92be0c6SAlvin Chang 
524e92be0c6SAlvin Chang 	return mobj;
525e92be0c6SAlvin Chang }
526e92be0c6SAlvin Chang 
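/*
 * Runs at pre-init time: turns the MEM_AREA_SHM_VASPACE virtual address
 * range into the core_virt_shm_pool used above to map dynamic shared
 * memory buffers.
 */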
527e92be0c6SAlvin Chang static TEE_Result mobj_mapped_shm_init(void)
528e92be0c6SAlvin Chang {
529e92be0c6SAlvin Chang 	vaddr_t pool_start = 0;
530e92be0c6SAlvin Chang 	vaddr_t pool_end = 0;
531e92be0c6SAlvin Chang 
532e92be0c6SAlvin Chang 	core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
533e92be0c6SAlvin Chang 	if (!pool_start || !pool_end)
534e92be0c6SAlvin Chang 		panic("Can't find region for shmem pool");
535e92be0c6SAlvin Chang 
536fdf696b7SJens Wiklander 	if (!tee_mm_init(&core_virt_shm_pool, pool_start, pool_end - pool_start,
537e92be0c6SAlvin Chang 			 SMALL_PAGE_SHIFT, TEE_MM_POOL_NO_FLAGS))
538e92be0c6SAlvin Chang 		panic("Could not create shmem pool");
539e92be0c6SAlvin Chang 
540e92be0c6SAlvin Chang 	DMSG("Shared memory address range: %" PRIxVA ", %" PRIxVA,
541e92be0c6SAlvin Chang 	     pool_start, pool_end);
542e92be0c6SAlvin Chang 	return TEE_SUCCESS;
543e92be0c6SAlvin Chang }
544e92be0c6SAlvin Chang 
545e92be0c6SAlvin Chang preinit(mobj_mapped_shm_init);
546*00338334SJens Wiklander 
547*00338334SJens Wiklander #ifdef CFG_CORE_DYN_PROTMEM
548*00338334SJens Wiklander static struct mobj_protmem *to_mobj_protmem(struct mobj *mobj);
549*00338334SJens Wiklander 
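/*
 * Returns TEE_ERROR_BAD_PARAMETERS if the physical range [@pa, @pa + @size)
 * intersects any currently registered shared memory object. Must be called
 * with reg_shm_slist_lock held.
 */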
550*00338334SJens Wiklander static TEE_Result check_reg_shm_list_conflict(paddr_t pa, paddr_size_t size)
551*00338334SJens Wiklander {
552*00338334SJens Wiklander 	struct mobj_reg_shm *r = NULL;
553*00338334SJens Wiklander 	TEE_Result res = TEE_SUCCESS;
554*00338334SJens Wiklander 
555*00338334SJens Wiklander 	SLIST_FOREACH(r, &reg_shm_list, next) {
556*00338334SJens Wiklander 		res = check_reg_shm_conflict(r, pa, size);
557*00338334SJens Wiklander 		if (res)
558*00338334SJens Wiklander 			break;
559*00338334SJens Wiklander 	}
560*00338334SJens Wiklander 
561*00338334SJens Wiklander 	return res;
562*00338334SJens Wiklander }
563*00338334SJens Wiklander 
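/*
 * Asks the platform to make the range protected for the given use case.
 * restore_mem() below reverses this by setting the range back to the
 * normal shared memory use case.
 */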
564*00338334SJens Wiklander static TEE_Result protect_mem(struct mobj_protmem *m)
565*00338334SJens Wiklander {
566*00338334SJens Wiklander 	if ((m->pa | m->mobj.size) & SMALL_PAGE_MASK)
567*00338334SJens Wiklander 		return TEE_ERROR_BAD_PARAMETERS;
568*00338334SJens Wiklander 
569*00338334SJens Wiklander 	DMSG("use_case %d pa %#"PRIxPA", size %#zx",
570*00338334SJens Wiklander 	     m->use_case, m->pa, m->mobj.size);
571*00338334SJens Wiklander 
572*00338334SJens Wiklander 	return plat_set_protmem_range(m->use_case, m->pa, m->mobj.size);
573*00338334SJens Wiklander }
574*00338334SJens Wiklander 
575*00338334SJens Wiklander static TEE_Result restore_mem(struct mobj_protmem *m)
576*00338334SJens Wiklander {
577*00338334SJens Wiklander 	DMSG("use_case %d pa %#"PRIxPA", size %#zx",
578*00338334SJens Wiklander 	     m->use_case, m->pa, m->mobj.size);
579*00338334SJens Wiklander 
580*00338334SJens Wiklander 	return plat_set_protmem_range(MOBJ_USE_CASE_NS_SHM, m->pa,
581*00338334SJens Wiklander 				      m->mobj.size);
582*00338334SJens Wiklander }
583*00338334SJens Wiklander 
584*00338334SJens Wiklander static TEE_Result mobj_protmem_get_pa(struct mobj *mobj, size_t offs,
585*00338334SJens Wiklander 				      size_t granule, paddr_t *pa)
586*00338334SJens Wiklander {
587*00338334SJens Wiklander 	struct mobj_protmem *m = to_mobj_protmem(mobj);
588*00338334SJens Wiklander 	paddr_t p = 0;
589*00338334SJens Wiklander 
590*00338334SJens Wiklander 	if (!pa)
591*00338334SJens Wiklander 		return TEE_ERROR_GENERIC;
592*00338334SJens Wiklander 
593*00338334SJens Wiklander 	if (offs >= mobj->size)
594*00338334SJens Wiklander 		return TEE_ERROR_GENERIC;
595*00338334SJens Wiklander 
596*00338334SJens Wiklander 	p = m->pa + offs;
597*00338334SJens Wiklander 	if (granule) {
598*00338334SJens Wiklander 		if (granule != SMALL_PAGE_SIZE)
599*00338334SJens Wiklander 			return TEE_ERROR_GENERIC;
600*00338334SJens Wiklander 		p &= ~(granule - 1);
601*00338334SJens Wiklander 	}
602*00338334SJens Wiklander 	*pa = p;
603*00338334SJens Wiklander 
604*00338334SJens Wiklander 	return TEE_SUCCESS;
605*00338334SJens Wiklander }
606*00338334SJens Wiklander 
607*00338334SJens Wiklander static TEE_Result mobj_protmem_get_mem_type(struct mobj *mobj __unused,
608*00338334SJens Wiklander 					    uint32_t *mt)
609*00338334SJens Wiklander {
610*00338334SJens Wiklander 	if (!mt)
611*00338334SJens Wiklander 		return TEE_ERROR_GENERIC;
612*00338334SJens Wiklander 
613*00338334SJens Wiklander 	*mt = TEE_MATTR_MEM_TYPE_CACHED;
614*00338334SJens Wiklander 
615*00338334SJens Wiklander 	return TEE_SUCCESS;
616*00338334SJens Wiklander }
617*00338334SJens Wiklander 
618*00338334SJens Wiklander static bool mobj_protmem_matches(struct mobj *mobj __unused,
619*00338334SJens Wiklander 				 enum buf_is_attr attr)
620*00338334SJens Wiklander {
621*00338334SJens Wiklander 	return attr == CORE_MEM_SEC || attr == CORE_MEM_SDP_MEM;
622*00338334SJens Wiklander }
623*00338334SJens Wiklander 
624*00338334SJens Wiklander static void protmem_free_helper(struct mobj_protmem *mobj_protmem)
625*00338334SJens Wiklander {
626*00338334SJens Wiklander 	uint32_t exceptions = 0;
627*00338334SJens Wiklander 
628*00338334SJens Wiklander 	exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);
629*00338334SJens Wiklander 	SLIST_REMOVE(&protmem_list, mobj_protmem, mobj_protmem, next);
630*00338334SJens Wiklander 	cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);
631*00338334SJens Wiklander 
632*00338334SJens Wiklander 	restore_mem(mobj_protmem);
633*00338334SJens Wiklander 	free(mobj_protmem);
634*00338334SJens Wiklander }
635*00338334SJens Wiklander 
636*00338334SJens Wiklander static void mobj_protmem_free(struct mobj *mobj)
637*00338334SJens Wiklander {
638*00338334SJens Wiklander 	struct mobj_protmem *r = to_mobj_protmem(mobj);
639*00338334SJens Wiklander 	uint32_t exceptions = 0;
640*00338334SJens Wiklander 
641*00338334SJens Wiklander 	if (!r->releasing) {
642*00338334SJens Wiklander 		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
643*00338334SJens Wiklander 		protmem_free_helper(r);
644*00338334SJens Wiklander 		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
645*00338334SJens Wiklander 	} else {
646*00338334SJens Wiklander 		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
647*00338334SJens Wiklander 		r->release_frees = true;
648*00338334SJens Wiklander 		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
649*00338334SJens Wiklander 
650*00338334SJens Wiklander 		mutex_lock(&shm_mu);
651*00338334SJens Wiklander 		if (shm_release_waiters)
652*00338334SJens Wiklander 			condvar_broadcast(&shm_cv);
653*00338334SJens Wiklander 		mutex_unlock(&shm_mu);
654*00338334SJens Wiklander 	}
655*00338334SJens Wiklander }
656*00338334SJens Wiklander 
657*00338334SJens Wiklander static uint64_t mobj_protmem_get_cookie(struct mobj *mobj)
658*00338334SJens Wiklander {
659*00338334SJens Wiklander 	return to_mobj_protmem(mobj)->cookie;
660*00338334SJens Wiklander }
661*00338334SJens Wiklander 
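/*
 * Protected memory is not mapped via this mobj interface, so both map
 * count operations below are rejected.
 */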
662*00338334SJens Wiklander static TEE_Result mobj_protmem_inc_map(struct mobj *mobj __maybe_unused)
663*00338334SJens Wiklander {
664*00338334SJens Wiklander 	assert(to_mobj_protmem(mobj));
665*00338334SJens Wiklander 	return TEE_ERROR_BAD_PARAMETERS;
666*00338334SJens Wiklander }
667*00338334SJens Wiklander 
668*00338334SJens Wiklander static TEE_Result mobj_protmem_dec_map(struct mobj *mobj __maybe_unused)
669*00338334SJens Wiklander {
670*00338334SJens Wiklander 	assert(to_mobj_protmem(mobj));
671*00338334SJens Wiklander 	return TEE_ERROR_BAD_PARAMETERS;
672*00338334SJens Wiklander }
673*00338334SJens Wiklander 
674*00338334SJens Wiklander const struct mobj_ops mobj_protmem_ops
675*00338334SJens Wiklander 	__relrodata_unpaged("mobj_protmem_ops") = {
676*00338334SJens Wiklander 	.get_pa = mobj_protmem_get_pa,
677*00338334SJens Wiklander 	.get_mem_type = mobj_protmem_get_mem_type,
678*00338334SJens Wiklander 	.matches = mobj_protmem_matches,
679*00338334SJens Wiklander 	.free = mobj_protmem_free,
680*00338334SJens Wiklander 	.get_cookie = mobj_protmem_get_cookie,
681*00338334SJens Wiklander 	.inc_map = mobj_protmem_inc_map,
682*00338334SJens Wiklander 	.dec_map = mobj_protmem_dec_map,
683*00338334SJens Wiklander };
684*00338334SJens Wiklander 
685*00338334SJens Wiklander static struct mobj_protmem *to_mobj_protmem(struct mobj *mobj)
686*00338334SJens Wiklander {
687*00338334SJens Wiklander 	assert(mobj->ops == &mobj_protmem_ops);
688*00338334SJens Wiklander 	return container_of(mobj, struct mobj_protmem, mobj);
689*00338334SJens Wiklander }
690*00338334SJens Wiklander 
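/*
 * Creates a mobj over a normal world physical range lent to secure world
 * as protected memory for @use_case. The range must be non-secure, page
 * aligned and must not overlap any registered shared memory; on success
 * the platform hook has switched the protection of the range.
 */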
691*00338334SJens Wiklander struct mobj *mobj_protmem_alloc(paddr_t pa, paddr_size_t size, uint64_t cookie,
692*00338334SJens Wiklander 				enum mobj_use_case use_case)
693*00338334SJens Wiklander {
694*00338334SJens Wiklander 	TEE_Result res = TEE_SUCCESS;
695*00338334SJens Wiklander 	struct mobj_protmem *m = NULL;
696*00338334SJens Wiklander 	uint32_t exceptions = 0;
697*00338334SJens Wiklander 
698*00338334SJens Wiklander 	if (use_case == MOBJ_USE_CASE_NS_SHM ||
699*00338334SJens Wiklander 	    !core_pbuf_is(CORE_MEM_NON_SEC, pa, size))
700*00338334SJens Wiklander 		return NULL;
701*00338334SJens Wiklander 
702*00338334SJens Wiklander 	m = calloc(1, sizeof(*m));
703*00338334SJens Wiklander 	if (!m)
704*00338334SJens Wiklander 		return NULL;
705*00338334SJens Wiklander 
706*00338334SJens Wiklander 	m->mobj.ops = &mobj_protmem_ops;
707*00338334SJens Wiklander 	m->use_case = use_case;
708*00338334SJens Wiklander 	m->mobj.size = size;
709*00338334SJens Wiklander 	m->mobj.phys_granule = SMALL_PAGE_SIZE;
710*00338334SJens Wiklander 	refcount_set(&m->mobj.refc, 1);
711*00338334SJens Wiklander 	m->cookie = cookie;
712*00338334SJens Wiklander 	m->pa = pa;
713*00338334SJens Wiklander 
714*00338334SJens Wiklander 	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
715*00338334SJens Wiklander 	res = check_reg_shm_list_conflict(pa, size);
716*00338334SJens Wiklander 	if (res)
717*00338334SJens Wiklander 		goto out;
718*00338334SJens Wiklander 
719*00338334SJens Wiklander 	res = protect_mem(m);
720*00338334SJens Wiklander 	if (res)
721*00338334SJens Wiklander 		goto out;
722*00338334SJens Wiklander 	SLIST_INSERT_HEAD(&protmem_list, m, next);
723*00338334SJens Wiklander out:
724*00338334SJens Wiklander 	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
725*00338334SJens Wiklander 
726*00338334SJens Wiklander 	if (res) {
727*00338334SJens Wiklander 		free(m);
728*00338334SJens Wiklander 		return NULL;
729*00338334SJens Wiklander 	}
730*00338334SJens Wiklander 
731*00338334SJens Wiklander 	return &m->mobj;
732*00338334SJens Wiklander }
733*00338334SJens Wiklander 
734*00338334SJens Wiklander TEE_Result mobj_protmem_release_by_cookie(uint64_t cookie)
735*00338334SJens Wiklander {
736*00338334SJens Wiklander 	uint32_t exceptions = 0;
737*00338334SJens Wiklander 	struct mobj_protmem *rm = NULL;
738*00338334SJens Wiklander 
739*00338334SJens Wiklander 	/*
740*00338334SJens Wiklander 	 * Try to find rm and check whether it can be released by this
741*00338334SJens Wiklander 	 * function; if so, call mobj_put(). Otherwise this function was
742*00338334SJens Wiklander 	 * called either with a wrong cookie or perhaps a second time, so
743*00338334SJens Wiklander 	 * regardless return TEE_ERROR_BAD_PARAMETERS.
744*00338334SJens Wiklander 	 */
745*00338334SJens Wiklander 	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
746*00338334SJens Wiklander 	rm = protmem_find_unlocked(cookie);
747*00338334SJens Wiklander 	if (!rm || rm->releasing)
748*00338334SJens Wiklander 		rm = NULL;
749*00338334SJens Wiklander 	else
750*00338334SJens Wiklander 		rm->releasing = true;
751*00338334SJens Wiklander 
752*00338334SJens Wiklander 	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
753*00338334SJens Wiklander 
754*00338334SJens Wiklander 	if (!rm)
755*00338334SJens Wiklander 		return TEE_ERROR_BAD_PARAMETERS;
756*00338334SJens Wiklander 
757*00338334SJens Wiklander 	mobj_put(&rm->mobj);
758*00338334SJens Wiklander 
759*00338334SJens Wiklander 	/*
760*00338334SJens Wiklander 	 * We've established that this function can release the cookie.
761*00338334SJens Wiklander 	 * Now we wait until mobj_protmem_free() is called by the last
762*00338334SJens Wiklander 	 * mobj_put() needed to free this mobj. Note that the call to
763*00338334SJens Wiklander 	 * mobj_put() above could very well be that call.
764*00338334SJens Wiklander 	 *
765*00338334SJens Wiklander 	 * Once mobj_protmem_free() is called it will set rm->release_frees
766*00338334SJens Wiklander 	 * to true and we can free the mobj here.
767*00338334SJens Wiklander 	 */
768*00338334SJens Wiklander 	mutex_lock(&shm_mu);
769*00338334SJens Wiklander 	shm_release_waiters++;
770*00338334SJens Wiklander 	assert(shm_release_waiters);
771*00338334SJens Wiklander 
772*00338334SJens Wiklander 	while (true) {
773*00338334SJens Wiklander 		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
774*00338334SJens Wiklander 		if (rm->release_frees) {
775*00338334SJens Wiklander 			protmem_free_helper(rm);
776*00338334SJens Wiklander 			rm = NULL;
777*00338334SJens Wiklander 		}
778*00338334SJens Wiklander 		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
779*00338334SJens Wiklander 
780*00338334SJens Wiklander 		if (!rm)
781*00338334SJens Wiklander 			break;
782*00338334SJens Wiklander 		condvar_wait(&shm_cv, &shm_mu);
783*00338334SJens Wiklander 	}
784*00338334SJens Wiklander 
785*00338334SJens Wiklander 	assert(shm_release_waiters);
786*00338334SJens Wiklander 	shm_release_waiters--;
787*00338334SJens Wiklander 	mutex_unlock(&shm_mu);
788*00338334SJens Wiklander 
789*00338334SJens Wiklander 	return TEE_SUCCESS;
790*00338334SJens Wiklander }
791*00338334SJens Wiklander 
792*00338334SJens Wiklander static struct mobj_protmem *protmem_find_by_pa_unlocked(paddr_t pa,
793*00338334SJens Wiklander 							paddr_size_t sz)
794*00338334SJens Wiklander {
795*00338334SJens Wiklander 	struct mobj_protmem *m = NULL;
796*00338334SJens Wiklander 
797*00338334SJens Wiklander 	if (!sz)
798*00338334SJens Wiklander 		sz = 1;
799*00338334SJens Wiklander 
800*00338334SJens Wiklander 	SLIST_FOREACH(m, &protmem_list, next)
801*00338334SJens Wiklander 		if (core_is_buffer_inside(pa, sz, m->pa, m->mobj.size))
802*00338334SJens Wiklander 			return m;
803*00338334SJens Wiklander 
804*00338334SJens Wiklander 	return NULL;
805*00338334SJens Wiklander }
806*00338334SJens Wiklander 
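/*
 * Looks up the protected memory mobj covering the physical range
 * [@pa, @pa + @size) and returns a new reference to it, or NULL if no
 * such mobj exists or it is being released.
 */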
807*00338334SJens Wiklander struct mobj *mobj_protmem_get_by_pa(paddr_t pa, paddr_size_t size)
808*00338334SJens Wiklander {
809*00338334SJens Wiklander 	struct mobj_protmem *rm = NULL;
810*00338334SJens Wiklander 	struct mobj *mobj = NULL;
811*00338334SJens Wiklander 	uint32_t exceptions = 0;
812*00338334SJens Wiklander 
813*00338334SJens Wiklander 	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
814*00338334SJens Wiklander 
815*00338334SJens Wiklander 	rm = protmem_find_by_pa_unlocked(pa, size);
816*00338334SJens Wiklander 	if (rm && !rm->releasing)
817*00338334SJens Wiklander 		mobj = mobj_get(&rm->mobj);
818*00338334SJens Wiklander 
819*00338334SJens Wiklander 	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
820*00338334SJens Wiklander 
821*00338334SJens Wiklander 	return mobj;
822*00338334SJens Wiklander }
823*00338334SJens Wiklander #endif /*CFG_CORE_DYN_PROTMEM*/
824