// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2017, Linaro Limited
 */

#include <assert.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/linker.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <optee_msg.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

static struct mutex shm_mu = MUTEX_INITIALIZER;
static struct condvar shm_cv = CONDVAR_INITIALIZER;
static size_t shm_release_waiters;

/*
 * mobj_reg_shm implementation. Describes shared memory provided by normal world
 */

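/*
 * struct mobj_reg_shm - registered shared memory object
 * @mobj:		generic mobj header
 * @next:		link in reg_shm_list
 * @cookie:		cookie used by normal world to identify this object
 * @mm:			virtual address range in core_virt_shm_pool while the
 *			object is mapped, NULL while unmapped
 * @page_offset:	offset of the buffer start within the first page
 * @mapcount:		number of currently active mappings
 * @guarded:		true if the object may only be freed via mobj_put(),
 *			false if it may also be released by cookie
 * @releasing:		mobj_reg_shm_release_by_cookie() is in progress
 * @release_frees:	set by mobj_reg_shm_free() to tell the releasing
 *			thread that it is responsible for freeing the object
 * @pages:		physical addresses of the registered pages
 */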
struct mobj_reg_shm {
	struct mobj mobj;
	SLIST_ENTRY(mobj_reg_shm) next;
	uint64_t cookie;
	tee_mm_entry_t *mm;
	paddr_t page_offset;
	struct refcount mapcount;
	bool guarded;
	bool releasing;
	bool release_frees;
	paddr_t pages[];
};

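/*
 * Returns the allocation size needed for a struct mobj_reg_shm holding
 * @nr_pages page addresses, or 0 on arithmetic overflow.
 */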
static size_t mobj_reg_shm_size(size_t nr_pages)
{
	size_t s = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), nr_pages, &s))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_reg_shm), s, &s))
		return 0;
	return s;
}

static SLIST_HEAD(reg_shm_head, mobj_reg_shm) reg_shm_list =
	SLIST_HEAD_INITIALIZER(reg_shm_head);

static unsigned int reg_shm_slist_lock = SPINLOCK_UNLOCK;
static unsigned int reg_shm_map_lock = SPINLOCK_UNLOCK;

static struct mobj_reg_shm *to_mobj_reg_shm(struct mobj *mobj);

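/*
 * Translates an offset into the shared buffer to a physical address.
 * With a zero granule the exact address is returned, with a granule of
 * SMALL_PAGE_SIZE the address of the containing page is returned.
 */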
static TEE_Result mobj_reg_shm_get_pa(struct mobj *mobj, size_t offst,
				      size_t granule, paddr_t *pa)
{
	struct mobj_reg_shm *mobj_reg_shm = to_mobj_reg_shm(mobj);
	size_t full_offset = 0;
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offst >= mobj->size)
		return TEE_ERROR_GENERIC;

	full_offset = offst + mobj_reg_shm->page_offset;
	switch (granule) {
	case 0:
		p = mobj_reg_shm->pages[full_offset / SMALL_PAGE_SIZE] +
			(full_offset & SMALL_PAGE_MASK);
		break;
	case SMALL_PAGE_SIZE:
		p = mobj_reg_shm->pages[full_offset / SMALL_PAGE_SIZE];
		break;
	default:
		return TEE_ERROR_GENERIC;
	}
	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_reg_shm_get_pa);

static size_t mobj_reg_shm_get_phys_offs(struct mobj *mobj,
					 size_t granule __maybe_unused)
{
	assert(granule >= mobj->phys_granule);
	return to_mobj_reg_shm(mobj)->page_offset;
}

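/*
 * Returns the virtual address of @offst within the buffer, or NULL if the
 * object is not currently mapped or the range is out of bounds.
 */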
static void *mobj_reg_shm_get_va(struct mobj *mobj, size_t offst, size_t len)
{
	struct mobj_reg_shm *mrs = to_mobj_reg_shm(mobj);

	if (!mrs->mm || !mobj_check_offset_and_len(mobj, offst, len))
		return NULL;

	return (void *)(vaddr_t)(tee_mm_get_smem(mrs->mm) + offst +
				 mrs->page_offset);
}

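/* Unmaps the object from core memory. Requires reg_shm_map_lock to be held. */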
static void reg_shm_unmap_helper(struct mobj_reg_shm *r)
{
	assert(r->mm);
	assert(r->mm->pool->shift == SMALL_PAGE_SHIFT);
	core_mmu_unmap_pages(tee_mm_get_smem(r->mm), r->mm->size);
	tee_mm_free(r->mm);
	r->mm = NULL;
}

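/*
 * Unmaps the object if needed, removes it from reg_shm_list and frees it.
 * Requires reg_shm_slist_lock to be held.
 */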
static void reg_shm_free_helper(struct mobj_reg_shm *mobj_reg_shm)
{
	uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);

	if (mobj_reg_shm->mm)
		reg_shm_unmap_helper(mobj_reg_shm);

	cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);

	SLIST_REMOVE(&reg_shm_list, mobj_reg_shm, mobj_reg_shm, next);
	free(mobj_reg_shm);
}

static void mobj_reg_shm_free(struct mobj *mobj)
{
	struct mobj_reg_shm *r = to_mobj_reg_shm(mobj);
	uint32_t exceptions = 0;

	if (r->guarded && !r->releasing) {
		/*
		 * Guarded registered shared memory can't be released
		 * by cookie, only by mobj_put(). However, unguarded
		 * registered shared memory can also be freed by mobj_put()
		 * unless mobj_reg_shm_release_by_cookie() is waiting for
		 * the mobj to be released.
		 */
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		reg_shm_free_helper(r);
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
	} else {
		/*
		 * We've reached the point where an unguarded reg shm can
		 * be released by cookie. Notify any waiters.
		 */
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		r->release_frees = true;
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

		mutex_lock(&shm_mu);
		if (shm_release_waiters)
			condvar_broadcast(&shm_cv);
		mutex_unlock(&shm_mu);
	}
}

static TEE_Result mobj_reg_shm_get_mem_type(struct mobj *mobj __unused,
					    uint32_t *mt)
{
	if (!mt)
		return TEE_ERROR_GENERIC;

	*mt = TEE_MATTR_MEM_TYPE_CACHED;

	return TEE_SUCCESS;
}

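/*
 * Maps the registered pages into the core_virt_shm_pool virtual address
 * range the first time the object is mapped, subsequent calls only
 * increase mapcount.
 */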
static TEE_Result mobj_reg_shm_inc_map(struct mobj *mobj)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_reg_shm *r = to_mobj_reg_shm(mobj);
	uint32_t exceptions = 0;
	size_t sz = 0;

	while (true) {
		if (refcount_inc(&r->mapcount))
			return TEE_SUCCESS;

		exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);

		if (!refcount_val(&r->mapcount))
			break; /* continue to reinitialize */
		/*
		 * If another thread beat us to initialize mapcount,
		 * restart to make sure we still increase it.
		 */
		cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);
	}

	/*
	 * If we have beaten another thread calling mobj_reg_shm_dec_map()
	 * to the lock, the memory is still mapped and we only need to
	 * reinitialize mapcount to 1.
	 */
	if (!r->mm) {
		sz = ROUNDUP(mobj->size + r->page_offset, SMALL_PAGE_SIZE);
		r->mm = tee_mm_alloc(&core_virt_shm_pool, sz);
		if (!r->mm) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto out;
		}

		res = core_mmu_map_pages(tee_mm_get_smem(r->mm), r->pages,
					 sz / SMALL_PAGE_SIZE,
					 MEM_AREA_NSEC_SHM);
		if (res) {
			tee_mm_free(r->mm);
			r->mm = NULL;
			goto out;
		}
	}

	refcount_set(&r->mapcount, 1);
out:
	cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);

	return res;
}

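/*
 * Drops one mapping reference. When the last reference is gone the object
 * is unmapped again.
 */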
static TEE_Result mobj_reg_shm_dec_map(struct mobj *mobj)
{
	struct mobj_reg_shm *r = to_mobj_reg_shm(mobj);
	uint32_t exceptions = 0;

	if (!refcount_dec(&r->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);

	/*
	 * Check that another thread hasn't been able to:
	 * - increase the mapcount
	 * - or, increase the mapcount, decrease it again, and set r->mm to
	 *   NULL
	 * before we acquired the spinlock
	 */
	if (!refcount_val(&r->mapcount) && r->mm)
		reg_shm_unmap_helper(r);

	cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);

	return TEE_SUCCESS;
}

static bool mobj_reg_shm_matches(struct mobj *mobj, enum buf_is_attr attr);

static uint64_t mobj_reg_shm_get_cookie(struct mobj *mobj)
{
	return to_mobj_reg_shm(mobj)->cookie;
}

/*
 * When CFG_PREALLOC_RPC_CACHE is disabled, this variable is weak just
 * to ease breaking its dependency chain when added to the unpaged area.
 * When CFG_PREALLOC_RPC_CACHE is enabled, releasing RPC preallocated
 * shm mandates these resources to be unpaged.
 */
const struct mobj_ops mobj_reg_shm_ops
__weak __relrodata_unpaged("mobj_reg_shm_ops") = {
	.get_pa = mobj_reg_shm_get_pa,
	.get_phys_offs = mobj_reg_shm_get_phys_offs,
	.get_va = mobj_reg_shm_get_va,
	.get_mem_type = mobj_reg_shm_get_mem_type,
	.matches = mobj_reg_shm_matches,
	.free = mobj_reg_shm_free,
	.get_cookie = mobj_reg_shm_get_cookie,
	.inc_map = mobj_reg_shm_inc_map,
	.dec_map = mobj_reg_shm_dec_map,
};

#ifdef CFG_PREALLOC_RPC_CACHE
/* Releasing RPC preallocated shm mandates a few resources to be unpaged */
DECLARE_KEEP_PAGER(mobj_reg_shm_get_cookie);
DECLARE_KEEP_PAGER(mobj_reg_shm_matches);
DECLARE_KEEP_PAGER(mobj_reg_shm_free);
#endif

static bool mobj_reg_shm_matches(struct mobj *mobj __maybe_unused,
				   enum buf_is_attr attr)
{
	assert(mobj->ops == &mobj_reg_shm_ops);

	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

static struct mobj_reg_shm *to_mobj_reg_shm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_reg_shm_ops);
	return container_of(mobj, struct mobj_reg_shm, mobj);
}

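/*
 * Allocates a guarded mobj covering @num_pages normal world pages, with the
 * buffer starting @page_offset bytes into the first page. Fails unless
 * every page is page aligned and non-secure. The new object is inserted
 * into reg_shm_list so it can later be found by cookie.
 */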
struct mobj *mobj_reg_shm_alloc(paddr_t *pages, size_t num_pages,
				paddr_t page_offset, uint64_t cookie)
{
	struct mobj_reg_shm *mobj_reg_shm = NULL;
	size_t i = 0;
	uint32_t exceptions = 0;
	size_t s = 0;

	if (!num_pages || page_offset >= SMALL_PAGE_SIZE)
		return NULL;

	s = mobj_reg_shm_size(num_pages);
	if (!s)
		return NULL;
	mobj_reg_shm = calloc(1, s);
	if (!mobj_reg_shm)
		return NULL;

	mobj_reg_shm->mobj.ops = &mobj_reg_shm_ops;
	mobj_reg_shm->mobj.size = num_pages * SMALL_PAGE_SIZE - page_offset;
	mobj_reg_shm->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&mobj_reg_shm->mobj.refc, 1);
	mobj_reg_shm->cookie = cookie;
	mobj_reg_shm->guarded = true;
	mobj_reg_shm->page_offset = page_offset;
	memcpy(mobj_reg_shm->pages, pages, sizeof(*pages) * num_pages);

	/* Ensure loaded references match format and security constraints */
	for (i = 0; i < num_pages; i++) {
		if (mobj_reg_shm->pages[i] & SMALL_PAGE_MASK)
			goto err;

		/* Only Non-secure memory can be mapped there */
		if (!core_pbuf_is(CORE_MEM_NON_SEC, mobj_reg_shm->pages[i],
				  SMALL_PAGE_SIZE))
			goto err;
	}

	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	SLIST_INSERT_HEAD(&reg_shm_list, mobj_reg_shm, next);
	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

	return &mobj_reg_shm->mobj;
err:
	free(mobj_reg_shm);
	return NULL;
}

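/*
 * Marks the object as unguarded so that it may also be released with
 * mobj_reg_shm_release_by_cookie().
 */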
void mobj_reg_shm_unguard(struct mobj *mobj)
{
	uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);

	to_mobj_reg_shm(mobj)->guarded = false;
	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
}

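/* Looks up a cookie in reg_shm_list. Requires reg_shm_slist_lock to be held. */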
static struct mobj_reg_shm *reg_shm_find_unlocked(uint64_t cookie)
{
	struct mobj_reg_shm *mobj_reg_shm = NULL;

	SLIST_FOREACH(mobj_reg_shm, &reg_shm_list, next)
		if (mobj_reg_shm->cookie == cookie)
			return mobj_reg_shm;

	return NULL;
}

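/*
 * Finds a registered shm by cookie and takes a reference on it, or returns
 * NULL if the cookie is unknown.
 */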
struct mobj *mobj_reg_shm_get_by_cookie(uint64_t cookie)
{
	uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	struct mobj_reg_shm *r = reg_shm_find_unlocked(cookie);

	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
	if (!r)
		return NULL;

	return mobj_get(&r->mobj);
}

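/*
 * Releases an unguarded registered shm identified by cookie. Drops one
 * reference and then waits until the final mobj_put() has signalled, via
 * release_frees, that the object can be freed here.
 */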
TEE_Result mobj_reg_shm_release_by_cookie(uint64_t cookie)
{
	uint32_t exceptions = 0;
	struct mobj_reg_shm *r = NULL;

	/*
	 * Try to find r and check that it can be released by this
	 * function. If so, call mobj_put(). Otherwise this function was
	 * called with a bad cookie, or perhaps a second time for the same
	 * cookie; either way return TEE_ERROR_BAD_PARAMETERS.
	 */
	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	r = reg_shm_find_unlocked(cookie);
	if (!r || r->guarded || r->releasing)
		r = NULL;
	else
		r->releasing = true;

	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

	if (!r)
		return TEE_ERROR_BAD_PARAMETERS;

	mobj_put(&r->mobj);

	/*
	 * We've established that this function can release the cookie.
	 * Now we wait until mobj_reg_shm_free() is called by the last
	 * mobj_put() needed to free this mobj. Note that the call to
	 * mobj_put() above could very well be that call.
	 *
	 * Once mobj_reg_shm_free() is called it will set r->release_frees
	 * to true and we can free the mobj here.
	 */
	mutex_lock(&shm_mu);
	shm_release_waiters++;
	assert(shm_release_waiters);

	while (true) {
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		if (r->release_frees) {
			reg_shm_free_helper(r);
			r = NULL;
		}
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

		if (!r)
			break;
		condvar_wait(&shm_cv, &shm_mu);
	}

	assert(shm_release_waiters);
	shm_release_waiters--;
	mutex_unlock(&shm_mu);

	return TEE_SUCCESS;
}

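/*
 * Like mobj_reg_shm_alloc(), but also maps the object so that it has a
 * valid virtual address right away.
 */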
struct mobj *mobj_mapped_shm_alloc(paddr_t *pages, size_t num_pages,
				   paddr_t page_offset, uint64_t cookie)
{
	struct mobj *mobj = mobj_reg_shm_alloc(pages, num_pages,
					       page_offset, cookie);

	if (!mobj)
		return NULL;

	if (mobj_inc_map(mobj)) {
		mobj_put(mobj);
		return NULL;
	}

	return mobj;
}

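/*
 * Sets up core_virt_shm_pool covering the MEM_AREA_SHM_VASPACE virtual
 * address range. Runs as a preinit call before any shared memory is mapped.
 */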
static TEE_Result mobj_mapped_shm_init(void)
{
	vaddr_t pool_start = 0;
	vaddr_t pool_end = 0;

	core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
	if (!pool_start || !pool_end)
		panic("Can't find region for shmem pool");

	if (!tee_mm_init(&core_virt_shm_pool, pool_start, pool_end - pool_start,
			 SMALL_PAGE_SHIFT, TEE_MM_POOL_NO_FLAGS))
		panic("Could not create shmem pool");

	DMSG("Shared memory address range: %" PRIxVA ", %" PRIxVA,
	     pool_start, pool_end);
	return TEE_SUCCESS;
}

preinit(mobj_mapped_shm_init);