xref: /optee_os/core/mm/mobj_dyn_shm.c (revision 3bb5c167aa7e4e6de668c11cb40a4dc2ffd16184)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2024, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <initcall.h>
8 #include <keep.h>
9 #include <kernel/linker.h>
10 #include <kernel/mutex.h>
11 #include <kernel/panic.h>
12 #include <kernel/refcount.h>
13 #include <kernel/spinlock.h>
14 #include <kernel/tee_misc.h>
15 #include <mm/core_mmu.h>
16 #include <mm/mobj.h>
17 #include <mm/tee_pager.h>
18 #include <optee_msg.h>
19 #include <stdlib.h>
20 #include <tee_api_types.h>
21 #include <types_ext.h>
22 #include <util.h>
23 
/* Serializes release waiters against the mobj free callbacks below */
static struct mutex shm_mu = MUTEX_INITIALIZER;
/* Signalled when a releasing mobj's last reference has been dropped */
static struct condvar shm_cv = CONDVAR_INITIALIZER;
/* Threads blocked in a *_release_by_cookie() wait loop, protected by shm_mu */
static size_t shm_release_waiters;
27 
28 /*
29  * mobj_reg_shm implementation. Describes shared memory provided by normal world
30  */
31 
struct mobj_reg_shm {
	struct mobj mobj;
	SLIST_ENTRY(mobj_reg_shm) next;	/* Link in reg_shm_list */
	uint64_t cookie;		/* Normal world identifier */
	tee_mm_entry_t *mm;		/* Core VA range, NULL when unmapped */
	paddr_t page_offset;		/* Buffer offset into the first page */
	struct refcount mapcount;	/* Active core mappings */
	bool guarded;			/* Only freeable via mobj_put() */
	bool releasing;			/* release_by_cookie() in progress */
	bool release_frees;		/* Last put done; waiter frees */
	paddr_t pages[];		/* Physical pages backing the buffer */
};
44 
45 /*
46  * struct mobj_protmem - describes protected memory lent by normal world
47  */
struct mobj_protmem {
	struct mobj mobj;
	SLIST_ENTRY(mobj_protmem) next;	/* Link in protmem_list */
	uint64_t cookie;		/* Normal world identifier */
	paddr_t pa;			/* Start of the protected range */
	enum mobj_use_case use_case;	/* Purpose the range is protected for */
	bool releasing;			/* release_by_cookie() in progress */
	bool release_frees;		/* Last put done; waiter frees */
};
57 
/*
 * Bytes needed for a struct mobj_reg_shm tracking @nr_pages pages,
 * or 0 if the computation overflows.
 */
static size_t mobj_reg_shm_size(size_t nr_pages)
{
	size_t pages_sz = 0;
	size_t total = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), nr_pages, &pages_sz))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_reg_shm), pages_sz, &total))
		return 0;

	return total;
}
68 
/* All currently registered normal-world shared memory objects */
static SLIST_HEAD(reg_shm_head, mobj_reg_shm) reg_shm_list =
	SLIST_HEAD_INITIALIZER(reg_shm_head);

/* Protects reg_shm_list and the guarded/releasing/release_frees flags */
static unsigned int reg_shm_slist_lock = SPINLOCK_UNLOCK;
/* Protects mapcount transitions and the mm mapping of each entry */
static unsigned int reg_shm_map_lock = SPINLOCK_UNLOCK;

/* Access is serialized with reg_shm_slist_lock */
static SLIST_HEAD(protmem_head, mobj_protmem) protmem_list =
	SLIST_HEAD_INITIALIZER(protmem_head);
78 
79 static struct mobj_reg_shm *to_mobj_reg_shm(struct mobj *mobj);
80 
mobj_reg_shm_get_pa(struct mobj * mobj,size_t offst,size_t granule,paddr_t * pa)81 static TEE_Result mobj_reg_shm_get_pa(struct mobj *mobj, size_t offst,
82 				      size_t granule, paddr_t *pa)
83 {
84 	struct mobj_reg_shm *mobj_reg_shm = to_mobj_reg_shm(mobj);
85 	size_t full_offset = 0;
86 	paddr_t p = 0;
87 
88 	if (!pa)
89 		return TEE_ERROR_GENERIC;
90 
91 	if (offst >= mobj->size)
92 		return TEE_ERROR_GENERIC;
93 
94 	full_offset = offst + mobj_reg_shm->page_offset;
95 	switch (granule) {
96 	case 0:
97 		p = mobj_reg_shm->pages[full_offset / SMALL_PAGE_SIZE] +
98 			(full_offset & SMALL_PAGE_MASK);
99 		break;
100 	case SMALL_PAGE_SIZE:
101 		p = mobj_reg_shm->pages[full_offset / SMALL_PAGE_SIZE];
102 		break;
103 	default:
104 		return TEE_ERROR_GENERIC;
105 	}
106 	*pa = p;
107 
108 	return TEE_SUCCESS;
109 }
110 DECLARE_KEEP_PAGER(mobj_reg_shm_get_pa);
111 
/* Offset of the buffer start within its first physical granule */
static size_t mobj_reg_shm_get_phys_offs(struct mobj *mobj,
					 size_t granule __maybe_unused)
{
	/* Only meaningful for granules at least as large as a page */
	assert(granule >= mobj->phys_granule);

	return to_mobj_reg_shm(mobj)->page_offset;
}
118 
/* Core VA for @offst into @mobj, or NULL if unmapped or out of range */
static void *mobj_reg_shm_get_va(struct mobj *mobj, size_t offst, size_t len)
{
	struct mobj_reg_shm *r = to_mobj_reg_shm(mobj);
	vaddr_t va = 0;

	if (!r->mm || !mobj_check_offset_and_len(mobj, offst, len))
		return NULL;

	va = tee_mm_get_smem(r->mm) + offst + r->page_offset;

	return (void *)va;
}
129 
/*
 * Unmap the core mapping of @r and release its VA-space allocation.
 * Call with reg_shm_map_lock held and r->mm non-NULL.
 */
static void reg_shm_unmap_helper(struct mobj_reg_shm *r)
{
	assert(r->mm);
	/* The SHM VA pool hands out page-granular ranges */
	assert(r->mm->pool->shift == SMALL_PAGE_SHIFT);
	core_mmu_unmap_pages(tee_mm_get_smem(r->mm), r->mm->size);
	tee_mm_free(r->mm);
	r->mm = NULL;
}
138 
/*
 * Unmap (if mapped), unlink from reg_shm_list and free @mobj_reg_shm.
 * Call with reg_shm_slist_lock held; takes reg_shm_map_lock internally,
 * nested inside reg_shm_slist_lock.
 */
static void reg_shm_free_helper(struct mobj_reg_shm *mobj_reg_shm)
{
	uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);

	if (mobj_reg_shm->mm)
		reg_shm_unmap_helper(mobj_reg_shm);

	cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);

	SLIST_REMOVE(&reg_shm_list, mobj_reg_shm, mobj_reg_shm, next);
	free(mobj_reg_shm);
}
151 
/*
 * mobj_put() callback: either frees the object directly or hands the
 * free over to a waiting mobj_reg_shm_release_by_cookie().
 */
static void mobj_reg_shm_free(struct mobj *mobj)
{
	struct mobj_reg_shm *r = to_mobj_reg_shm(mobj);
	uint32_t exceptions = 0;

	if (r->guarded && !r->releasing) {
		/*
		 * Guarded registered shared memory can't be released
		 * by cookie, only by mobj_put(). However, unguarded
		 * registered shared memory can also be freed by mobj_put()
		 * unless mobj_reg_shm_release_by_cookie() is waiting for
		 * the mobj to be released.
		 */
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		reg_shm_free_helper(r);
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
	} else {
		/*
		 * We've reached the point where an unguarded reg shm can
		 * be released by cookie. Notify eventual waiters.
		 */
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		r->release_frees = true;
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

		mutex_lock(&shm_mu);
		if (shm_release_waiters)
			condvar_broadcast(&shm_cv);
		mutex_unlock(&shm_mu);
	}
}
183 
mobj_reg_shm_get_mem_type(struct mobj * mobj __unused,uint32_t * mt)184 static TEE_Result mobj_reg_shm_get_mem_type(struct mobj *mobj __unused,
185 					    uint32_t *mt)
186 {
187 	if (!mt)
188 		return TEE_ERROR_GENERIC;
189 
190 	*mt = TEE_MATTR_MEM_TYPE_CACHED;
191 
192 	return TEE_SUCCESS;
193 }
194 
/*
 * Increase the map count of @mobj and, on the 0 -> 1 transition, allocate
 * a core VA range and map the backing pages into it.
 * Returns TEE_ERROR_OUT_OF_MEMORY if VA allocation or mapping fails.
 */
static TEE_Result mobj_reg_shm_inc_map(struct mobj *mobj)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_reg_shm *r = to_mobj_reg_shm(mobj);
	uint32_t exceptions = 0;
	size_t sz = 0;

	while (true) {
		/* Fast path: count was non-zero, mapping already exists */
		if (refcount_inc(&r->mapcount))
			return TEE_SUCCESS;

		exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);

		if (!refcount_val(&r->mapcount))
			break; /* continue to reinitialize */
		/*
		 * If another thread beat us to initialize mapcount,
		 * restart to make sure we still increase it.
		 */
		cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);
	}

	/*
	 * If we have beaten another thread calling mobj_reg_shm_dec_map()
	 * to get the lock we need only to reinitialize mapcount to 1.
	 */
	if (!r->mm) {
		/* Round up so partial first/last pages are covered */
		sz = ROUNDUP(mobj->size + r->page_offset, SMALL_PAGE_SIZE);
		r->mm = tee_mm_alloc(&core_virt_shm_pool, sz);
		if (!r->mm) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto out;
		}

		res = core_mmu_map_pages(tee_mm_get_smem(r->mm), r->pages,
					 sz / SMALL_PAGE_SIZE,
					 MEM_AREA_NSEC_SHM);
		if (res) {
			tee_mm_free(r->mm);
			r->mm = NULL;
			goto out;
		}
	}

	refcount_set(&r->mapcount, 1);
out:
	cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);

	return res;
}
245 
/*
 * Decrease the map count of @mobj and, on the 1 -> 0 transition, tear
 * down the core VA mapping. Always returns TEE_SUCCESS.
 */
static TEE_Result mobj_reg_shm_dec_map(struct mobj *mobj)
{
	struct mobj_reg_shm *r = to_mobj_reg_shm(mobj);
	uint32_t exceptions = 0;

	/* Fast path: other mappings remain, nothing to tear down */
	if (!refcount_dec(&r->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);

	/*
	 * Check that another thread hasn't been able to:
	 * - increase the mapcount
	 * - or, increase the mapcount, decrease it again, and set r->mm to
	 *   NULL
	 * before we acquired the spinlock
	 */
	if (!refcount_val(&r->mapcount) && r->mm)
		reg_shm_unmap_helper(r);

	cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);

	return TEE_SUCCESS;
}
270 
271 static bool mobj_reg_shm_matches(struct mobj *mobj, enum buf_is_attr attr);
272 
/* Return the normal world cookie identifying @mobj */
static uint64_t mobj_reg_shm_get_cookie(struct mobj *mobj)
{
	return to_mobj_reg_shm(mobj)->cookie;
}
277 
/*
 * When CFG_PREALLOC_RPC_CACHE is disabled, this variable is weak just
 * to ease breaking its dependency chain when added to the unpaged area.
 * When CFG_PREALLOC_RPC_CACHE is enabled, releasing RPC preallocated
 * shm mandates these resources to be unpaged.
 */
/* Operation table for normal-world registered shared memory mobjs */
const struct mobj_ops mobj_reg_shm_ops
__weak __relrodata_unpaged("mobj_reg_shm_ops") = {
	.get_pa = mobj_reg_shm_get_pa,
	.get_phys_offs = mobj_reg_shm_get_phys_offs,
	.get_va = mobj_reg_shm_get_va,
	.get_mem_type = mobj_reg_shm_get_mem_type,
	.matches = mobj_reg_shm_matches,
	.free = mobj_reg_shm_free,
	.get_cookie = mobj_reg_shm_get_cookie,
	.inc_map = mobj_reg_shm_inc_map,
	.dec_map = mobj_reg_shm_dec_map,
};
296 
297 #ifdef CFG_PREALLOC_RPC_CACHE
298 /* Releasing RPC preallocated shm mandates few resources to be unpaged */
299 DECLARE_KEEP_PAGER(mobj_reg_shm_get_cookie);
300 DECLARE_KEEP_PAGER(mobj_reg_shm_matches);
301 DECLARE_KEEP_PAGER(mobj_reg_shm_free);
302 #endif
303 
/* Registered SHM counts as non-secure memory and as registered SHM */
static bool mobj_reg_shm_matches(struct mobj *mobj __maybe_unused,
				   enum buf_is_attr attr)
{
	assert(mobj->ops == &mobj_reg_shm_ops);

	switch (attr) {
	case CORE_MEM_NON_SEC:
	case CORE_MEM_REG_SHM:
		return true;
	default:
		return false;
	}
}
311 
/* Downcast @mobj; only valid for mobjs using mobj_reg_shm_ops */
static struct mobj_reg_shm *to_mobj_reg_shm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_reg_shm_ops);
	return container_of(mobj, struct mobj_reg_shm, mobj);
}
317 
check_reg_shm_conflict(struct mobj_reg_shm * r,paddr_t pa,paddr_size_t size)318 static TEE_Result check_reg_shm_conflict(struct mobj_reg_shm *r, paddr_t pa,
319 					 paddr_size_t size)
320 {
321 	size_t page_count = 0;
322 	size_t n = 0;
323 
324 	page_count = ROUNDUP2_DIV(r->mobj.size + r->page_offset,
325 				  SMALL_PAGE_SIZE);
326 	for (n = 0; n < page_count; n++)
327 		if (core_is_buffer_intersect(pa, size, r->pages[n],
328 					     SMALL_PAGE_SIZE))
329 			return TEE_ERROR_BAD_PARAMETERS;
330 
331 	return TEE_SUCCESS;
332 }
333 
check_protmem_conflict(struct mobj_reg_shm * r)334 static TEE_Result check_protmem_conflict(struct mobj_reg_shm *r)
335 {
336 	struct mobj_protmem *m = NULL;
337 	TEE_Result res = TEE_SUCCESS;
338 
339 	SLIST_FOREACH(m, &protmem_list, next) {
340 		res = check_reg_shm_conflict(r, m->pa, m->mobj.size);
341 		if (res)
342 			break;
343 	}
344 
345 	return res;
346 }
347 
/*
 * Create a guarded registered-SHM mobj over @num_pages physical pages,
 * with the buffer starting @page_offset bytes into the first page.
 * Returns NULL on bad parameters, secure/unaligned pages, overlap with
 * protected memory, or out of memory.
 */
struct mobj *mobj_reg_shm_alloc(paddr_t *pages, size_t num_pages,
				paddr_t page_offset, uint64_t cookie)
{
	struct mobj_reg_shm *r = NULL;
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;
	size_t alloc_size = 0;
	size_t n = 0;

	if (!num_pages || page_offset >= SMALL_PAGE_SIZE)
		return NULL;

	alloc_size = mobj_reg_shm_size(num_pages);
	if (!alloc_size)
		return NULL;

	r = calloc(1, alloc_size);
	if (!r)
		return NULL;

	r->mobj.ops = &mobj_reg_shm_ops;
	r->mobj.size = num_pages * SMALL_PAGE_SIZE - page_offset;
	r->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&r->mobj.refc, 1);
	r->cookie = cookie;
	r->guarded = true;
	r->page_offset = page_offset;
	memcpy(r->pages, pages, sizeof(*pages) * num_pages);

	/* Every page must be page aligned and non-secure */
	for (n = 0; n < num_pages; n++) {
		if (r->pages[n] & SMALL_PAGE_MASK)
			goto err;
		if (!core_pbuf_is(CORE_MEM_NON_SEC, r->pages[n],
				  SMALL_PAGE_SIZE))
			goto err;
	}

	/* Publish only if it doesn't overlap protected memory */
	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	res = check_protmem_conflict(r);
	if (!res)
		SLIST_INSERT_HEAD(&reg_shm_list, r, next);
	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
	if (res)
		goto err;

	return &r->mobj;
err:
	free(r);
	return NULL;
}
401 
mobj_reg_shm_unguard(struct mobj * mobj)402 void mobj_reg_shm_unguard(struct mobj *mobj)
403 {
404 	uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
405 
406 	to_mobj_reg_shm(mobj)->guarded = false;
407 	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
408 }
409 
/* Find the reg shm with @cookie; reg_shm_slist_lock must be held */
static struct mobj_reg_shm *reg_shm_find_unlocked(uint64_t cookie)
{
	struct mobj_reg_shm *r = NULL;

	SLIST_FOREACH(r, &reg_shm_list, next) {
		if (r->cookie == cookie)
			break;
	}

	/* NULL if the loop ran off the end of the list */
	return r;
}
420 
/* Find the protmem mobj with @cookie; reg_shm_slist_lock must be held */
static struct mobj_protmem *protmem_find_unlocked(uint64_t cookie)
{
	struct mobj_protmem *m = NULL;

	SLIST_FOREACH(m, &protmem_list, next) {
		if (m->cookie == cookie)
			break;
	}

	/* NULL if the loop ran off the end of the list */
	return m;
}
431 
/*
 * Look up the mobj matching @cookie in both the registered-SHM and the
 * protected-memory lists and take a reference on it. NULL if not found.
 */
struct mobj *mobj_reg_shm_get_by_cookie(uint64_t cookie)
{
	struct mobj *m = NULL;
	uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	struct mobj_reg_shm *rs = reg_shm_find_unlocked(cookie);

	if (rs) {
		m = mobj_get(&rs->mobj);
	} else {
		struct mobj_protmem *rm = protmem_find_unlocked(cookie);

		if (rm)
			m = mobj_get(&rm->mobj);
	}

	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

	return m;
}
454 
/*
 * Release the registered shared memory identified by @cookie on behalf
 * of normal world. Only unguarded, not-already-releasing objects
 * qualify; the call blocks until the last reference is dropped and the
 * object is freed.
 */
TEE_Result mobj_reg_shm_release_by_cookie(uint64_t cookie)
{
	uint32_t exceptions = 0;
	struct mobj_reg_shm *r = NULL;

	/*
	 * Try to find r and see if it can be released by this function,
	 * if so call mobj_put(). Otherwise this function is called either
	 * with a wrong cookie or perhaps a second time, regardless return
	 * TEE_ERROR_BAD_PARAMETERS.
	 */
	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	r = reg_shm_find_unlocked(cookie);
	if (!r || r->guarded || r->releasing)
		r = NULL;
	else
		r->releasing = true;

	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

	if (!r)
		return TEE_ERROR_BAD_PARAMETERS;

	mobj_put(&r->mobj);

	/*
	 * We've established that this function can release the cookie.
	 * Now we wait until mobj_reg_shm_free() is called by the last
	 * mobj_put() needed to free this mobj. Note that the call to
	 * mobj_put() above could very well be that call.
	 *
	 * Once mobj_reg_shm_free() is called it will set r->release_frees
	 * to true and we can free the mobj here.
	 */
	mutex_lock(&shm_mu);
	shm_release_waiters++;
	assert(shm_release_waiters);

	while (true) {
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		if (r->release_frees) {
			reg_shm_free_helper(r);
			r = NULL;
		}
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

		if (!r)
			break;
		condvar_wait(&shm_cv, &shm_mu);
	}

	assert(shm_release_waiters);
	shm_release_waiters--;
	mutex_unlock(&shm_mu);

	return TEE_SUCCESS;
}
512 
/*
 * Allocate a registered-SHM mobj (see mobj_reg_shm_alloc()) and map it
 * into the core VA space. NULL on allocation or mapping failure.
 */
struct mobj *mobj_mapped_shm_alloc(paddr_t *pages, size_t num_pages,
				  paddr_t page_offset, uint64_t cookie)
{
	struct mobj *mobj = NULL;

	mobj = mobj_reg_shm_alloc(pages, num_pages, page_offset, cookie);
	if (mobj && mobj_inc_map(mobj)) {
		/* Mapping failed: drop the allocation reference */
		mobj_put(mobj);
		mobj = NULL;
	}

	return mobj;
}
529 
mobj_mapped_shm_init(void)530 static TEE_Result mobj_mapped_shm_init(void)
531 {
532 	vaddr_t pool_start = 0;
533 	vaddr_t pool_end = 0;
534 
535 	core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
536 	if (!pool_start || !pool_end)
537 		panic("Can't find region for shmem pool");
538 
539 	if (!tee_mm_init(&core_virt_shm_pool, pool_start, pool_end - pool_start,
540 			 SMALL_PAGE_SHIFT, TEE_MM_POOL_NO_FLAGS))
541 		panic("Could not create shmem pool");
542 
543 	DMSG("Shared memory address range: %" PRIxVA ", %" PRIxVA,
544 	     pool_start, pool_end);
545 	return TEE_SUCCESS;
546 }
547 
548 preinit(mobj_mapped_shm_init);
549 
550 #ifdef CFG_CORE_DYN_PROTMEM
551 static struct mobj_protmem *to_mobj_protmem(struct mobj *mobj);
552 
check_reg_shm_list_conflict(paddr_t pa,paddr_size_t size)553 static TEE_Result check_reg_shm_list_conflict(paddr_t pa, paddr_size_t size)
554 {
555 	struct mobj_reg_shm *r = NULL;
556 	TEE_Result res = TEE_SUCCESS;
557 
558 	SLIST_FOREACH(r, &reg_shm_list, next) {
559 		res = check_reg_shm_conflict(r, pa, size);
560 		if (res)
561 			break;
562 	}
563 
564 	return res;
565 }
566 
/*
 * Ask the platform to protect [m->pa, m->pa + m->mobj.size) for
 * m->use_case. Base and size must both be page aligned.
 */
static TEE_Result protect_mem(struct mobj_protmem *m)
{
	if ((m->pa | m->mobj.size) & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	DMSG("use_case %d pa %#"PRIxPA", size %#zx",
	     m->use_case, m->pa, m->mobj.size);

	return plat_set_protmem_range(m->use_case, m->pa, m->mobj.size);
}
577 
/* Return the range to the normal-world shared memory use case */
static TEE_Result restore_mem(struct mobj_protmem *m)
{
	DMSG("use_case %d pa %#"PRIxPA", size %#zx",
	     m->use_case, m->pa, m->mobj.size);

	return plat_set_protmem_range(MOBJ_USE_CASE_NS_SHM, m->pa,
				      m->mobj.size);
}
586 
mobj_protmem_get_pa(struct mobj * mobj,size_t offs,size_t granule,paddr_t * pa)587 static TEE_Result mobj_protmem_get_pa(struct mobj *mobj, size_t offs,
588 				      size_t granule, paddr_t *pa)
589 {
590 	struct mobj_protmem *m = to_mobj_protmem(mobj);
591 	paddr_t p = 0;
592 
593 	if (!pa)
594 		return TEE_ERROR_GENERIC;
595 
596 	if (offs >= mobj->size)
597 		return TEE_ERROR_GENERIC;
598 
599 	p = m->pa + offs;
600 	if (granule) {
601 		if (granule != SMALL_PAGE_SIZE)
602 			return TEE_ERROR_GENERIC;
603 		p &= ~(granule - 1);
604 	}
605 	*pa = p;
606 
607 	return TEE_SUCCESS;
608 }
609 
mobj_protmem_get_mem_type(struct mobj * mobj __unused,uint32_t * mt)610 static TEE_Result mobj_protmem_get_mem_type(struct mobj *mobj __unused,
611 					    uint32_t *mt)
612 {
613 	if (!mt)
614 		return TEE_ERROR_GENERIC;
615 
616 	*mt = TEE_MATTR_MEM_TYPE_CACHED;
617 
618 	return TEE_SUCCESS;
619 }
620 
/* Protected memory counts as secure memory and as SDP memory */
static bool mobj_protmem_matches(struct mobj *mobj __unused,
				 enum buf_is_attr attr)
{
	switch (attr) {
	case CORE_MEM_SEC:
	case CORE_MEM_SDP_MEM:
		return true;
	default:
		return false;
	}
}
626 
/*
 * Unlink @mobj_protmem from protmem_list, restore the range to normal
 * world use and free the object. Both callers hold reg_shm_slist_lock,
 * which is what the protmem_list comment documents as its serializer.
 * NOTE(review): the removal below takes reg_shm_map_lock instead —
 * presumably redundant given the callers' slist lock; confirm intent.
 */
static void protmem_free_helper(struct mobj_protmem *mobj_protmem)
{
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);
	SLIST_REMOVE(&protmem_list, mobj_protmem, mobj_protmem, next);
	cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);

	restore_mem(mobj_protmem);
	free(mobj_protmem);
}
638 
/*
 * mobj_put() callback: frees the object directly unless
 * mobj_protmem_release_by_cookie() is waiting, in which case the waiter
 * is notified and performs the actual free.
 */
static void mobj_protmem_free(struct mobj *mobj)
{
	struct mobj_protmem *r = to_mobj_protmem(mobj);
	uint32_t exceptions = 0;

	if (!r->releasing) {
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		protmem_free_helper(r);
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
	} else {
		/* Hand the free over to the waiting releaser */
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		r->release_frees = true;
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

		mutex_lock(&shm_mu);
		if (shm_release_waiters)
			condvar_broadcast(&shm_cv);
		mutex_unlock(&shm_mu);
	}
}
659 
/* Return the normal world cookie identifying @mobj */
static uint64_t mobj_protmem_get_cookie(struct mobj *mobj)
{
	return to_mobj_protmem(mobj)->cookie;
}
664 
/* Protected memory is never mapped into the core VA space */
static TEE_Result mobj_protmem_inc_map(struct mobj *mobj __maybe_unused)
{
	assert(to_mobj_protmem(mobj));
	return TEE_ERROR_BAD_PARAMETERS;
}
670 
/* Protected memory is never mapped, so there is nothing to unmap */
static TEE_Result mobj_protmem_dec_map(struct mobj *mobj __maybe_unused)
{
	assert(to_mobj_protmem(mobj));
	return TEE_ERROR_BAD_PARAMETERS;
}
676 
/* Operation table for protected memory mobjs lent by normal world */
const struct mobj_ops mobj_protmem_ops
	__relrodata_unpaged("mobj_protmem_ops") = {
	.get_pa = mobj_protmem_get_pa,
	.get_mem_type = mobj_protmem_get_mem_type,
	.matches = mobj_protmem_matches,
	.free = mobj_protmem_free,
	.get_cookie = mobj_protmem_get_cookie,
	.inc_map = mobj_protmem_inc_map,
	.dec_map = mobj_protmem_dec_map,
};
687 
/* Downcast @mobj; only valid for mobjs using mobj_protmem_ops */
static struct mobj_protmem *to_mobj_protmem(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_protmem_ops);
	return container_of(mobj, struct mobj_protmem, mobj);
}
693 
/*
 * Create a protected memory mobj over the non-secure range
 * [pa, pa + size) and ask the platform to protect it for @use_case.
 * Returns NULL on bad parameters, overlap with registered SHM, failed
 * protection or out of memory.
 */
struct mobj *mobj_protmem_alloc(paddr_t pa, paddr_size_t size, uint64_t cookie,
				enum mobj_use_case use_case)
{
	struct mobj_protmem *m = NULL;
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;

	/* Only non-secure memory can be promoted to protected memory */
	if (use_case == MOBJ_USE_CASE_NS_SHM ||
	    !core_pbuf_is(CORE_MEM_NON_SEC, pa, size))
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_protmem_ops;
	m->use_case = use_case;
	m->mobj.size = size;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&m->mobj.refc, 1);
	m->cookie = cookie;
	m->pa = pa;

	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	res = check_reg_shm_list_conflict(pa, size);
	if (!res) {
		res = protect_mem(m);
		if (!res)
			SLIST_INSERT_HEAD(&protmem_list, m, next);
	}
	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

	if (res) {
		free(m);
		return NULL;
	}

	return &m->mobj;
}
736 
/*
 * Release the protected memory object identified by @cookie on behalf
 * of normal world. The call blocks until the last reference is dropped
 * and the object is freed (restoring the range to normal world use).
 */
TEE_Result mobj_protmem_release_by_cookie(uint64_t cookie)
{
	uint32_t exceptions = 0;
	struct mobj_protmem *rm = NULL;

	/*
	 * Try to find rm and see if it can be released by this function,
	 * if so call mobj_put(). Otherwise this function is called either
	 * with a wrong cookie or perhaps a second time, regardless return
	 * TEE_ERROR_BAD_PARAMETERS.
	 */
	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	rm = protmem_find_unlocked(cookie);
	if (!rm || rm->releasing)
		rm = NULL;
	else
		rm->releasing = true;

	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

	if (!rm)
		return TEE_ERROR_BAD_PARAMETERS;

	mobj_put(&rm->mobj);

	/*
	 * We've established that this function can release the cookie.
	 * Now we wait until mobj_protmem_free() is called by the last
	 * mobj_put() needed to free this mobj. Note that the call to
	 * mobj_put() above could very well be that call.
	 *
	 * Once mobj_protmem_free() is called it will set rm->release_frees
	 * to true and we can free the mobj here.
	 */
	mutex_lock(&shm_mu);
	shm_release_waiters++;
	assert(shm_release_waiters);

	while (true) {
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		if (rm->release_frees) {
			protmem_free_helper(rm);
			rm = NULL;
		}
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

		if (!rm)
			break;
		condvar_wait(&shm_cv, &shm_mu);
	}

	assert(shm_release_waiters);
	shm_release_waiters--;
	mutex_unlock(&shm_mu);

	return TEE_SUCCESS;
}
794 
/*
 * Find the protmem object fully containing [pa, pa + sz); a zero @sz is
 * treated as 1 byte. Call with reg_shm_slist_lock held.
 */
static struct mobj_protmem *protmem_find_by_pa_unlocked(paddr_t pa,
							paddr_size_t sz)
{
	struct mobj_protmem *m = NULL;
	paddr_size_t len = sz ? sz : 1;

	SLIST_FOREACH(m, &protmem_list, next) {
		if (core_is_buffer_inside(pa, len, m->pa, m->mobj.size))
			return m;
	}

	return NULL;
}
809 
/*
 * Take a reference on the protmem mobj containing [pa, pa + size).
 * Returns NULL if no match exists or the match is being released.
 */
struct mobj *mobj_protmem_get_by_pa(paddr_t pa, paddr_size_t size)
{
	struct mobj *mobj = NULL;
	uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	struct mobj_protmem *rm = protmem_find_by_pa_unlocked(pa, size);

	if (rm && !rm->releasing)
		mobj = mobj_get(&rm->mobj);

	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

	return mobj;
}
826 #endif /*CFG_CORE_DYN_PROTMEM*/
827