xref: /optee_os/core/arch/arm/mm/mobj_ffa.c (revision 003383344c26be3589383acc87c1ebb2860e9317)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2020, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <bitstring.h>
8 #include <config.h>
9 #include <ffa.h>
10 #include <initcall.h>
11 #include <kernel/refcount.h>
12 #include <kernel/spinlock.h>
13 #include <kernel/tee_misc.h>
14 #include <kernel/thread_spmc.h>
15 #include <kernel/virtualization.h>
16 #include <mm/mobj.h>
17 #include <sys/queue.h>
18 
19 /*
20  * Life cycle of struct mobj_ffa
21  *
22  * SPMC at S-EL1 (CFG_CORE_SEL1_SPMC=y)
23  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24  * During FFA_MEM_SHARE the mobj is allocated in mobj_ffa_sel1_spmc_new()
25  * and finally added to the inactive list at the end of add_mem_share()
26  * once successfully filled in.
27  *	registered_by_cookie = false
28  *	mobj.refc.val = 0
29  *	inactive_refs = 0
30  *
31  * During FFA_MEM_RECLAIM the mobj is reclaimed/freed using
32  * mobj_ffa_sel1_spmc_reclaim().  This will always succeed if the normal
33  * world only calls this when all other threads are done with the
34  * shared memory object. However, there are some conditions that must be
35  * met to make sure that this is the case:
36  *	mobj not in the active list, else -> return TEE_ERROR_BUSY
37  *	mobj not in inactive list, else -> return TEE_ERROR_ITEM_NOT_FOUND
38  *	mobj inactive_refs is 0, else -> return TEE_ERROR_BUSY
39  *
40  * A mobj is activated using mobj_ffa_get_by_cookie() which, unless the
41  * mobj is already active, will:
42  * - move the mobj into the active list
43  * - if not registered_by_cookie ->
44  *	set registered_by_cookie and increase inactive_refs
45  * - set mobj.refc.val to 1
46  * - increase inactive_refs
47  *
48  * A previously activated mobj is made ready for reclaim using
49  * mobj_ffa_unregister_by_cookie() which only succeeds if the mobj is in
50  * the inactive list and registered_by_cookie is set and then:
51  * - clears registered_by_cookie
52  * - decreases inactive_refs
53  *
54  * Each successful call to mobj_ffa_get_by_cookie() must be matched by a
55  * call to mobj_put(). When mobj.refc.val reaches 0:
56  * - the mobj is moved to the inactive list
57  * - inactive_refs is decreased
58  *
59  * SPMC at S-EL2/EL3 (CFG_CORE_SEL1_SPMC=n)
60  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
61  * A mobj is activated/allocated using mobj_ffa_get_by_cookie(). If the
62  * mobj is already active, the call only
63  * - increases mobj.refc.val and inactive_refs
64  * If the mobj is found in the inactive list, the call
65  * - sets mobj.refc.val to 1
66  * - increases inactive_refs
67  * - moves the mobj into the active list
68  * If the mobj is not found, it is created using
69  * thread_spmc_populate_mobj_from_rx() and the call then
70  * - sets mobj.refc.val to 1
71  * - increases inactive_refs
72  * - moves the mobj into the active list
73  *
74  * A previously activated mobj is relinquished using
75  * mobj_ffa_unregister_by_cookie() which only succeeds if the mobj is in
76  * the inactive list and inactive_refs is 0
77  */
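/*
 * Illustrative S-EL1 SPMC call sequence, a sketch based on the functions
 * in this file rather than an exact trace of the SPMC message handlers:
 *
 *	FFA_MEM_SHARE:
 *		mf = mobj_ffa_sel1_spmc_new(cookie, num_pages, use_case);
 *		mobj_ffa_add_pages_at(mf, &idx, pa, num_pages);
 *		mobj_ffa_push_to_inactive(mf);
 *	OP-TEE accessing the shared memory:
 *		mobj = mobj_ffa_get_by_cookie(cookie, offs);
 *		...
 *		mobj_put(mobj);
 *	Normal world tearing it down:
 *		mobj_ffa_unregister_by_cookie(cookie);
 *		FFA_MEM_RECLAIM -> mobj_ffa_sel1_spmc_reclaim(cookie);
 */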
78 struct mobj_ffa {
79 	struct mobj mobj;
80 	SLIST_ENTRY(mobj_ffa) link;
81 	uint64_t cookie;
82 	unsigned int inactive_refs;
83 #ifdef CFG_CORE_SEL1_SPMC
84 	bool registered_by_cookie;
85 #endif
86 };
87 
88 struct mobj_ffa_shm {
89 	struct mobj_ffa mf;
90 	tee_mm_entry_t *mm;
91 	struct refcount mapcount;
92 	uint16_t page_offset;
93 	paddr_t pages[];
94 };
95 
96 struct mobj_ffa_prm {
97 	struct mobj_ffa mf;
98 	paddr_t pa;
99 	enum mobj_use_case use_case;
100 	bool assigned_use_case;
101 };
102 
103 SLIST_HEAD(mobj_ffa_head, mobj_ffa);
104 
105 #ifdef CFG_CORE_SEL1_SPMC
106 #ifdef CFG_NS_VIRTUALIZATION
107 static bitstr_t *get_shm_bits(void)
108 {
109 	return virt_get_shm_bits();
110 }
111 #else
112 static bitstr_t bit_decl(__shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT);
113 
114 static bitstr_t *get_shm_bits(void)
115 {
116 	return __shm_bits;
117 }
118 #endif
119 #endif
120 
121 static struct mobj_ffa_head shm_head = SLIST_HEAD_INITIALIZER(shm_head);
122 static struct mobj_ffa_head shm_inactive_head =
123 	SLIST_HEAD_INITIALIZER(shm_inactive_head);
124 
125 static unsigned int shm_lock = SPINLOCK_UNLOCK;
126 
127 static const struct mobj_ops mobj_ffa_shm_ops;
128 static const struct mobj_ops mobj_ffa_prm_ops;
129 
130 static bool is_mobj_ffa_shm(struct mobj *mobj)
131 {
132 	return mobj->ops == &mobj_ffa_shm_ops;
133 }
134 
135 static struct mobj_ffa_shm *to_mobj_ffa_shm(struct mobj *mobj)
136 {
137 	assert(is_mobj_ffa_shm(mobj));
138 	return container_of(mobj, struct mobj_ffa_shm, mf.mobj);
139 }
140 
141 static bool is_mobj_ffa_prm(struct mobj *mobj)
142 {
143 	return mobj->ops == &mobj_ffa_prm_ops;
144 }
145 
146 static struct mobj_ffa_prm *to_mobj_ffa_prm(struct mobj *mobj)
147 {
148 	assert(is_mobj_ffa_prm(mobj));
149 	return container_of(mobj, struct mobj_ffa_prm, mf.mobj);
150 }
151 
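/*
 * Returns the allocation size in bytes needed for a struct mobj_ffa_shm
 * holding @num_pages page addresses, or 0 on arithmetic overflow.
 */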
152 static size_t shm_size(size_t num_pages)
153 {
154 	size_t s = 0;
155 
156 	if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
157 		return 0;
158 	if (ADD_OVERFLOW(sizeof(struct mobj_ffa_shm), s, &s))
159 		return 0;
160 	return s;
161 }
162 
163 static struct mobj_ffa *ffa_shm_new(unsigned int num_pages)
164 {
165 	struct mobj_ffa_shm *m = NULL;
166 	size_t s = 0;
167 
168 	if (!num_pages)
169 		return NULL;
170 
171 	s = shm_size(num_pages);
172 	if (!s)
173 		return NULL;
174 	m = calloc(1, s);
175 	if (!m)
176 		return NULL;
177 
178 	m->mf.mobj.ops = &mobj_ffa_shm_ops;
179 	m->mf.mobj.size = num_pages * SMALL_PAGE_SIZE;
180 	m->mf.mobj.phys_granule = SMALL_PAGE_SIZE;
181 	refcount_set(&m->mf.mobj.refc, 0);
182 	m->mf.inactive_refs = 0;
183 
184 	return &m->mf;
185 }
186 
187 static struct mobj_ffa *ffa_prm_new(unsigned int num_pages,
188 				    enum mobj_use_case use_case)
189 {
190 	struct mobj_ffa_prm *m = NULL;
191 	size_t sz = 0;
192 
193 	if (!num_pages || MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &sz) ||
194 	    use_case == MOBJ_USE_CASE_NS_SHM)
195 		return NULL;
196 
197 	m = calloc(1, sizeof(*m));
198 	if (!m)
199 		return NULL;
200 
201 	m->mf.mobj.ops = &mobj_ffa_prm_ops;
202 	m->mf.mobj.size = sz;
203 	m->mf.mobj.phys_granule = SMALL_PAGE_SIZE;
204 	refcount_set(&m->mf.mobj.refc, 0);
205 	m->mf.inactive_refs = 0;
206 	m->use_case = use_case;
207 
208 	return &m->mf;
209 }
210 
211 #ifdef CFG_CORE_SEL1_SPMC
212 struct mobj_ffa *mobj_ffa_sel1_spmc_new(uint64_t cookie,
213 					unsigned int num_pages,
214 					enum mobj_use_case use_case)
215 {
216 	struct mobj_ffa *m = NULL;
217 	bitstr_t *shm_bits = NULL;
218 	uint32_t exceptions = 0;
219 	int i = 0;
220 
221 	if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID) {
222 		if (!(cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT))
223 			return NULL;
224 		if (virt_add_cookie_to_current_guest(cookie))
225 			return NULL;
226 	}
227 
228 	switch (use_case) {
229 	case MOBJ_USE_CASE_NS_SHM:
230 		m = ffa_shm_new(num_pages);
231 		break;
232 	case MOBJ_USE_CASE_SEC_VIDEO_PLAY:
233 	case MOBJ_USE_CASE_TRUSED_UI:
234 		m = ffa_prm_new(num_pages, use_case);
235 		break;
236 	default:
237 		break;
238 	}
239 	if (!m) {
240 		if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
241 			virt_remove_cookie(cookie);
242 		return NULL;
243 	}
244 
245 	if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID) {
246 		m->cookie = cookie;
247 		return m;
248 	}
249 
250 	shm_bits = get_shm_bits();
251 	exceptions = cpu_spin_lock_xsave(&shm_lock);
252 	bit_ffc(shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT, &i);
253 	if (i != -1) {
254 		bit_set(shm_bits, i);
255 		m->cookie = i;
256 		m->cookie |= FFA_MEMORY_HANDLE_NON_SECURE_BIT;
257 		/*
258 		 * Encode the partition ID into the handle so we know which
259 		 * partition to switch to when reclaiming a handle.
260 		 */
261 		m->cookie |= SHIFT_U64(virt_get_current_guest_id(),
262 				       FFA_MEMORY_HANDLE_PRTN_SHIFT);
263 	}
264 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
265 
266 	if (i == -1) {
267 		mobj_ffa_sel1_spmc_delete(m);
268 		return NULL;
269 	}
270 
271 	return m;
272 }
273 #endif /*CFG_CORE_SEL1_SPMC*/
274 
275 static size_t get_page_count(struct mobj_ffa *mf)
276 {
277 	return ROUNDUP_DIV(mf->mobj.size, SMALL_PAGE_SIZE);
278 }
279 
280 static bool cmp_cookie(struct mobj_ffa *mf, uint64_t cookie)
281 {
282 	return mf->cookie == cookie;
283 }
284 
285 static bool cmp_ptr(struct mobj_ffa *mf, uint64_t ptr)
286 {
287 	return mf == (void *)(vaddr_t)ptr;
288 }
289 
290 static bool check_shm_overlaps_prm(struct mobj_ffa_shm *shm,
291 				   struct mobj_ffa_prm *prm)
292 {
293 	size_t n = 0;
294 
295 	for (n = 0; n < shm->mf.mobj.size / SMALL_PAGE_SIZE; n++)
296 		if (core_is_buffer_intersect(prm->pa, prm->mf.mobj.size,
297 					     shm->pages[n], SMALL_PAGE_SIZE))
298 			return true;
299 
300 	return false;
301 }
302 
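/*
 * Returns true if @mf and the mobj passed via @ptr overlap physically.
 * Overlap between two FF-A SHM objects is deliberately not checked, see
 * the comment below.
 */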
303 static bool cmp_pa_overlap(struct mobj_ffa *mf, uint64_t ptr)
304 {
305 	struct mobj_ffa *mf2 = (void *)(vaddr_t)ptr;
306 	bool mf_is_shm = is_mobj_ffa_shm(&mf->mobj);
307 	bool mf2_is_shm = is_mobj_ffa_shm(&mf2->mobj);
308 
309 	if (mf_is_shm && mf2_is_shm) {
310 		/*
311 		 * Not a security issue and might be too expensive to check
312 		 * if we have many pages in each registered shared memory
313 		 * object.
314 		 */
315 		return false;
316 	}
317 
318 	if (mf_is_shm)
319 		return check_shm_overlaps_prm(to_mobj_ffa_shm(&mf->mobj),
320 					      to_mobj_ffa_prm(&mf2->mobj));
321 	if (mf2_is_shm)
322 		return check_shm_overlaps_prm(to_mobj_ffa_shm(&mf2->mobj),
323 					      to_mobj_ffa_prm(&mf->mobj));
324 
325 	return core_is_buffer_intersect(to_mobj_ffa_prm(&mf->mobj)->pa,
326 					mf->mobj.size,
327 					to_mobj_ffa_prm(&mf2->mobj)->pa,
328 					mf2->mobj.size);
329 }
330 
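/*
 * Removes and returns the first mobj in @head for which
 * @cmp_func(mobj, @val) returns true, or NULL if there is no match.
 */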
331 static struct mobj_ffa *pop_from_list(struct mobj_ffa_head *head,
332 				      bool (*cmp_func)(struct mobj_ffa *mf,
333 						       uint64_t val),
334 				      uint64_t val)
335 {
336 	struct mobj_ffa *mf = SLIST_FIRST(head);
337 	struct mobj_ffa *p = NULL;
338 
339 	if (!mf)
340 		return NULL;
341 
342 	if (cmp_func(mf, val)) {
343 		SLIST_REMOVE_HEAD(head, link);
344 		return mf;
345 	}
346 
347 	while (true) {
348 		p = SLIST_NEXT(mf, link);
349 		if (!p)
350 			return NULL;
351 		if (cmp_func(p, val)) {
352 			SLIST_REMOVE_AFTER(mf, link);
353 			return p;
354 		}
355 		mf = p;
356 	}
357 }
358 
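/*
 * Returns the first mobj in @head for which @cmp_func(mobj, @val)
 * returns true without removing it, or NULL if there is no match.
 */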
359 static struct mobj_ffa *find_in_list(struct mobj_ffa_head *head,
360 				     bool (*cmp_func)(struct mobj_ffa *mf,
361 						      uint64_t val),
362 				     uint64_t val)
363 {
364 	struct mobj_ffa *mf = NULL;
365 
366 	SLIST_FOREACH(mf, head, link)
367 		if (cmp_func(mf, val))
368 			return mf;
369 
370 	return NULL;
371 }
372 
373 #if defined(CFG_CORE_SEL1_SPMC)
374 void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mf)
375 {
376 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION) ||
377 	    !(mf->cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT)) {
378 		uint64_t mask = FFA_MEMORY_HANDLE_NON_SECURE_BIT;
379 		bitstr_t *shm_bits = get_shm_bits();
380 		uint32_t exceptions = 0;
381 		int64_t i = 0;
382 
383 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
384 			mask |= SHIFT_U64(FFA_MEMORY_HANDLE_PRTN_MASK,
385 					  FFA_MEMORY_HANDLE_PRTN_SHIFT);
386 		i = mf->cookie & ~mask;
387 		assert(i >= 0 && i < SPMC_CORE_SEL1_MAX_SHM_COUNT);
388 
389 		exceptions = cpu_spin_lock_xsave(&shm_lock);
390 		assert(bit_test(shm_bits, i));
391 		bit_clear(shm_bits, i);
392 		cpu_spin_unlock_xrestore(&shm_lock, exceptions);
393 	}
394 
395 	if (is_mobj_ffa_shm(&mf->mobj)) {
396 		struct mobj_ffa_shm *m = to_mobj_ffa_shm(&mf->mobj);
397 
398 		assert(!m->mm);
399 		free(m);
400 	} else {
401 		free(to_mobj_ffa_prm(&mf->mobj));
402 	}
403 }
404 #else /* !defined(CFG_CORE_SEL1_SPMC) */
405 struct mobj_ffa *mobj_ffa_spmc_new(uint64_t cookie, unsigned int num_pages,
406 				   enum mobj_use_case use_case)
407 {
408 	struct mobj_ffa *mf = NULL;
409 
410 	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
411 	if (use_case == MOBJ_USE_CASE_NS_SHM)
412 		mf = ffa_shm_new(num_pages);
413 	else
414 		mf = ffa_prm_new(num_pages, use_case);
415 	if (mf)
416 		mf->cookie = cookie;
417 	return mf;
418 }
419 
420 void mobj_ffa_spmc_delete(struct mobj_ffa *mf)
421 {
422 	if (is_mobj_ffa_shm(&mf->mobj))
423 		free(to_mobj_ffa_shm(&mf->mobj));
424 	else
425 		free(to_mobj_ffa_prm(&mf->mobj));
426 }
427 #endif /* !defined(CFG_CORE_SEL1_SPMC) */
428 
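/*
 * Records @num_pages physical pages starting at @pa in @mf, beginning at
 * page index *@idx, and advances *@idx. FF-A SHM objects store each page
 * address while protected-memory objects must be physically contiguous.
 */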
429 TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mf, unsigned int *idx,
430 				 paddr_t pa, unsigned int num_pages)
431 {
432 	size_t tot_page_count = get_page_count(mf);
433 	unsigned int n = 0;
434 
435 	if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
436 		return TEE_ERROR_BAD_PARAMETERS;
437 
438 	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
439 	    !core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))
440 		return TEE_ERROR_BAD_PARAMETERS;
441 
442 	if (is_mobj_ffa_shm(&mf->mobj)) {
443 		struct mobj_ffa_shm *mfs = to_mobj_ffa_shm(&mf->mobj);
444 
445 		for (n = 0; n < num_pages; n++)
446 			mfs->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;
447 	} else {
448 		struct mobj_ffa_prm *mfr = to_mobj_ffa_prm(&mf->mobj);
449 
450 		if (!*idx)
451 			mfr->pa = pa;
452 		else if (mfr->pa != pa + *idx * SMALL_PAGE_SIZE)
453 			return TEE_ERROR_BAD_PARAMETERS;
454 	}
455 
456 	(*idx) += num_pages;
457 
458 	return TEE_SUCCESS;
459 }
460 
461 uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mf)
462 {
463 	return mf->cookie;
464 }
465 
466 static TEE_Result protect_mem(struct mobj_ffa_prm *m)
467 {
468 	DMSG("use_case %d pa %#"PRIxPA", size %#zx cookie %#"PRIx64,
469 	     m->use_case, m->pa, m->mf.mobj.size, m->mf.cookie);
470 
471 	return plat_set_protmem_range(m->use_case, m->pa, m->mf.mobj.size);
472 }
473 
474 static TEE_Result __maybe_unused restore_mem(struct mobj_ffa_prm *m)
475 {
476 	DMSG("use_case %d pa %#" PRIxPA ", size %#zx cookie %#"PRIx64,
477 	     m->use_case, m->pa, m->mf.mobj.size, m->mf.cookie);
478 
479 	return plat_set_protmem_range(MOBJ_USE_CASE_NS_SHM, m->pa,
480 				      m->mf.mobj.size);
481 }
482 
483 TEE_Result mobj_ffa_push_to_inactive(struct mobj_ffa *mf)
484 {
485 	TEE_Result res = TEE_SUCCESS;
486 	uint32_t exceptions = 0;
487 
488 	exceptions = cpu_spin_lock_xsave(&shm_lock);
489 	assert(!find_in_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf));
490 	assert(!find_in_list(&shm_inactive_head, cmp_cookie, mf->cookie));
491 	assert(!find_in_list(&shm_head, cmp_cookie, mf->cookie));
492 
493 	if (find_in_list(&shm_inactive_head, cmp_pa_overlap, (vaddr_t)mf) ||
494 	    find_in_list(&shm_head, cmp_pa_overlap, (vaddr_t)mf)) {
495 		res = TEE_ERROR_BAD_PARAMETERS;
496 		goto out;
497 	}
498 	if (is_mobj_ffa_prm(&mf->mobj)) {
499 		res = protect_mem(to_mobj_ffa_prm(&mf->mobj));
500 		if (res)
501 			goto out;
502 	}
503 
504 	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
505 
506 out:
507 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
508 
509 	return res;
510 }
511 
512 static void unmap_helper(struct mobj_ffa_shm *m)
513 {
514 	if (m->mm) {
515 		core_mmu_unmap_pages(tee_mm_get_smem(m->mm),
516 				     get_page_count(&m->mf));
517 		tee_mm_free(m->mm);
518 		m->mm = NULL;
519 	}
520 }
521 
522 #ifdef CFG_CORE_SEL1_SPMC
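/*
 * Frees the cookie on FFA_MEM_RECLAIM. Fails with TEE_ERROR_BUSY if the
 * mobj is still active or has inactive references, and with
 * TEE_ERROR_ITEM_NOT_FOUND if the cookie is unknown.
 */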
523 TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie)
524 {
525 	TEE_Result res = TEE_SUCCESS;
526 	struct mobj_ffa *mf = NULL;
527 	uint32_t exceptions = 0;
528 
529 	exceptions = cpu_spin_lock_xsave(&shm_lock);
530 	mf = find_in_list(&shm_head, cmp_cookie, cookie);
531 	/*
532 	 * If the mobj is found here it's still active and cannot be
533 	 * reclaimed.
534 	 */
535 	if (mf) {
536 		DMSG("cookie %#"PRIx64" busy refc %u",
537 		     cookie, refcount_val(&mf->mobj.refc));
538 		res = TEE_ERROR_BUSY;
539 		goto out;
540 	}
541 
542 	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
543 	if (!mf) {
544 		res = TEE_ERROR_ITEM_NOT_FOUND;
545 		goto out;
546 	}
547 	/*
548 	 * If the mobj has been registered via mobj_ffa_get_by_cookie()
549 	 * but not unregistered yet with mobj_ffa_unregister_by_cookie().
550 	 */
551 	if (mf->inactive_refs) {
552 		DMSG("cookie %#"PRIx64" busy inactive_refs %u",
553 		     cookie, mf->inactive_refs);
554 		res = TEE_ERROR_BUSY;
555 		goto out;
556 	}
557 
558 	if (!pop_from_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf))
559 		panic();
560 	if (is_mobj_ffa_prm(&mf->mobj))
561 		res = restore_mem(to_mobj_ffa_prm(&mf->mobj));
562 	else
563 		res = TEE_SUCCESS;
564 out:
565 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
566 	if (!res) {
567 		mobj_ffa_sel1_spmc_delete(mf);
568 		virt_remove_cookie(cookie);
569 	}
570 	return res;
571 }
572 #endif /*CFG_CORE_SEL1_SPMC*/
573 
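/*
 * Drops the registration obtained via mobj_ffa_get_by_cookie() (S-EL1
 * SPMC) or relinquishes the memory back to the SPMC at S-EL2/EL3, making
 * the cookie eligible for reclaim by the normal world.
 */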
574 TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie)
575 {
576 	TEE_Result res = TEE_SUCCESS;
577 	struct mobj_ffa *mf = NULL;
578 	uint32_t exceptions = 0;
579 
580 	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
581 	exceptions = cpu_spin_lock_xsave(&shm_lock);
582 	mf = find_in_list(&shm_head, cmp_cookie, cookie);
583 	/*
584 	 * If the mobj is found here it's still active and cannot be
585 	 * unregistered.
586 	 */
587 	if (mf) {
588 		EMSG("cookie %#"PRIx64" busy refc %u:%u",
589 		     cookie, refcount_val(&mf->mobj.refc), mf->inactive_refs);
590 		res = TEE_ERROR_BUSY;
591 		goto out;
592 	}
593 	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
594 	/*
595 	 * If the mobj isn't found or if it already has been unregistered.
596 	 */
597 	if (!mf) {
598 		EMSG("cookie %#"PRIx64" not found", cookie);
599 		res = TEE_ERROR_ITEM_NOT_FOUND;
600 		goto out;
601 	}
602 #if defined(CFG_CORE_SEL1_SPMC)
603 	if (!mf->registered_by_cookie) {
604 		/*
605 		 * This is expected behaviour if the normal world has
606 		 * registered the memory but OP-TEE has not yet used the
607 		 * corresponding cookie with mobj_ffa_get_by_cookie(). It
608 		 * can be non-trivial for the normal world to predict if
609 		 * the cookie really has been used or not. So even if we
610 		 * return it as an error it will be ignored by
611 		 * handle_unregister_shm().
612 		 */
613 		EMSG("cookie %#"PRIx64" not registered refs %u:%u",
614 		     cookie, refcount_val(&mf->mobj.refc), mf->inactive_refs);
615 		res = TEE_ERROR_ITEM_NOT_FOUND;
616 		goto out;
617 	}
618 	assert(mf->inactive_refs);
619 	mf->inactive_refs--;
620 	mf->registered_by_cookie = false;
621 #else
622 	if (mf->inactive_refs) {
623 		EMSG("cookie %#"PRIx64" busy refc %u:%u",
624 		     cookie, refcount_val(&mf->mobj.refc), mf->inactive_refs);
625 		res = TEE_ERROR_BUSY;
626 		goto out;
627 	}
628 	mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
629 	mobj_ffa_spmc_delete(mf);
630 	thread_spmc_relinquish(cookie);
631 #endif
632 	res = TEE_SUCCESS;
633 
634 out:
635 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
636 	return res;
637 }
638 
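/*
 * Looks up the mobj matching @cookie, activating it if needed, and
 * returns it with an increased reference count. Each successful call
 * must be matched by a call to mobj_put().
 */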
639 struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie,
640 				    unsigned int internal_offs)
641 {
642 	struct mobj_ffa_shm *mfs = NULL;
643 	struct mobj_ffa *mf = NULL;
644 	uint32_t exceptions = 0;
645 	uint16_t offs = 0;
646 
647 	if (internal_offs >= SMALL_PAGE_SIZE)
648 		return NULL;
649 	exceptions = cpu_spin_lock_xsave(&shm_lock);
650 	mf = find_in_list(&shm_head, cmp_cookie, cookie);
651 	if (mf) {
652 		if (is_mobj_ffa_shm(&mf->mobj))
653 			offs = to_mobj_ffa_shm(&mf->mobj)->page_offset;
654 		else
655 			offs = 0;
656 		if (offs == internal_offs) {
657 			if (!refcount_inc(&mf->mobj.refc)) {
658 				/*
659 				 * If refcount is 0 some other thread has
660 				 * If the refcount was 0, another thread
661 				 * called mobj_put() and let it reach 0 before
662 				 * ffa_shm_inactivate() got the lock, but we
663 				 * found the mobj first. Reinitialize it.
664 				refcount_set(&mf->mobj.refc, 1);
665 				mf->inactive_refs++;
666 			}
667 			DMSG("cookie %#"PRIx64" active: refc %u:%u",
668 			     cookie, refcount_val(&mf->mobj.refc),
669 			     mf->inactive_refs);
670 		} else {
671 			EMSG("cookie %#"PRIx64" mismatching internal_offs got %#"PRIx16" expected %#x",
672 			     cookie, offs, internal_offs);
673 			mf = NULL;
674 		}
675 	} else {
676 		mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
677 #if !defined(CFG_CORE_SEL1_SPMC)
678 		/* Try to retrieve it from the SPM at S-EL2 */
679 		if (mf) {
680 			DMSG("cookie %#"PRIx64" resurrecting", cookie);
681 		} else {
682 			enum mobj_use_case uc = MOBJ_USE_CASE_NS_SHM;
683 
684 			DMSG("Populating mobj from rx buffer, cookie %#"PRIx64,
685 			     cookie);
686 			mf = thread_spmc_populate_mobj_from_rx(cookie, uc);
687 		}
688 #endif
689 		if (mf) {
690 #if defined(CFG_CORE_SEL1_SPMC)
691 			if (!mf->registered_by_cookie) {
692 				mf->inactive_refs++;
693 				mf->registered_by_cookie = true;
694 			}
695 #endif
696 			assert(refcount_val(&mf->mobj.refc) == 0);
697 			refcount_set(&mf->mobj.refc, 1);
698 			mf->inactive_refs++;
699 			if (is_mobj_ffa_shm(&mf->mobj)) {
700 				mfs = to_mobj_ffa_shm(&mf->mobj);
701 				refcount_set(&mfs->mapcount, 0);
702 
703 				/*
704 				 * mfs->page_offset is offset into the
705 				 * first page.  This offset is assigned
706 				 * from the internal_offs parameter to this
707 				 * function.
708 				 *
709 				 * While a mobj_ffa is active (ref_count >
710 				 * 0) this will not change, but when being
711 				 * pushed to the inactive list it can be
712 				 * changed again.
713 				 *
714 				 * So below we're backing out the old
715 				 * mfs->page_offset and then assigning a
716 				 * new from internal_offset.
717 				 */
718 				mf->mobj.size += mfs->page_offset;
719 				assert(!(mf->mobj.size & SMALL_PAGE_MASK));
720 				mf->mobj.size -= internal_offs;
721 				mfs->page_offset = internal_offs;
722 			} else if (is_mobj_ffa_prm(&mf->mobj) &&
723 				   internal_offs) {
724 				mf = NULL;
725 			}
726 
727 			SLIST_INSERT_HEAD(&shm_head, mf, link);
728 		}
729 	}
730 
731 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
732 
733 	if (!mf) {
734 		EMSG("Failed to get cookie %#"PRIx64" internal_offs %#x",
735 		     cookie, internal_offs);
736 		return NULL;
737 	}
738 	return &mf->mobj;
739 }
740 
741 static TEE_Result ffa_shm_get_pa(struct mobj *mobj, size_t offset,
742 				 size_t granule, paddr_t *pa)
743 {
744 	struct mobj_ffa_shm *m = to_mobj_ffa_shm(mobj);
745 	size_t full_offset = 0;
746 	paddr_t p = 0;
747 
748 	if (!pa)
749 		return TEE_ERROR_GENERIC;
750 
751 	if (offset >= mobj->size)
752 		return TEE_ERROR_GENERIC;
753 
754 	full_offset = offset + m->page_offset;
755 	switch (granule) {
756 	case 0:
757 		p = m->pages[full_offset / SMALL_PAGE_SIZE] +
758 		    (full_offset & SMALL_PAGE_MASK);
759 		break;
760 	case SMALL_PAGE_SIZE:
761 		p = m->pages[full_offset / SMALL_PAGE_SIZE];
762 		break;
763 	default:
764 		return TEE_ERROR_GENERIC;
765 	}
766 	*pa = p;
767 
768 	return TEE_SUCCESS;
769 }
770 
771 static size_t ffa_shm_get_phys_offs(struct mobj *mobj,
772 				    size_t granule __maybe_unused)
773 {
774 	assert(granule >= mobj->phys_granule);
775 
776 	return to_mobj_ffa_shm(mobj)->page_offset;
777 }
778 
779 static void *ffa_shm_get_va(struct mobj *mobj, size_t offset, size_t len)
780 {
781 	struct mobj_ffa_shm *m = to_mobj_ffa_shm(mobj);
782 
783 	if (!m->mm || !mobj_check_offset_and_len(mobj, offset, len))
784 		return NULL;
785 
786 	return (void *)(tee_mm_get_smem(m->mm) + offset + m->page_offset);
787 }
788 
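/*
 * Reached via the mobj "free" callback when the last mobj_put() brings
 * mobj.refc to 0: the mobj is moved back to the inactive list and one
 * inactive reference is dropped.
 */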
789 static void ffa_inactivate(struct mobj_ffa *mf)
790 {
791 	uint32_t exceptions = 0;
792 
793 	exceptions = cpu_spin_lock_xsave(&shm_lock);
794 	/*
795 	 * If refcount isn't 0 some other thread has found this mobj in
796 	 * shm_head after the mobj_put() that put us here and before we got
797 	 * the lock.
798 	 */
799 	if (refcount_val(&mf->mobj.refc)) {
800 		DMSG("cookie %#"PRIx64" was resurrected", mf->cookie);
801 		goto out;
802 	}
803 
804 	/*
805 	 * pop_from_list() can fail to find the mobj if we had just
806 	 * decreased the refcount to 0 in mobj_put() and were about to
807 	 * acquire the shm_lock, but another thread found this mobj and
808 	 * reinitialized the refcount to 1. Then, before we got CPU time,
809 	 * the other thread called mobj_put() and deactivated the mobj again.
810 	 *
811 	 * However, we still have the inactive count that guarantees
812 	 * that the mobj can't be freed until it reaches 0.
813 	 * At this point the mobj is in the inactive list.
814 	 */
815 	if (pop_from_list(&shm_head, cmp_ptr, (vaddr_t)mf)) {
816 		if (is_mobj_ffa_shm(&mf->mobj))
817 			unmap_helper(to_mobj_ffa_shm(&mf->mobj));
818 		SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
819 	}
820 out:
821 	if (!mf->inactive_refs)
822 		panic();
823 	mf->inactive_refs--;
824 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
825 }
826 
827 static void ffa_shm_inactivate(struct mobj *mobj)
828 {
829 	ffa_inactivate(&to_mobj_ffa_shm(mobj)->mf);
830 }
831 
832 static TEE_Result ffa_shm_get_mem_type(struct mobj *mobj __unused, uint32_t *mt)
833 {
834 	if (!mt)
835 		return TEE_ERROR_GENERIC;
836 
837 	*mt = TEE_MATTR_MEM_TYPE_CACHED;
838 
839 	return TEE_SUCCESS;
840 }
841 
842 static bool ffa_shm_matches(struct mobj *mobj __maybe_unused,
843 			    enum buf_is_attr attr)
844 {
845 	assert(is_mobj_ffa_shm(mobj));
846 
847 	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
848 }
849 
850 static uint64_t ffa_shm_get_cookie(struct mobj *mobj)
851 {
852 	return to_mobj_ffa_shm(mobj)->mf.cookie;
853 }
854 
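/*
 * Maps the shared memory pages into core virtual address space on the
 * first reference; later calls only increase mapcount. The lock-free
 * refcount_inc() fast path falls back to taking shm_lock when mapcount
 * is 0.
 */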
855 static TEE_Result ffa_shm_inc_map(struct mobj *mobj)
856 {
857 	struct mobj_ffa_shm *m = to_mobj_ffa_shm(mobj);
858 	TEE_Result res = TEE_SUCCESS;
859 	uint32_t exceptions = 0;
860 	size_t sz = 0;
861 
862 	while (true) {
863 		if (refcount_inc(&m->mapcount))
864 			return TEE_SUCCESS;
865 
866 		exceptions = cpu_spin_lock_xsave(&shm_lock);
867 
868 		if (!refcount_val(&m->mapcount))
869 			break; /* continue to reinitialize */
870 		/*
871 		 * If another thread beat us to initialize mapcount,
872 		 * restart to make sure we still increase it.
873 		 */
874 		cpu_spin_unlock_xrestore(&shm_lock, exceptions);
875 	}
876 
877 	/*
878 	 * If we have beaten another thread calling ffa_shm_dec_map()
879 	 * to the lock, we only need to reinitialize mapcount to 1.
880 	 */
881 	if (!m->mm) {
882 		sz = ROUNDUP(mobj->size + m->page_offset, SMALL_PAGE_SIZE);
883 		m->mm = tee_mm_alloc(&core_virt_shm_pool, sz);
884 		if (!m->mm) {
885 			res = TEE_ERROR_OUT_OF_MEMORY;
886 			goto out;
887 		}
888 
889 		res = core_mmu_map_pages(tee_mm_get_smem(m->mm), m->pages,
890 					 sz / SMALL_PAGE_SIZE,
891 					 MEM_AREA_NSEC_SHM);
892 		if (res) {
893 			tee_mm_free(m->mm);
894 			m->mm = NULL;
895 			goto out;
896 		}
897 	}
898 
899 	refcount_set(&m->mapcount, 1);
900 out:
901 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
902 
903 	return res;
904 }
905 
906 static TEE_Result ffa_shm_dec_map(struct mobj *mobj)
907 {
908 	struct mobj_ffa_shm *m = to_mobj_ffa_shm(mobj);
909 	uint32_t exceptions = 0;
910 
911 	if (!refcount_dec(&m->mapcount))
912 		return TEE_SUCCESS;
913 
914 	exceptions = cpu_spin_lock_xsave(&shm_lock);
915 	if (!refcount_val(&m->mapcount))
916 		unmap_helper(m);
917 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
918 
919 	return TEE_SUCCESS;
920 }
921 
922 static TEE_Result mapped_shm_init(void)
923 {
924 	vaddr_t pool_start = 0;
925 	vaddr_t pool_end = 0;
926 
927 	core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
928 	if (!pool_start || !pool_end)
929 		panic("Can't find region for shmem pool");
930 
931 	if (!tee_mm_init(&core_virt_shm_pool, pool_start, pool_end - pool_start,
932 			 SMALL_PAGE_SHIFT,
933 			 TEE_MM_POOL_NO_FLAGS))
934 		panic("Could not create shmem pool");
935 
936 	DMSG("Shared memory address range: %#"PRIxVA", %#"PRIxVA,
937 	     pool_start, pool_end);
938 	return TEE_SUCCESS;
939 }
940 
941 static const struct mobj_ops mobj_ffa_shm_ops = {
942 	.get_pa = ffa_shm_get_pa,
943 	.get_phys_offs = ffa_shm_get_phys_offs,
944 	.get_va = ffa_shm_get_va,
945 	.get_mem_type = ffa_shm_get_mem_type,
946 	.matches = ffa_shm_matches,
947 	.free = ffa_shm_inactivate,
948 	.get_cookie = ffa_shm_get_cookie,
949 	.inc_map = ffa_shm_inc_map,
950 	.dec_map = ffa_shm_dec_map,
951 };
952 
953 preinit(mapped_shm_init);
954 
955 #ifdef CFG_CORE_DYN_PROTMEM
956 static TEE_Result ffa_prm_get_pa(struct mobj *mobj, size_t offset,
957 				 size_t granule, paddr_t *pa)
958 {
959 	struct mobj_ffa_prm *m = to_mobj_ffa_prm(mobj);
960 	paddr_t p;
961 
962 	if (!pa || offset >= mobj->size)
963 		return TEE_ERROR_GENERIC;
964 
965 	p = m->pa + offset;
966 
967 	if (granule) {
968 		if (granule != SMALL_PAGE_SIZE &&
969 		    granule != CORE_MMU_PGDIR_SIZE)
970 			return TEE_ERROR_GENERIC;
971 		p &= ~(granule - 1);
972 	}
973 
974 	*pa = p;
975 	return TEE_SUCCESS;
976 }
977 
978 static TEE_Result ffa_prm_get_mem_type(struct mobj *mobj __maybe_unused,
979 				       uint32_t *mt)
980 {
981 	assert(is_mobj_ffa_prm(mobj));
982 
983 	if (!mt)
984 		return TEE_ERROR_GENERIC;
985 
986 	*mt = TEE_MATTR_MEM_TYPE_CACHED;
987 
988 	return TEE_SUCCESS;
989 }
990 
991 static bool ffa_prm_matches(struct mobj *mobj __maybe_unused,
992 			    enum buf_is_attr attr)
993 {
994 	assert(is_mobj_ffa_prm(mobj));
995 
996 	return attr == CORE_MEM_SEC || attr == CORE_MEM_SDP_MEM;
997 }
998 
999 static void ffa_prm_inactivate(struct mobj *mobj)
1000 {
1001 	ffa_inactivate(&to_mobj_ffa_prm(mobj)->mf);
1002 }
1003 
1004 static uint64_t ffa_prm_get_cookie(struct mobj *mobj)
1005 {
1006 	return to_mobj_ffa_prm(mobj)->mf.cookie;
1007 }
1008 
1009 static TEE_Result ffa_prm_no_map(struct mobj *mobj __maybe_unused)
1010 {
1011 	assert(is_mobj_ffa_prm(mobj));
1012 
1013 	return TEE_ERROR_GENERIC;
1014 }
1015 
1016 static const struct mobj_ops mobj_ffa_prm_ops = {
1017 	.get_pa = ffa_prm_get_pa,
1018 	.get_mem_type = ffa_prm_get_mem_type,
1019 	.matches = ffa_prm_matches,
1020 	.free = ffa_prm_inactivate,
1021 	.get_cookie = ffa_prm_get_cookie,
1022 	.inc_map = ffa_prm_no_map,
1023 	.dec_map = ffa_prm_no_map,
1024 };
1025 
1026 static bool cmp_protmem_pa(struct mobj_ffa *mf, uint64_t pa)
1027 {
1028 	struct mobj_ffa_prm *m = NULL;
1029 
1030 	if (!is_mobj_ffa_prm(&mf->mobj))
1031 		return false;
1032 
1033 	m = to_mobj_ffa_prm(&mf->mobj);
1034 	return pa >= m->pa && pa < m->pa + m->mf.mobj.size;
1035 }
1036 
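/*
 * Returns the active protected-memory mobj covering [@pa, @pa + @size),
 * with an increased reference count, or NULL if no active mobj covers
 * the range.
 */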
1037 struct mobj *mobj_ffa_protmem_get_by_pa(paddr_t pa, paddr_size_t size)
1038 {
1039 	struct mobj_ffa_prm *m = NULL;
1040 	struct mobj_ffa *mf = NULL;
1041 	struct mobj *mobj = NULL;
1042 	uint32_t exceptions = 0;
1043 
1044 	if (!size)
1045 		size = 1;
1046 
1047 	exceptions = cpu_spin_lock_xsave(&shm_lock);
1048 
1049 	mf = find_in_list(&shm_head, cmp_protmem_pa, pa);
1050 	if (mf) {
1051 		m = to_mobj_ffa_prm(&mf->mobj);
1052 		if (core_is_buffer_inside(pa, size, m->pa, m->mf.mobj.size))
1053 			mobj = mobj_get(&mf->mobj);
1054 	}
1055 
1056 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
1057 	return mobj;
1058 }
1059 
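/*
 * Assigns @use_case to the protected-memory mobj identified by @cookie,
 * retrieving the memory description from the SPMC at S-EL2 if it isn't
 * known yet. Fails if the mobj is active, already has a use-case
 * assigned, or was created with a different use-case.
 */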
1060 TEE_Result mobj_ffa_assign_protmem(uint64_t cookie, enum mobj_use_case use_case)
1061 {
1062 	TEE_Result res = TEE_SUCCESS;
1063 	struct mobj_ffa_prm *m = NULL;
1064 	struct mobj_ffa *mf = NULL;
1065 	uint32_t exceptions = 0;
1066 
1067 	exceptions = cpu_spin_lock_xsave(&shm_lock);
1068 	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
1069 	if (mf) {
1070 		if (!is_mobj_ffa_prm(&mf->mobj)) {
1071 			res = TEE_ERROR_ITEM_NOT_FOUND;
1072 			goto out;
1073 		}
1074 		m = to_mobj_ffa_prm(&mf->mobj);
1075 		if (m->assigned_use_case) {
1076 			res = TEE_ERROR_BUSY;
1077 			goto out;
1078 		}
1079 		if (m->use_case != use_case) {
1080 			res = TEE_ERROR_BAD_PARAMETERS;
1081 			goto out;
1082 		}
1083 		m->assigned_use_case = true;
1084 		goto out;
1085 	}
1086 	mf = find_in_list(&shm_head, cmp_cookie, cookie);
1087 	if (mf) {
1088 		if (!is_mobj_ffa_prm(&mf->mobj))
1089 			res = TEE_ERROR_BUSY;
1090 		else
1091 			res = TEE_ERROR_ITEM_NOT_FOUND;
1092 		goto out;
1093 	}
1094 #if !defined(CFG_CORE_SEL1_SPMC)
1095 	/* Try to retrieve it from the SPM at S-EL2 */
1096 	DMSG("Populating mobj from rx buffer, cookie %#"PRIx64" use-case %d",
1097 	     cookie, use_case);
1098 	mf = thread_spmc_populate_mobj_from_rx(cookie, use_case);
1099 	if (mf) {
1100 		SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
1101 	} else {
1102 		EMSG("Failed to assign use-case %d to cookie %#"PRIx64"",
1103 		     use_case, cookie);
1104 		res = TEE_ERROR_ITEM_NOT_FOUND;
1105 		goto out;
1106 	}
1107 #endif
1108 out:
1109 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
1110 	return res;
1111 }
1112 #endif /*CFG_CORE_DYN_PROTMEM*/
1113