// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2020, Linaro Limited
 */

#include <assert.h>
#include <bitstring.h>
#include <config.h>
#include <ffa.h>
#include <initcall.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/thread_spmc.h>
#include <kernel/virtualization.h>
#include <mm/mobj.h>
#include <sys/queue.h>

/*
 * Life cycle of struct mobj_ffa
 *
 * SPMC at S-EL1 (CFG_CORE_SEL1_SPMC=y)
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * During FFA_MEM_SHARE the mobj is allocated in mobj_ffa_sel1_spmc_new()
 * and finally added to the inactive list at the end of add_mem_share()
 * once successfully filled in.
 *	registered_by_cookie = false
 *	mobj.refc.val = 0
 *	inactive_refs = 0
 *
 * During FFA_MEM_RECLAIM the mobj is reclaimed/freed using
 * mobj_ffa_sel1_spmc_reclaim(). This always succeeds provided the normal
 * world only calls it when all other threads are done with the shared
 * memory object. However, the following conditions must be met to make
 * sure that this is the case:
 *	mobj not in the active list, else -> return TEE_ERROR_BUSY
 *	mobj not in inactive list, else -> return TEE_ERROR_ITEM_NOT_FOUND
 *	mobj inactive_refs is 0, else -> return TEE_ERROR_BUSY
 *
 * A mobj is activated using mobj_ffa_get_by_cookie() which, unless the
 * mobj is already active:
 * - moves the mobj into the active list
 * - if not registered_by_cookie ->
 *	sets registered_by_cookie and increases inactive_refs
 * - sets mobj.refc.val to 1
 * - increases inactive_refs
 *
 * A previously activated mobj is made ready for reclaim using
 * mobj_ffa_unregister_by_cookie() which only succeeds if the mobj is in
 * the inactive list and registered_by_cookie is set, and then:
 * - clears registered_by_cookie
 * - decreases inactive_refs
 *
 * Each successful call to mobj_ffa_get_by_cookie() must be matched by a
 * call to mobj_put(). When mobj.refc.val reaches 0:
 * - the mobj is moved to the inactive list
 * - inactive_refs is decreased
 *
 * SPMC at S-EL2/EL3 (CFG_CORE_SEL1_SPMC=n)
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * A mobj is activated/allocated using mobj_ffa_get_by_cookie() which:
 * - if the mobj is already active, only increases mobj.refc.val and
 *   inactive_refs
 * - if the mobj is found in the inactive list
 *	- sets mobj.refc.val to 1
 *	- increases inactive_refs
 *	- moves the mobj into the active list
 * - if the mobj isn't found, creates it using
 *   thread_spmc_populate_mobj_from_rx() and then
 *	- sets mobj.refc.val to 1
 *	- increases inactive_refs
 *	- moves the mobj into the active list
 *
 * A previously activated mobj is relinquished using
 * mobj_ffa_unregister_by_cookie() which only succeeds if the mobj is in
 * the inactive list and inactive_refs is 1
 */
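
/*
 * Illustrative sketch only (not called from anywhere): how the API in
 * this file is typically exercised with an S-EL1 SPMC for a single
 * shared page, with error handling omitted. The add_mem_share() step
 * lives in the SPMC message handling code; pa, cookie and mobj are
 * assumed local variables.
 *
 *	unsigned int idx = 0;
 *	struct mobj_ffa *mf = NULL;
 *	struct mobj *mobj = NULL;
 *	uint64_t cookie = 0;
 *
 *	// FFA_MEM_SHARE: allocate, describe and publish the object
 *	mf = mobj_ffa_sel1_spmc_new(OPTEE_MSG_FMEM_INVALID_GLOBAL_ID, 1);
 *	mobj_ffa_add_pages_at(mf, &idx, pa, 1);
 *	cookie = mobj_ffa_push_to_inactive(mf);
 *
 *	// Later use of the cookie, for instance when invoking a TA
 *	mobj = mobj_ffa_get_by_cookie(cookie, 0);
 *	// ... map and access the memory via the mobj API ...
 *	mobj_put(mobj);
 *	mobj_ffa_unregister_by_cookie(cookie);
 *
 *	// FFA_MEM_RECLAIM: give the memory back to the normal world
 *	mobj_ffa_sel1_spmc_reclaim(cookie);
 */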
struct mobj_ffa {
	struct mobj mobj;
	SLIST_ENTRY(mobj_ffa) link;
	uint64_t cookie;
	tee_mm_entry_t *mm;
	struct refcount mapcount;
	unsigned int inactive_refs;
	uint16_t page_offset;
#ifdef CFG_CORE_SEL1_SPMC
	bool registered_by_cookie;
#endif
	paddr_t pages[];
};

SLIST_HEAD(mobj_ffa_head, mobj_ffa);

#ifdef CFG_CORE_SEL1_SPMC
#define NUM_SHMS	64
static bitstr_t bit_decl(shm_bits, NUM_SHMS);
#endif

static struct mobj_ffa_head shm_head = SLIST_HEAD_INITIALIZER(shm_head);
static struct mobj_ffa_head shm_inactive_head =
	SLIST_HEAD_INITIALIZER(shm_inactive_head);

static unsigned int shm_lock = SPINLOCK_UNLOCK;

static const struct mobj_ops mobj_ffa_ops;

static struct mobj_ffa *to_mobj_ffa(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_ffa_ops);
	return container_of(mobj, struct mobj_ffa, mobj);
}
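
/*
 * Returns the allocation size in bytes needed for a struct mobj_ffa
 * covering @num_pages pages, or 0 on overflow.
 */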
static size_t shm_size(size_t num_pages)
{
	size_t s = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_ffa), s, &s))
		return 0;
	return s;
}
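
/*
 * Allocates and partially initializes a struct mobj_ffa for @num_pages
 * pages. The cookie and the pages[] array are filled in by the callers.
 */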
static struct mobj_ffa *ffa_new(unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	size_t s = 0;

	if (!num_pages)
		return NULL;

	s = shm_size(num_pages);
	if (!s)
		return NULL;
	mf = calloc(1, s);
	if (!mf)
		return NULL;

	mf->mobj.ops = &mobj_ffa_ops;
	mf->mobj.size = num_pages * SMALL_PAGE_SIZE;
	mf->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&mf->mobj.refc, 0);
	mf->inactive_refs = 0;

	return mf;
}

#ifdef CFG_CORE_SEL1_SPMC
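/*
 * Allocates a new mobj during FFA_MEM_SHARE. If @cookie is
 * OPTEE_MSG_FMEM_INVALID_GLOBAL_ID a free cookie is allocated from
 * shm_bits, otherwise the hypervisor-supplied @cookie is recorded for
 * the current guest.
 */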
struct mobj_ffa *mobj_ffa_sel1_spmc_new(uint64_t cookie,
					unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID) {
		if (!(cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT))
			return NULL;
		if (virt_add_cookie_to_current_guest(cookie))
			return NULL;
	}

	mf = ffa_new(num_pages);
	if (!mf) {
		if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
			virt_remove_cookie(cookie);
		return NULL;
	}

	if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID) {
		mf->cookie = cookie;
		return mf;
	}

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	bit_ffc(shm_bits, NUM_SHMS, &i);
	if (i != -1) {
		bit_set(shm_bits, i);
		mf->cookie = i;
		mf->cookie |= FFA_MEMORY_HANDLE_NON_SECURE_BIT;
		/*
		 * Encode the partition ID into the handle so we know which
		 * partition to switch to when reclaiming a handle.
		 */
		mf->cookie |= SHIFT_U64(virt_get_current_guest_id(),
					FFA_MEMORY_HANDLE_PRTN_SHIFT);
	}
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (i == -1) {
		free(mf);
		return NULL;
	}

	return mf;
}
#endif /*CFG_CORE_SEL1_SPMC*/

static size_t get_page_count(struct mobj_ffa *mf)
{
	return ROUNDUP(mf->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
}

static bool cmp_cookie(struct mobj_ffa *mf, uint64_t cookie)
{
	return mf->cookie == cookie;
}

static bool cmp_ptr(struct mobj_ffa *mf, uint64_t ptr)
{
	return mf == (void *)(vaddr_t)ptr;
}
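
/*
 * Removes and returns the first element in @head for which @cmp_func()
 * returns true, or NULL if there's no match. Called with shm_lock held.
 */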
static struct mobj_ffa *pop_from_list(struct mobj_ffa_head *head,
				      bool (*cmp_func)(struct mobj_ffa *mf,
						       uint64_t val),
				      uint64_t val)
{
	struct mobj_ffa *mf = SLIST_FIRST(head);
	struct mobj_ffa *p = NULL;

	if (!mf)
		return NULL;

	if (cmp_func(mf, val)) {
		SLIST_REMOVE_HEAD(head, link);
		return mf;
	}

	while (true) {
		p = SLIST_NEXT(mf, link);
		if (!p)
			return NULL;
		if (cmp_func(p, val)) {
			SLIST_REMOVE_AFTER(mf, link);
			return p;
		}
		mf = p;
	}
}

static struct mobj_ffa *find_in_list(struct mobj_ffa_head *head,
				     bool (*cmp_func)(struct mobj_ffa *mf,
						      uint64_t val),
				     uint64_t val)
{
	struct mobj_ffa *mf = NULL;

	SLIST_FOREACH(mf, head, link)
		if (cmp_func(mf, val))
			return mf;

	return NULL;
}

#if defined(CFG_CORE_SEL1_SPMC)
void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mf)
{
	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION) ||
	    !(mf->cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT)) {
		uint64_t mask = FFA_MEMORY_HANDLE_NON_SECURE_BIT;
		uint32_t exceptions = 0;
		int64_t i = 0;

		if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
			mask |= SHIFT_U64(FFA_MEMORY_HANDLE_PRTN_MASK,
					  FFA_MEMORY_HANDLE_PRTN_SHIFT);
		i = mf->cookie & ~mask;
		assert(i >= 0 && i < NUM_SHMS);

		exceptions = cpu_spin_lock_xsave(&shm_lock);
		assert(bit_test(shm_bits, i));
		bit_clear(shm_bits, i);
		cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	}

	assert(!mf->mm);
	free(mf);
}
#else /* !defined(CFG_CORE_SEL1_SPMC) */
struct mobj_ffa *mobj_ffa_spmc_new(uint64_t cookie, unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;

	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
	mf = ffa_new(num_pages);
	if (mf)
		mf->cookie = cookie;
	return mf;
}

void mobj_ffa_spmc_delete(struct mobj_ffa *mf)
{
	free(mf);
}
#endif /* !defined(CFG_CORE_SEL1_SPMC) */
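
/*
 * Records the physical pages backing @mf, starting at index *@idx, and
 * advances *@idx. Unless the SPMC is at S-EL2 the pages must be
 * non-secure memory.
 */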
TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mf, unsigned int *idx,
				 paddr_t pa, unsigned int num_pages)
{
	unsigned int n = 0;
	size_t tot_page_count = get_page_count(mf);

	if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
	    !core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))
		return TEE_ERROR_BAD_PARAMETERS;

	for (n = 0; n < num_pages; n++)
		mf->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;

	(*idx) += n;
	return TEE_SUCCESS;
}

uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mf)
{
	return mf->cookie;
}
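
/*
 * Publishes a fully initialized @mf by inserting it into the inactive
 * list and returns the cookie identifying it.
 */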
uint64_t mobj_ffa_push_to_inactive(struct mobj_ffa *mf)
{
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	assert(!find_in_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf));
	assert(!find_in_list(&shm_inactive_head, cmp_cookie, mf->cookie));
	assert(!find_in_list(&shm_head, cmp_cookie, mf->cookie));
	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return mf->cookie;
}

static void unmap_helper(struct mobj_ffa *mf)
{
	if (mf->mm) {
		core_mmu_unmap_pages(tee_mm_get_smem(mf->mm),
				     get_page_count(mf));
		tee_mm_free(mf->mm);
		mf->mm = NULL;
	}
}

#ifdef CFG_CORE_SEL1_SPMC
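/*
 * Handles FFA_MEM_RECLAIM: frees the mobj identified by @cookie,
 * provided it's neither active nor referenced, see the life cycle
 * description above.
 */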
TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * reclaimed.
	 */
	if (mf) {
		DMSG("cookie %#"PRIx64" busy refc %u",
		     cookie, refcount_val(&mf->mobj.refc));
		res = TEE_ERROR_BUSY;
		goto out;
	}

	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	if (!mf) {
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	/*
	 * The mobj is busy if it has been registered via
	 * mobj_ffa_get_by_cookie() but not yet unregistered with
	 * mobj_ffa_unregister_by_cookie().
	 */
	if (mf->inactive_refs) {
		DMSG("cookie %#"PRIx64" busy inactive_refs %u",
		     cookie, mf->inactive_refs);
		res = TEE_ERROR_BUSY;
		goto out;
	}

	if (!pop_from_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf))
		panic();
	res = TEE_SUCCESS;
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	if (!res) {
		mobj_ffa_sel1_spmc_delete(mf);
		virt_remove_cookie(cookie);
	}
	return res;
}
#endif /*CFG_CORE_SEL1_SPMC*/
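
/*
 * Drops the registration obtained with mobj_ffa_get_by_cookie() so that
 * the mobj identified by @cookie can be reclaimed (S-EL1 SPMC) or is
 * relinquished to the SPMC (S-EL2/EL3 SPMC).
 */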
TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * unregistered.
	 */
	if (mf) {
		EMSG("cookie %#"PRIx64" busy refc %u:%u",
		     cookie, refcount_val(&mf->mobj.refc), mf->inactive_refs);
		res = TEE_ERROR_BUSY;
		goto out;
	}
	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	/*
	 * The mobj isn't found, or it has already been unregistered.
	 */
	if (!mf) {
		EMSG("cookie %#"PRIx64" not found", cookie);
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
#if defined(CFG_CORE_SEL1_SPMC)
	if (!mf->registered_by_cookie) {
		EMSG("cookie %#"PRIx64" not registered", cookie);
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	assert(mf->inactive_refs);
	mf->inactive_refs--;
	mf->registered_by_cookie = false;
#else
	if (mf->inactive_refs) {
		EMSG("cookie %#"PRIx64" busy refc %u:%u",
		     cookie, refcount_val(&mf->mobj.refc), mf->inactive_refs);
		res = TEE_ERROR_BUSY;
		goto out;
	}
	mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
	mobj_ffa_spmc_delete(mf);
	thread_spmc_relinquish(cookie);
#endif
	res = TEE_SUCCESS;

out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	return res;
}
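
/*
 * Finds and activates the mobj identified by @cookie, taking a
 * reference that must be released with mobj_put(). @internal_offs is
 * the offset into the first page at which the shared buffer starts.
 *
 * Illustrative caller sketch only (sz and offs are assumed variables):
 *
 *	mobj = mobj_ffa_get_by_cookie(cookie, offs & SMALL_PAGE_MASK);
 *	if (mobj && !mobj_inc_map(mobj)) {
 *		va = mobj_get_va(mobj, 0, sz);
 *		// ... access the shared buffer ...
 *		mobj_dec_map(mobj);
 *	}
 *	if (mobj)
 *		mobj_put(mobj);
 */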
struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie,
				    unsigned int internal_offs)
{
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	if (internal_offs >= SMALL_PAGE_SIZE)
		return NULL;
	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	if (mf) {
		if (mf->page_offset == internal_offs) {
			if (!refcount_inc(&mf->mobj.refc)) {
				/*
				 * The refcount is 0 if some other thread
				 * has called mobj_put() and the refcount
				 * reached 0, but we found the mobj before
				 * ffa_inactivate() got the lock. Let's
				 * reinitialize it.
				 */
				refcount_set(&mf->mobj.refc, 1);
				mf->inactive_refs++;
			}
			DMSG("cookie %#"PRIx64" active: refc %u:%u",
			     cookie, refcount_val(&mf->mobj.refc),
			     mf->inactive_refs);
		} else {
			EMSG("cookie %#"PRIx64" mismatching internal_offs got %#"PRIx16" expected %#x",
			     cookie, mf->page_offset, internal_offs);
			mf = NULL;
		}
	} else {
		mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
#if !defined(CFG_CORE_SEL1_SPMC)
		/* Try to retrieve it from the SPM at S-EL2 */
		if (mf) {
			DMSG("cookie %#"PRIx64" resurrecting", cookie);
		} else {
			DMSG("Populating mobj from rx buffer, cookie %#"PRIx64,
			     cookie);
			mf = thread_spmc_populate_mobj_from_rx(cookie);
		}
#endif
		if (mf) {
#if defined(CFG_CORE_SEL1_SPMC)
			if (!mf->registered_by_cookie) {
				mf->inactive_refs++;
				mf->registered_by_cookie = true;
			}
#endif
			assert(refcount_val(&mf->mobj.refc) == 0);
			refcount_set(&mf->mobj.refc, 1);
			refcount_set(&mf->mapcount, 0);
			mf->inactive_refs++;

			/*
			 * mf->page_offset is the offset into the first
			 * page. This offset is assigned from the
			 * internal_offs parameter to this function.
			 *
			 * While a mobj_ffa is active (refc > 0) this
			 * will not change, but when it's pushed to the
			 * inactive list it can be changed again.
			 *
			 * So below we're backing out the old
			 * mf->page_offset and then assigning a new one
			 * from internal_offs.
			 */
			mf->mobj.size += mf->page_offset;
			assert(!(mf->mobj.size & SMALL_PAGE_MASK));
			mf->mobj.size -= internal_offs;
			mf->page_offset = internal_offs;

			SLIST_INSERT_HEAD(&shm_head, mf, link);
		}
	}

	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (!mf) {
		EMSG("Failed to get cookie %#"PRIx64" internal_offs %#x",
		     cookie, internal_offs);
		return NULL;
	}
	return &mf->mobj;
}
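
/*
 * Returns the physical address at @offset into the shared buffer. A
 * @granule of 0 gives the exact address while SMALL_PAGE_SIZE gives the
 * base address of the containing page.
 */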
static TEE_Result ffa_get_pa(struct mobj *mobj, size_t offset,
			     size_t granule, paddr_t *pa)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	size_t full_offset = 0;
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offset >= mobj->size)
		return TEE_ERROR_GENERIC;

	full_offset = offset + mf->page_offset;
	switch (granule) {
	case 0:
		p = mf->pages[full_offset / SMALL_PAGE_SIZE] +
		    (full_offset & SMALL_PAGE_MASK);
		break;
	case SMALL_PAGE_SIZE:
		p = mf->pages[full_offset / SMALL_PAGE_SIZE];
		break;
	default:
		return TEE_ERROR_GENERIC;
	}
	*pa = p;

	return TEE_SUCCESS;
}

static size_t ffa_get_phys_offs(struct mobj *mobj,
				size_t granule __maybe_unused)
{
	assert(granule >= mobj->phys_granule);

	return to_mobj_ffa(mobj)->page_offset;
}

static void *ffa_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);

	if (!mf->mm || !mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return (void *)(tee_mm_get_smem(mf->mm) + offset + mf->page_offset);
}
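
/*
 * Called via mobj_put() when mobj.refc reaches 0: unmaps the mobj and
 * moves it back to the inactive list, unless another thread has
 * resurrected it in the meantime.
 */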
static void ffa_inactivate(struct mobj *mobj)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	/*
	 * If the refcount isn't 0, some other thread has found this mobj
	 * in shm_head after the mobj_put() that put us here and before we
	 * got the lock.
	 */
	if (refcount_val(&mobj->refc)) {
		DMSG("cookie %#"PRIx64" was resurrected", mf->cookie);
		goto out;
	}

	/*
	 * pop_from_list() can fail to find the mobj if we had just
	 * decreased the refcount to 0 in mobj_put() and were about to
	 * acquire the shm_lock when another thread found this mobj and
	 * reinitialized the refcount to 1. Then, before we got CPU time,
	 * the other thread called mobj_put() and deactivated the mobj
	 * again.
	 *
	 * However, we still have the inactive count that guarantees
	 * that the mobj can't be freed until it reaches 0.
	 * At this point the mobj is in the inactive list.
	 */
	if (pop_from_list(&shm_head, cmp_ptr, (vaddr_t)mf)) {
		unmap_helper(mf);
		SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
	}
out:
	if (!mf->inactive_refs)
		panic();
	mf->inactive_refs--;
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
}

static TEE_Result ffa_get_mem_type(struct mobj *mobj __unused, uint32_t *mt)
{
	if (!mt)
		return TEE_ERROR_GENERIC;

	*mt = TEE_MATTR_MEM_TYPE_CACHED;

	return TEE_SUCCESS;
}

static bool ffa_matches(struct mobj *mobj __maybe_unused, enum buf_is_attr attr)
{
	assert(mobj->ops == &mobj_ffa_ops);

	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

static uint64_t ffa_get_cookie(struct mobj *mobj)
{
	return to_mobj_ffa(mobj)->cookie;
}
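
/*
 * Maps the pages of the mobj into the tee_mm_shm virtual address space
 * pool on the first call, subsequent calls only increase mapcount.
 */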
static TEE_Result ffa_inc_map(struct mobj *mobj)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;
	size_t sz = 0;

	while (true) {
		if (refcount_inc(&mf->mapcount))
			return TEE_SUCCESS;

		exceptions = cpu_spin_lock_xsave(&shm_lock);

		if (!refcount_val(&mf->mapcount))
			break; /* continue to reinitialize */
		/*
		 * If another thread beat us to initialize mapcount,
		 * restart to make sure we still increase it.
		 */
		cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	}

	/*
	 * If we have beaten another thread calling ffa_dec_map()
	 * to get the lock, we only need to reinitialize mapcount to 1.
	 */
	if (!mf->mm) {
		sz = ROUNDUP(mobj->size + mf->page_offset, SMALL_PAGE_SIZE);
		mf->mm = tee_mm_alloc(&tee_mm_shm, sz);
		if (!mf->mm) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto out;
		}

		res = core_mmu_map_pages(tee_mm_get_smem(mf->mm), mf->pages,
					 sz / SMALL_PAGE_SIZE,
					 MEM_AREA_NSEC_SHM);
		if (res) {
			tee_mm_free(mf->mm);
			mf->mm = NULL;
			goto out;
		}
	}

	refcount_set(&mf->mapcount, 1);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return res;
}

static TEE_Result ffa_dec_map(struct mobj *mobj)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;

	if (!refcount_dec(&mf->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	if (!refcount_val(&mf->mapcount))
		unmap_helper(mf);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return TEE_SUCCESS;
}
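
/*
 * Sets up the tee_mm_shm pool covering MEM_AREA_SHM_VASPACE from which
 * ffa_inc_map() allocates virtual address space.
 */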
static TEE_Result mapped_shm_init(void)
{
	vaddr_t pool_start = 0;
	vaddr_t pool_end = 0;

	core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
	if (!pool_start || !pool_end)
		panic("Can't find region for shmem pool");

	if (!tee_mm_init(&tee_mm_shm, pool_start, pool_end - pool_start,
			 SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("Could not create shmem pool");

	DMSG("Shared memory address range: %#"PRIxVA", %#"PRIxVA,
	     pool_start, pool_end);
	return TEE_SUCCESS;
}

static const struct mobj_ops mobj_ffa_ops = {
	.get_pa = ffa_get_pa,
	.get_phys_offs = ffa_get_phys_offs,
	.get_va = ffa_get_va,
	.get_mem_type = ffa_get_mem_type,
	.matches = ffa_matches,
	.free = ffa_inactivate,
	.get_cookie = ffa_get_cookie,
	.inc_map = ffa_inc_map,
	.dec_map = ffa_dec_map,
};

preinit(mapped_shm_init);