xref: /optee_os/core/arch/arm/mm/mobj_ffa.c (revision a0f3154cfa75eda772785dfcb586b916514d7007)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2020, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <bitstring.h>
8 #include <config.h>
9 #include <ffa.h>
10 #include <initcall.h>
11 #include <kernel/refcount.h>
12 #include <kernel/spinlock.h>
13 #include <kernel/thread_spmc.h>
14 #include <kernel/virtualization.h>
15 #include <mm/mobj.h>
16 #include <sys/queue.h>
17 
18 /*
19  * Life cycle of struct mobj_ffa
20  *
21  * SPMC at S-EL1 (CFG_CORE_SEL1_SPMC=y)
22  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23  * During FFA_MEM_SHARE allocated in mobj_ffa_sel1_spmc_new() and finally
24  * added to the inactive list at the end of add_mem_share() once
25  * successfully filled in.
26  *	registered_by_cookie = false
27  *	mobj.refs.val = 0
28  *	inactive_refs = 0
29  *
30  * During FFA_MEM_RECLAIM reclaimed/freed using
31  * mobj_ffa_sel1_spmc_reclaim().  This will always succeed if the normal
32  * world is only calling this when all other threads are done with the
33  * shared memory object. However, there are some conditions that must be
34  * met to make sure that this is the case:
35  *	mobj not in the active list, else -> return TEE_ERROR_BUSY
36  *	mobj not in inactive list, else -> return TEE_ERROR_ITEM_NOT_FOUND
37  *	mobj inactive_refs is 0, else -> return TEE_ERROR_BUSY
38  *
39  * mobj is activated using mobj_ffa_get_by_cookie() which unless the mobj
40  * is active already:
41  * - move the mobj into the active list
42  * - if not registered_by_cookie ->
43  *	set registered_by_cookie and increase inactive_refs
44  * - set mobj.refc.val to 1
45  * - increase inactive_refs
46  *
47  * A previously activated mobj is made ready for reclaim using
48  * mobj_ffa_unregister_by_cookie() which only succeeds if the mobj is in
49  * the inactive list and registered_by_cookie is set and then:
50  * - clears registered_by_cookie
51  * - decreases inactive_refs
52  *
53  * Each successful call to mobj_ffa_get_by_cookie() must be matched by a
54  * call to mobj_put(). If the mobj.refc.val reaches 0 it's
55  * - moved to the inactive list
56  * - inactive_refs is decreased
57  *
58  * SPMC at S-EL2/EL3 (CFG_CORE_SEL1_SPMC=n)
59  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
60  * mobj is activated/allocated using mobj_ffa_get_by_cookie() which if
61  * already active only is
62  * - increasing mobj.refc.val and inactive_refs
63  * if found in inactive list is
64  * - setting mobj.refc.val to 1
65  * - increasing inactive_refs
66  * - moved into active list
67  * if not found is created using thread_spmc_populate_mobj_from_rx() and
68  * then:
69  * - setting mobj.refc.val to 1
70  * - increasing inactive_refs
71  * - moved into active list
72  *
73  * A previously activated mobj is relinquished using
74  * mobj_ffa_unregister_by_cookie() which only succeeds if the mobj is in
75  * the inactive list and inactive_refs is 1
76  */
77 struct mobj_ffa {
78 	struct mobj mobj;
79 	SLIST_ENTRY(mobj_ffa) link;
80 	uint64_t cookie;
81 	tee_mm_entry_t *mm;
82 	struct refcount mapcount;
83 	unsigned int inactive_refs;
84 	uint16_t page_offset;
85 #ifdef CFG_CORE_SEL1_SPMC
86 	bool registered_by_cookie;
87 #endif
88 	paddr_t pages[];
89 };
90 
91 SLIST_HEAD(mobj_ffa_head, mobj_ffa);
92 
93 #ifdef CFG_CORE_SEL1_SPMC
94 #ifdef CFG_NS_VIRTUALIZATION
95 static bitstr_t *get_shm_bits(void)
96 {
97 	return virt_get_shm_bits();
98 }
99 #else
100 static bitstr_t bit_decl(__shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT);
101 
102 static bitstr_t *get_shm_bits(void)
103 {
104 	return __shm_bits;
105 }
106 #endif
107 #endif
108 
109 static struct mobj_ffa_head shm_head = SLIST_HEAD_INITIALIZER(shm_head);
110 static struct mobj_ffa_head shm_inactive_head =
111 	SLIST_HEAD_INITIALIZER(shm_inactive_head);
112 
113 static unsigned int shm_lock = SPINLOCK_UNLOCK;
114 
115 static const struct mobj_ops mobj_ffa_ops;
116 
117 static struct mobj_ffa *to_mobj_ffa(struct mobj *mobj)
118 {
119 	assert(mobj->ops == &mobj_ffa_ops);
120 	return container_of(mobj, struct mobj_ffa, mobj);
121 }
122 
123 static size_t shm_size(size_t num_pages)
124 {
125 	size_t s = 0;
126 
127 	if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
128 		return 0;
129 	if (ADD_OVERFLOW(sizeof(struct mobj_ffa), s, &s))
130 		return 0;
131 	return s;
132 }
133 
134 static struct mobj_ffa *ffa_new(unsigned int num_pages)
135 {
136 	struct mobj_ffa *mf = NULL;
137 	size_t s = 0;
138 
139 	if (!num_pages)
140 		return NULL;
141 
142 	s = shm_size(num_pages);
143 	if (!s)
144 		return NULL;
145 	mf = calloc(1, s);
146 	if (!mf)
147 		return NULL;
148 
149 	mf->mobj.ops = &mobj_ffa_ops;
150 	mf->mobj.size = num_pages * SMALL_PAGE_SIZE;
151 	mf->mobj.phys_granule = SMALL_PAGE_SIZE;
152 	refcount_set(&mf->mobj.refc, 0);
153 	mf->inactive_refs = 0;
154 
155 	return mf;
156 }
157 
158 #ifdef CFG_CORE_SEL1_SPMC
159 struct mobj_ffa *mobj_ffa_sel1_spmc_new(uint64_t cookie,
160 					unsigned int num_pages)
161 {
162 	struct mobj_ffa *mf = NULL;
163 	bitstr_t *shm_bits = NULL;
164 	uint32_t exceptions = 0;
165 	int i = 0;
166 
167 	if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID) {
168 		if (!(cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT))
169 			return NULL;
170 		if (virt_add_cookie_to_current_guest(cookie))
171 			return NULL;
172 	}
173 
174 	mf = ffa_new(num_pages);
175 	if (!mf) {
176 		if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
177 			virt_remove_cookie(cookie);
178 		return NULL;
179 	}
180 
181 	if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID) {
182 		mf->cookie = cookie;
183 		return mf;
184 	}
185 
186 	shm_bits = get_shm_bits();
187 	exceptions = cpu_spin_lock_xsave(&shm_lock);
188 	bit_ffc(shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT, &i);
189 	if (i != -1) {
190 		bit_set(shm_bits, i);
191 		mf->cookie = i;
192 		mf->cookie |= FFA_MEMORY_HANDLE_NON_SECURE_BIT;
193 		/*
194 		 * Encode the partition ID into the handle so we know which
195 		 * partition to switch to when reclaiming a handle.
196 		 */
197 		mf->cookie |= SHIFT_U64(virt_get_current_guest_id(),
198 					FFA_MEMORY_HANDLE_PRTN_SHIFT);
199 	}
200 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
201 
202 	if (i == -1) {
203 		free(mf);
204 		return NULL;
205 	}
206 
207 	return mf;
208 }
209 #endif /*CFG_CORE_SEL1_SPMC*/
210 
211 static size_t get_page_count(struct mobj_ffa *mf)
212 {
213 	return ROUNDUP_DIV(mf->mobj.size, SMALL_PAGE_SIZE);
214 }
215 
216 static bool cmp_cookie(struct mobj_ffa *mf, uint64_t cookie)
217 {
218 	return mf->cookie == cookie;
219 }
220 
221 static bool cmp_ptr(struct mobj_ffa *mf, uint64_t ptr)
222 {
223 	return mf == (void *)(vaddr_t)ptr;
224 }
225 
226 static struct mobj_ffa *pop_from_list(struct mobj_ffa_head *head,
227 				      bool (*cmp_func)(struct mobj_ffa *mf,
228 						       uint64_t val),
229 				      uint64_t val)
230 {
231 	struct mobj_ffa *mf = SLIST_FIRST(head);
232 	struct mobj_ffa *p = NULL;
233 
234 	if (!mf)
235 		return NULL;
236 
237 	if (cmp_func(mf, val)) {
238 		SLIST_REMOVE_HEAD(head, link);
239 		return mf;
240 	}
241 
242 	while (true) {
243 		p = SLIST_NEXT(mf, link);
244 		if (!p)
245 			return NULL;
246 		if (cmp_func(p, val)) {
247 			SLIST_REMOVE_AFTER(mf, link);
248 			return p;
249 		}
250 		mf = p;
251 	}
252 }
253 
254 static struct mobj_ffa *find_in_list(struct mobj_ffa_head *head,
255 				     bool (*cmp_func)(struct mobj_ffa *mf,
256 						      uint64_t val),
257 				     uint64_t val)
258 {
259 	struct mobj_ffa *mf = NULL;
260 
261 	SLIST_FOREACH(mf, head, link)
262 		if (cmp_func(mf, val))
263 			return mf;
264 
265 	return NULL;
266 }
267 
268 #if defined(CFG_CORE_SEL1_SPMC)
269 void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mf)
270 {
271 
272 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION) ||
273 	    !(mf->cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT)) {
274 		uint64_t mask = FFA_MEMORY_HANDLE_NON_SECURE_BIT;
275 		bitstr_t *shm_bits = get_shm_bits();
276 		uint32_t exceptions = 0;
277 		int64_t i = 0;
278 
279 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
280 			mask |= SHIFT_U64(FFA_MEMORY_HANDLE_PRTN_MASK,
281 					  FFA_MEMORY_HANDLE_PRTN_SHIFT);
282 		i = mf->cookie & ~mask;
283 		assert(i >= 0 && i < SPMC_CORE_SEL1_MAX_SHM_COUNT);
284 
285 		exceptions = cpu_spin_lock_xsave(&shm_lock);
286 		assert(bit_test(shm_bits, i));
287 		bit_clear(shm_bits, i);
288 		cpu_spin_unlock_xrestore(&shm_lock, exceptions);
289 	}
290 
291 	assert(!mf->mm);
292 	free(mf);
293 }
294 #else /* !defined(CFG_CORE_SEL1_SPMC) */
295 struct mobj_ffa *mobj_ffa_spmc_new(uint64_t cookie, unsigned int num_pages)
296 {
297 	struct mobj_ffa *mf = NULL;
298 
299 	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
300 	mf = ffa_new(num_pages);
301 	if (mf)
302 		mf->cookie = cookie;
303 	return mf;
304 }
305 
306 void mobj_ffa_spmc_delete(struct mobj_ffa *mf)
307 {
308 	free(mf);
309 }
310 #endif /* !defined(CFG_CORE_SEL1_SPMC) */
311 
312 TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mf, unsigned int *idx,
313 				 paddr_t pa, unsigned int num_pages)
314 {
315 	unsigned int n = 0;
316 	size_t tot_page_count = get_page_count(mf);
317 
318 	if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
319 		return TEE_ERROR_BAD_PARAMETERS;
320 
321 	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
322 	    !core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))
323 		return TEE_ERROR_BAD_PARAMETERS;
324 
325 	for (n = 0; n < num_pages; n++)
326 		mf->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;
327 
328 	(*idx) += n;
329 	return TEE_SUCCESS;
330 }
331 
332 uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mf)
333 {
334 	return mf->cookie;
335 }
336 
337 uint64_t mobj_ffa_push_to_inactive(struct mobj_ffa *mf)
338 {
339 	uint32_t exceptions = 0;
340 
341 	exceptions = cpu_spin_lock_xsave(&shm_lock);
342 	assert(!find_in_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf));
343 	assert(!find_in_list(&shm_inactive_head, cmp_cookie, mf->cookie));
344 	assert(!find_in_list(&shm_head, cmp_cookie, mf->cookie));
345 	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
346 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
347 
348 	return mf->cookie;
349 }
350 
351 static void unmap_helper(struct mobj_ffa *mf)
352 {
353 	if (mf->mm) {
354 		core_mmu_unmap_pages(tee_mm_get_smem(mf->mm),
355 				     get_page_count(mf));
356 		tee_mm_free(mf->mm);
357 		mf->mm = NULL;
358 	}
359 }
360 
361 #ifdef CFG_CORE_SEL1_SPMC
362 TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie)
363 {
364 	TEE_Result res = TEE_SUCCESS;
365 	struct mobj_ffa *mf = NULL;
366 	uint32_t exceptions = 0;
367 
368 	exceptions = cpu_spin_lock_xsave(&shm_lock);
369 	mf = find_in_list(&shm_head, cmp_cookie, cookie);
370 	/*
371 	 * If the mobj is found here it's still active and cannot be
372 	 * reclaimed.
373 	 */
374 	if (mf) {
375 		DMSG("cookie %#"PRIx64" busy refc %u",
376 		     cookie, refcount_val(&mf->mobj.refc));
377 		res = TEE_ERROR_BUSY;
378 		goto out;
379 	}
380 
381 	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
382 	if (!mf) {
383 		res = TEE_ERROR_ITEM_NOT_FOUND;
384 		goto out;
385 	}
386 	/*
387 	 * If the mobj has been registered via mobj_ffa_get_by_cookie()
388 	 * but not unregistered yet with mobj_ffa_unregister_by_cookie().
389 	 */
390 	if (mf->inactive_refs) {
391 		DMSG("cookie %#"PRIx64" busy inactive_refs %u",
392 		     cookie, mf->inactive_refs);
393 		res = TEE_ERROR_BUSY;
394 		goto out;
395 	}
396 
397 	if (!pop_from_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf))
398 		panic();
399 	res = TEE_SUCCESS;
400 out:
401 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
402 	if (!res) {
403 		mobj_ffa_sel1_spmc_delete(mf);
404 		virt_remove_cookie(cookie);
405 	}
406 	return res;
407 }
408 #endif /*CFG_CORE_SEL1_SPMC*/
409 
410 TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie)
411 {
412 	TEE_Result res = TEE_SUCCESS;
413 	struct mobj_ffa *mf = NULL;
414 	uint32_t exceptions = 0;
415 
416 	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
417 	exceptions = cpu_spin_lock_xsave(&shm_lock);
418 	mf = find_in_list(&shm_head, cmp_cookie, cookie);
419 	/*
420 	 * If the mobj is found here it's still active and cannot be
421 	 * unregistered.
422 	 */
423 	if (mf) {
424 		EMSG("cookie %#"PRIx64" busy refc %u:%u",
425 		     cookie, refcount_val(&mf->mobj.refc), mf->inactive_refs);
426 		res = TEE_ERROR_BUSY;
427 		goto out;
428 	}
429 	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
430 	/*
431 	 * If the mobj isn't found or if it already has been unregistered.
432 	 */
433 	if (!mf) {
434 		EMSG("cookie %#"PRIx64" not found", cookie);
435 		res = TEE_ERROR_ITEM_NOT_FOUND;
436 		goto out;
437 	}
438 #if defined(CFG_CORE_SEL1_SPMC)
439 	if (!mf->registered_by_cookie) {
440 		/*
441 		 * This is expected behaviour if the normal world has
442 		 * registered the memory but OP-TEE has not yet used the
443 		 * corresponding cookie with mobj_ffa_get_by_cookie(). It
444 		 * can be non-trivial for the normal world to predict if
445 		 * the cookie really has been used or not. So even if we
446 		 * return it as an error it will be ignored by
447 		 * handle_unregister_shm().
448 		 */
449 		EMSG("cookie %#"PRIx64" not registered refs %u:%u",
450 		     cookie, refcount_val(&mf->mobj.refc), mf->inactive_refs);
451 		res = TEE_ERROR_ITEM_NOT_FOUND;
452 		goto out;
453 	}
454 	assert(mf->inactive_refs);
455 	mf->inactive_refs--;
456 	mf->registered_by_cookie = false;
457 #else
458 	if (mf->inactive_refs) {
459 		EMSG("cookie %#"PRIx64" busy refc %u:%u",
460 		     cookie, refcount_val(&mf->mobj.refc), mf->inactive_refs);
461 		res = TEE_ERROR_BUSY;
462 		goto out;
463 	}
464 	mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
465 	mobj_ffa_spmc_delete(mf);
466 	thread_spmc_relinquish(cookie);
467 #endif
468 	res = TEE_SUCCESS;
469 
470 out:
471 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
472 	return res;
473 }
474 
475 struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie,
476 				    unsigned int internal_offs)
477 {
478 	struct mobj_ffa *mf = NULL;
479 	uint32_t exceptions = 0;
480 
481 	if (internal_offs >= SMALL_PAGE_SIZE)
482 		return NULL;
483 	exceptions = cpu_spin_lock_xsave(&shm_lock);
484 	mf = find_in_list(&shm_head, cmp_cookie, cookie);
485 	if (mf) {
486 		if (mf->page_offset == internal_offs) {
487 			if (!refcount_inc(&mf->mobj.refc)) {
488 				/*
489 				 * If refcount is 0 some other thread has
490 				 * called mobj_put() on this reached 0 and
491 				 * before ffa_inactivate() got the lock we
492 				 * found it. Let's reinitialize it.
493 				 */
494 				refcount_set(&mf->mobj.refc, 1);
495 				mf->inactive_refs++;
496 			}
497 			DMSG("cookie %#"PRIx64" active: refc %u:%u",
498 			     cookie, refcount_val(&mf->mobj.refc),
499 			     mf->inactive_refs);
500 		} else {
501 			EMSG("cookie %#"PRIx64" mismatching internal_offs got %#"PRIx16" expected %#x",
502 			     cookie, mf->page_offset, internal_offs);
503 			mf = NULL;
504 		}
505 	} else {
506 		mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
507 #if !defined(CFG_CORE_SEL1_SPMC)
508 		/* Try to retrieve it from the SPM at S-EL2 */
509 		if (mf) {
510 			DMSG("cookie %#"PRIx64" resurrecting", cookie);
511 		} else {
512 			DMSG("Populating mobj from rx buffer, cookie %#"PRIx64,
513 			     cookie);
514 			mf = thread_spmc_populate_mobj_from_rx(cookie);
515 		}
516 #endif
517 		if (mf) {
518 #if defined(CFG_CORE_SEL1_SPMC)
519 			if (!mf->registered_by_cookie) {
520 				mf->inactive_refs++;
521 				mf->registered_by_cookie = true;
522 			}
523 #endif
524 			assert(refcount_val(&mf->mobj.refc) == 0);
525 			refcount_set(&mf->mobj.refc, 1);
526 			refcount_set(&mf->mapcount, 0);
527 			mf->inactive_refs++;
528 
529 			/*
530 			 * mf->page_offset is offset into the first page.
531 			 * This offset is assigned from the internal_offs
532 			 * parameter to this function.
533 			 *
534 			 * While a mobj_ffa is active (ref_count > 0) this
535 			 * will not change, but when being pushed to the
536 			 * inactive list it can be changed again.
537 			 *
538 			 * So below we're backing out the old
539 			 * mf->page_offset and then assigning a new from
540 			 * internal_offset.
541 			 */
542 			mf->mobj.size += mf->page_offset;
543 			assert(!(mf->mobj.size & SMALL_PAGE_MASK));
544 			mf->mobj.size -= internal_offs;
545 			mf->page_offset = internal_offs;
546 
547 			SLIST_INSERT_HEAD(&shm_head, mf, link);
548 		}
549 	}
550 
551 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
552 
553 	if (!mf) {
554 		EMSG("Failed to get cookie %#"PRIx64" internal_offs %#x",
555 		     cookie, internal_offs);
556 		return NULL;
557 	}
558 	return &mf->mobj;
559 }
560 
561 static TEE_Result ffa_get_pa(struct mobj *mobj, size_t offset,
562 			     size_t granule, paddr_t *pa)
563 {
564 	struct mobj_ffa *mf = to_mobj_ffa(mobj);
565 	size_t full_offset = 0;
566 	paddr_t p = 0;
567 
568 	if (!pa)
569 		return TEE_ERROR_GENERIC;
570 
571 	if (offset >= mobj->size)
572 		return TEE_ERROR_GENERIC;
573 
574 	full_offset = offset + mf->page_offset;
575 	switch (granule) {
576 	case 0:
577 		p = mf->pages[full_offset / SMALL_PAGE_SIZE] +
578 		    (full_offset & SMALL_PAGE_MASK);
579 		break;
580 	case SMALL_PAGE_SIZE:
581 		p = mf->pages[full_offset / SMALL_PAGE_SIZE];
582 		break;
583 	default:
584 		return TEE_ERROR_GENERIC;
585 	}
586 	*pa = p;
587 
588 	return TEE_SUCCESS;
589 }
590 
591 static size_t ffa_get_phys_offs(struct mobj *mobj,
592 				size_t granule __maybe_unused)
593 {
594 	assert(granule >= mobj->phys_granule);
595 
596 	return to_mobj_ffa(mobj)->page_offset;
597 }
598 
599 static void *ffa_get_va(struct mobj *mobj, size_t offset, size_t len)
600 {
601 	struct mobj_ffa *mf = to_mobj_ffa(mobj);
602 
603 	if (!mf->mm || !mobj_check_offset_and_len(mobj, offset, len))
604 		return NULL;
605 
606 	return (void *)(tee_mm_get_smem(mf->mm) + offset + mf->page_offset);
607 }
608 
609 static void ffa_inactivate(struct mobj *mobj)
610 {
611 	struct mobj_ffa *mf = to_mobj_ffa(mobj);
612 	uint32_t exceptions = 0;
613 
614 	exceptions = cpu_spin_lock_xsave(&shm_lock);
615 	/*
616 	 * If refcount isn't 0 some other thread has found this mobj in
617 	 * shm_head after the mobj_put() that put us here and before we got
618 	 * the lock.
619 	 */
620 	if (refcount_val(&mobj->refc)) {
621 		DMSG("cookie %#"PRIx64" was resurrected", mf->cookie);
622 		goto out;
623 	}
624 
625 	/*
626 	 * pop_from_list() can fail to find the mobj if we had just
627 	 * decreased the refcount to 0 in mobj_put() and was going to
628 	 * acquire the shm_lock but another thread found this mobj and
629 	 * reinitialized the refcount to 1. Then before we got cpu time the
630 	 * other thread called mobj_put() and deactivated the mobj again.
631 	 *
632 	 * However, we still have the inactive count that guarantees
633 	 * that the mobj can't be freed until it reaches 0.
634 	 * At this point the mobj is in the inactive list.
635 	 */
636 	if (pop_from_list(&shm_head, cmp_ptr, (vaddr_t)mf)) {
637 		unmap_helper(mf);
638 		SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
639 	}
640 out:
641 	if (!mf->inactive_refs)
642 		panic();
643 	mf->inactive_refs--;
644 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
645 }
646 
647 static TEE_Result ffa_get_mem_type(struct mobj *mobj __unused, uint32_t *mt)
648 {
649 	if (!mt)
650 		return TEE_ERROR_GENERIC;
651 
652 	*mt = TEE_MATTR_MEM_TYPE_CACHED;
653 
654 	return TEE_SUCCESS;
655 }
656 
657 static bool ffa_matches(struct mobj *mobj __maybe_unused, enum buf_is_attr attr)
658 {
659 	assert(mobj->ops == &mobj_ffa_ops);
660 
661 	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
662 }
663 
664 static uint64_t ffa_get_cookie(struct mobj *mobj)
665 {
666 	return to_mobj_ffa(mobj)->cookie;
667 }
668 
669 static TEE_Result ffa_inc_map(struct mobj *mobj)
670 {
671 	TEE_Result res = TEE_SUCCESS;
672 	struct mobj_ffa *mf = to_mobj_ffa(mobj);
673 	uint32_t exceptions = 0;
674 	size_t sz = 0;
675 
676 	while (true) {
677 		if (refcount_inc(&mf->mapcount))
678 			return TEE_SUCCESS;
679 
680 		exceptions = cpu_spin_lock_xsave(&shm_lock);
681 
682 		if (!refcount_val(&mf->mapcount))
683 			break; /* continue to reinitialize */
684 		/*
685 		 * If another thread beat us to initialize mapcount,
686 		 * restart to make sure we still increase it.
687 		 */
688 		cpu_spin_unlock_xrestore(&shm_lock, exceptions);
689 	}
690 
691 	/*
692 	 * If we have beated another thread calling ffa_dec_map()
693 	 * to get the lock we need only to reinitialize mapcount to 1.
694 	 */
695 	if (!mf->mm) {
696 		sz = ROUNDUP(mobj->size + mf->page_offset, SMALL_PAGE_SIZE);
697 		mf->mm = tee_mm_alloc(&core_virt_shm_pool, sz);
698 		if (!mf->mm) {
699 			res = TEE_ERROR_OUT_OF_MEMORY;
700 			goto out;
701 		}
702 
703 		res = core_mmu_map_pages(tee_mm_get_smem(mf->mm), mf->pages,
704 					 sz / SMALL_PAGE_SIZE,
705 					 MEM_AREA_NSEC_SHM);
706 		if (res) {
707 			tee_mm_free(mf->mm);
708 			mf->mm = NULL;
709 			goto out;
710 		}
711 	}
712 
713 	refcount_set(&mf->mapcount, 1);
714 out:
715 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
716 
717 	return res;
718 }
719 
720 static TEE_Result ffa_dec_map(struct mobj *mobj)
721 {
722 	struct mobj_ffa *mf = to_mobj_ffa(mobj);
723 	uint32_t exceptions = 0;
724 
725 	if (!refcount_dec(&mf->mapcount))
726 		return TEE_SUCCESS;
727 
728 	exceptions = cpu_spin_lock_xsave(&shm_lock);
729 	if (!refcount_val(&mf->mapcount))
730 		unmap_helper(mf);
731 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
732 
733 	return TEE_SUCCESS;
734 }
735 
736 static TEE_Result mapped_shm_init(void)
737 {
738 	vaddr_t pool_start = 0;
739 	vaddr_t pool_end = 0;
740 
741 	core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
742 	if (!pool_start || !pool_end)
743 		panic("Can't find region for shmem pool");
744 
745 	if (!tee_mm_init(&core_virt_shm_pool, pool_start, pool_end - pool_start,
746 			 SMALL_PAGE_SHIFT,
747 			 TEE_MM_POOL_NO_FLAGS))
748 		panic("Could not create shmem pool");
749 
750 	DMSG("Shared memory address range: %#"PRIxVA", %#"PRIxVA,
751 	     pool_start, pool_end);
752 	return TEE_SUCCESS;
753 }
754 
755 static const struct mobj_ops mobj_ffa_ops = {
756 	.get_pa = ffa_get_pa,
757 	.get_phys_offs = ffa_get_phys_offs,
758 	.get_va = ffa_get_va,
759 	.get_mem_type = ffa_get_mem_type,
760 	.matches = ffa_matches,
761 	.free = ffa_inactivate,
762 	.get_cookie = ffa_get_cookie,
763 	.inc_map = ffa_inc_map,
764 	.dec_map = ffa_dec_map,
765 };
766 
767 preinit(mapped_shm_init);
768