xref: /optee_os/core/arch/arm/mm/mobj_ffa.c (revision 070d197fa568917d4b32fa2b379098715016c52d)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2020, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <bitstring.h>
8 #include <config.h>
9 #include <ffa.h>
10 #include <initcall.h>
11 #include <kernel/refcount.h>
12 #include <kernel/spinlock.h>
13 #include <kernel/thread_spmc.h>
14 #include <kernel/virtualization.h>
15 #include <mm/mobj.h>
16 #include <sys/queue.h>
17 
18 /*
19  * Life cycle of struct mobj_ffa
20  *
21  * SPMC at S-EL1 (CFG_CORE_SEL1_SPMC=y)
22  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23  * During FFA_MEM_SHARE the mobj is allocated in mobj_ffa_sel1_spmc_new()
24  * and finally added to the inactive list at the end of add_mem_share()
25  * once it has been successfully filled in.
26  *	registered_by_cookie = false
27  *	mobj.refc.val = 0
28  *	inactive_refs = 0
29  *
30  * During FFA_MEM_RECLAIM the mobj is reclaimed/freed using
31  * mobj_ffa_sel1_spmc_reclaim().  This always succeeds provided the normal
32  * world only calls it when all other threads are done with the shared
33  * memory object. The following conditions must be met to make sure that
34  * this is the case:
35  *	mobj not in the active list, else -> return TEE_ERROR_BUSY
36  *	mobj not in inactive list, else -> return TEE_ERROR_ITEM_NOT_FOUND
37  *	mobj inactive_refs is 0, else -> return TEE_ERROR_BUSY
38  *
39  * A mobj is activated using mobj_ffa_get_by_cookie() which, unless the
40  * mobj is already active:
41  * - moves the mobj into the active list
42  * - if registered_by_cookie isn't set ->
43  *	sets registered_by_cookie and increases inactive_refs
44  * - sets mobj.refc.val to 1
45  * - increases inactive_refs
46  *
47  * A previously activated mobj is made ready for reclaim using
48  * mobj_ffa_unregister_by_cookie(), which only succeeds if the mobj is in
49  * the inactive list with registered_by_cookie set, and then:
50  * - clears registered_by_cookie
51  * - decreases inactive_refs
52  *
53  * Each successful call to mobj_ffa_get_by_cookie() must be matched by a
54  * call to mobj_put(). When mobj.refc.val reaches 0:
55  * - the mobj is moved to the inactive list
56  * - inactive_refs is decreased
57  *
58  * SPMC at S-EL2/EL3 (CFG_CORE_SEL1_SPMC=n)
59  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
60  * A mobj is activated/allocated using mobj_ffa_get_by_cookie() which, if
61  * the mobj is already active, only
62  * - increases mobj.refc.val (and inactive_refs if the refcount had reached 0)
63  * if the mobj is found in the inactive list
64  * - sets mobj.refc.val to 1
65  * - increases inactive_refs
66  * - moves it into the active list
67  * if not found, creates it using thread_spmc_populate_mobj_from_rx() and
68  * then:
69  * - sets mobj.refc.val to 1
70  * - increases inactive_refs
71  * - moves it into the active list
72  *
73  * A previously activated mobj is relinquished using
74  * mobj_ffa_unregister_by_cookie() which only succeeds if the mobj is in
75  * the inactive list and inactive_refs is 0
76  */
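/*
 * Illustrative sketch (not compiled here) of the expected usage from a
 * thread's point of view, following the life cycle described above. The
 * cookie is assumed to have been received from the normal world and
 * use_the_buffer() is a placeholder for any access via the mobj API:
 *
 *	struct mobj *mobj = mobj_ffa_get_by_cookie(cookie, 0);
 *
 *	if (mobj) {
 *		use_the_buffer(mobj);
 *		mobj_put(mobj);
 *	}
 */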
77 struct mobj_ffa {
78 	struct mobj mobj;
79 	SLIST_ENTRY(mobj_ffa) link;
80 	uint64_t cookie;
81 	tee_mm_entry_t *mm;
82 	struct refcount mapcount;
83 	unsigned int inactive_refs;
84 	uint16_t page_offset;
85 #ifdef CFG_CORE_SEL1_SPMC
86 	bool registered_by_cookie;
87 #endif
88 	paddr_t pages[];
89 };
90 
91 SLIST_HEAD(mobj_ffa_head, mobj_ffa);
92 
93 #ifdef CFG_CORE_SEL1_SPMC
94 static bitstr_t bit_decl(shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT);
95 #endif
96 
97 static struct mobj_ffa_head shm_head = SLIST_HEAD_INITIALIZER(shm_head);
98 static struct mobj_ffa_head shm_inactive_head =
99 	SLIST_HEAD_INITIALIZER(shm_inactive_head);
100 
101 static unsigned int shm_lock = SPINLOCK_UNLOCK;
102 
103 static const struct mobj_ops mobj_ffa_ops;
104 
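/*
 * Return the enclosing struct mobj_ffa, only valid for mobjs using
 * mobj_ffa_ops (checked with the assert below).
 */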
105 static struct mobj_ffa *to_mobj_ffa(struct mobj *mobj)
106 {
107 	assert(mobj->ops == &mobj_ffa_ops);
108 	return container_of(mobj, struct mobj_ffa, mobj);
109 }
110 
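/*
 * Size in bytes needed for a struct mobj_ffa holding @num_pages page
 * addresses, or 0 on arithmetic overflow.
 */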
111 static size_t shm_size(size_t num_pages)
112 {
113 	size_t s = 0;
114 
115 	if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
116 		return 0;
117 	if (ADD_OVERFLOW(sizeof(struct mobj_ffa), s, &s))
118 		return 0;
119 	return s;
120 }
121 
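/*
 * Allocate and initialize a struct mobj_ffa covering @num_pages pages.
 * The physical pages are added later with mobj_ffa_add_pages_at().
 */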
122 static struct mobj_ffa *ffa_new(unsigned int num_pages)
123 {
124 	struct mobj_ffa *mf = NULL;
125 	size_t s = 0;
126 
127 	if (!num_pages)
128 		return NULL;
129 
130 	s = shm_size(num_pages);
131 	if (!s)
132 		return NULL;
133 	mf = calloc(1, s);
134 	if (!mf)
135 		return NULL;
136 
137 	mf->mobj.ops = &mobj_ffa_ops;
138 	mf->mobj.size = num_pages * SMALL_PAGE_SIZE;
139 	mf->mobj.phys_granule = SMALL_PAGE_SIZE;
140 	refcount_set(&mf->mobj.refc, 0);
141 	mf->inactive_refs = 0;
142 
143 	return mf;
144 }
145 
146 #ifdef CFG_CORE_SEL1_SPMC
147 struct mobj_ffa *mobj_ffa_sel1_spmc_new(uint64_t cookie,
148 					unsigned int num_pages)
149 {
150 	struct mobj_ffa *mf = NULL;
151 	uint32_t exceptions = 0;
152 	int i = 0;
153 
154 	if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID) {
155 		if (!(cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT))
156 			return NULL;
157 		if (virt_add_cookie_to_current_guest(cookie))
158 			return NULL;
159 	}
160 
161 	mf = ffa_new(num_pages);
162 	if (!mf) {
163 		if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
164 			virt_remove_cookie(cookie);
165 		return NULL;
166 	}
167 
168 	if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID) {
169 		mf->cookie = cookie;
170 		return mf;
171 	}
172 
173 	exceptions = cpu_spin_lock_xsave(&shm_lock);
174 	bit_ffc(shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT, &i);
175 	if (i != -1) {
176 		bit_set(shm_bits, i);
177 		mf->cookie = i;
178 		mf->cookie |= FFA_MEMORY_HANDLE_NON_SECURE_BIT;
179 		/*
180 		 * Encode the partition ID into the handle so we know which
181 		 * partition to switch to when reclaiming a handle.
182 		 */
183 		mf->cookie |= SHIFT_U64(virt_get_current_guest_id(),
184 					FFA_MEMORY_HANDLE_PRTN_SHIFT);
185 	}
186 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
187 
188 	if (i == -1) {
189 		free(mf);
190 		return NULL;
191 	}
192 
193 	return mf;
194 }
195 #endif /*CFG_CORE_SEL1_SPMC*/
196 
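/* Number of SMALL_PAGE_SIZE pages covered by the mobj */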
197 static size_t get_page_count(struct mobj_ffa *mf)
198 {
199 	return ROUNDUP(mf->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
200 }
201 
202 static bool cmp_cookie(struct mobj_ffa *mf, uint64_t cookie)
203 {
204 	return mf->cookie == cookie;
205 }
206 
207 static bool cmp_ptr(struct mobj_ffa *mf, uint64_t ptr)
208 {
209 	return mf == (void *)(vaddr_t)ptr;
210 }
211 
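/*
 * Remove and return the first element in @head for which @cmp_func
 * returns true, or NULL if there's no match.
 */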
212 static struct mobj_ffa *pop_from_list(struct mobj_ffa_head *head,
213 				      bool (*cmp_func)(struct mobj_ffa *mf,
214 						       uint64_t val),
215 				      uint64_t val)
216 {
217 	struct mobj_ffa *mf = SLIST_FIRST(head);
218 	struct mobj_ffa *p = NULL;
219 
220 	if (!mf)
221 		return NULL;
222 
223 	if (cmp_func(mf, val)) {
224 		SLIST_REMOVE_HEAD(head, link);
225 		return mf;
226 	}
227 
228 	while (true) {
229 		p = SLIST_NEXT(mf, link);
230 		if (!p)
231 			return NULL;
232 		if (cmp_func(p, val)) {
233 			SLIST_REMOVE_AFTER(mf, link);
234 			return p;
235 		}
236 		mf = p;
237 	}
238 }
239 
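/* Like pop_from_list() but leaves a matching element in the list */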
240 static struct mobj_ffa *find_in_list(struct mobj_ffa_head *head,
241 				     bool (*cmp_func)(struct mobj_ffa *mf,
242 						      uint64_t val),
243 				     uint64_t val)
244 {
245 	struct mobj_ffa *mf = NULL;
246 
247 	SLIST_FOREACH(mf, head, link)
248 		if (cmp_func(mf, val))
249 			return mf;
250 
251 	return NULL;
252 }
253 
254 #if defined(CFG_CORE_SEL1_SPMC)
255 void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mf)
256 {
257 
258 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION) ||
259 	    !(mf->cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT)) {
260 		uint64_t mask = FFA_MEMORY_HANDLE_NON_SECURE_BIT;
261 		uint32_t exceptions = 0;
262 		int64_t i = 0;
263 
264 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
265 			mask |= SHIFT_U64(FFA_MEMORY_HANDLE_PRTN_MASK,
266 					  FFA_MEMORY_HANDLE_PRTN_SHIFT);
267 		i = mf->cookie & ~mask;
268 		assert(i >= 0 && i < SPMC_CORE_SEL1_MAX_SHM_COUNT);
269 
270 		exceptions = cpu_spin_lock_xsave(&shm_lock);
271 		assert(bit_test(shm_bits, i));
272 		bit_clear(shm_bits, i);
273 		cpu_spin_unlock_xrestore(&shm_lock, exceptions);
274 	}
275 
276 	assert(!mf->mm);
277 	free(mf);
278 }
279 #else /* !defined(CFG_CORE_SEL1_SPMC) */
280 struct mobj_ffa *mobj_ffa_spmc_new(uint64_t cookie, unsigned int num_pages)
281 {
282 	struct mobj_ffa *mf = NULL;
283 
284 	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
285 	mf = ffa_new(num_pages);
286 	if (mf)
287 		mf->cookie = cookie;
288 	return mf;
289 }
290 
291 void mobj_ffa_spmc_delete(struct mobj_ffa *mf)
292 {
293 	free(mf);
294 }
295 #endif /* !defined(CFG_CORE_SEL1_SPMC) */
296 
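/*
 * Record @num_pages physical pages starting at @pa, beginning at index
 * *@idx in the page array. Unless the SPMC runs at S-EL2 the pages must
 * be non-secure memory. *@idx is advanced on success.
 */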
297 TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mf, unsigned int *idx,
298 				 paddr_t pa, unsigned int num_pages)
299 {
300 	unsigned int n = 0;
301 	size_t tot_page_count = get_page_count(mf);
302 
303 	if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
304 		return TEE_ERROR_BAD_PARAMETERS;
305 
306 	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
307 	    !core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))
308 		return TEE_ERROR_BAD_PARAMETERS;
309 
310 	for (n = 0; n < num_pages; n++)
311 		mf->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;
312 
313 	(*idx) += n;
314 	return TEE_SUCCESS;
315 }
316 
317 uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mf)
318 {
319 	return mf->cookie;
320 }
321 
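/*
 * Insert a fully initialized mobj into the inactive list and return its
 * cookie. The mobj must not already be present in any of the lists.
 */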
322 uint64_t mobj_ffa_push_to_inactive(struct mobj_ffa *mf)
323 {
324 	uint32_t exceptions = 0;
325 
326 	exceptions = cpu_spin_lock_xsave(&shm_lock);
327 	assert(!find_in_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf));
328 	assert(!find_in_list(&shm_inactive_head, cmp_cookie, mf->cookie));
329 	assert(!find_in_list(&shm_head, cmp_cookie, mf->cookie));
330 	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
331 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
332 
333 	return mf->cookie;
334 }
335 
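/* Unmap the mobj and release its VA-space allocation, if it's mapped */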
336 static void unmap_helper(struct mobj_ffa *mf)
337 {
338 	if (mf->mm) {
339 		core_mmu_unmap_pages(tee_mm_get_smem(mf->mm),
340 				     get_page_count(mf));
341 		tee_mm_free(mf->mm);
342 		mf->mm = NULL;
343 	}
344 }
345 
346 #ifdef CFG_CORE_SEL1_SPMC
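/*
 * Handle FFA_MEM_RECLAIM: free the mobj identified by @cookie, provided
 * it's inactive and has no remaining inactive references.
 */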
347 TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie)
348 {
349 	TEE_Result res = TEE_SUCCESS;
350 	struct mobj_ffa *mf = NULL;
351 	uint32_t exceptions = 0;
352 
353 	exceptions = cpu_spin_lock_xsave(&shm_lock);
354 	mf = find_in_list(&shm_head, cmp_cookie, cookie);
355 	/*
356 	 * If the mobj is found here it's still active and cannot be
357 	 * reclaimed.
358 	 */
359 	if (mf) {
360 		DMSG("cookie %#"PRIx64" busy refc %u",
361 		     cookie, refcount_val(&mf->mobj.refc));
362 		res = TEE_ERROR_BUSY;
363 		goto out;
364 	}
365 
366 	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
367 	if (!mf) {
368 		res = TEE_ERROR_ITEM_NOT_FOUND;
369 		goto out;
370 	}
371 	/*
372 	 * If the mobj has been registered via mobj_ffa_get_by_cookie()
373 	 * but not unregistered yet with mobj_ffa_unregister_by_cookie().
374 	 */
375 	if (mf->inactive_refs) {
376 		DMSG("cookie %#"PRIx64" busy inactive_refs %u",
377 		     cookie, mf->inactive_refs);
378 		res = TEE_ERROR_BUSY;
379 		goto out;
380 	}
381 
382 	if (!pop_from_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf))
383 		panic();
384 	res = TEE_SUCCESS;
385 out:
386 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
387 	if (!res) {
388 		mobj_ffa_sel1_spmc_delete(mf);
389 		virt_remove_cookie(cookie);
390 	}
391 	return res;
392 }
393 #endif /*CFG_CORE_SEL1_SPMC*/
394 
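/*
 * Drop the normal world registration of @cookie so the memory can later
 * be reclaimed (S-EL1 SPMC), or relinquish it to the SPMC at S-EL2/EL3.
 * Fails with TEE_ERROR_BUSY while the mobj is still active.
 */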
395 TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie)
396 {
397 	TEE_Result res = TEE_SUCCESS;
398 	struct mobj_ffa *mf = NULL;
399 	uint32_t exceptions = 0;
400 
401 	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
402 	exceptions = cpu_spin_lock_xsave(&shm_lock);
403 	mf = find_in_list(&shm_head, cmp_cookie, cookie);
404 	/*
405 	 * If the mobj is found here it's still active and cannot be
406 	 * unregistered.
407 	 */
408 	if (mf) {
409 		EMSG("cookie %#"PRIx64" busy refc %u:%u",
410 		     cookie, refcount_val(&mf->mobj.refc), mf->inactive_refs);
411 		res = TEE_ERROR_BUSY;
412 		goto out;
413 	}
414 	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
415 	/*
416 	 * If the mobj isn't found or if it already has been unregistered.
417 	 */
418 	if (!mf) {
419 		EMSG("cookie %#"PRIx64" not found", cookie);
420 		res = TEE_ERROR_ITEM_NOT_FOUND;
421 		goto out;
422 	}
423 #if defined(CFG_CORE_SEL1_SPMC)
424 	if (!mf->registered_by_cookie) {
425 		/*
426 		 * This is expected behaviour if the normal world has
427 		 * registered the memory but OP-TEE has not yet used the
428 		 * corresponding cookie with mobj_ffa_get_by_cookie(). It
429 		 * can be non-trivial for the normal world to predict if
430 		 * the cookie really has been used or not. So even if we
431 		 * return it as an error it will be ignored by
432 		 * handle_unregister_shm().
433 		 */
434 		EMSG("cookie %#"PRIx64" not registered refs %u:%u",
435 		     cookie, refcount_val(&mf->mobj.refc), mf->inactive_refs);
436 		res = TEE_ERROR_ITEM_NOT_FOUND;
437 		goto out;
438 	}
439 	assert(mf->inactive_refs);
440 	mf->inactive_refs--;
441 	mf->registered_by_cookie = false;
442 #else
443 	if (mf->inactive_refs) {
444 		EMSG("cookie %#"PRIx64" busy refc %u:%u",
445 		     cookie, refcount_val(&mf->mobj.refc), mf->inactive_refs);
446 		res = TEE_ERROR_BUSY;
447 		goto out;
448 	}
449 	mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
450 	mobj_ffa_spmc_delete(mf);
451 	thread_spmc_relinquish(cookie);
452 #endif
453 	res = TEE_SUCCESS;
454 
455 out:
456 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
457 	return res;
458 }
459 
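/*
 * Find, and if needed activate, the shared memory object identified by
 * @cookie. @internal_offs is the offset into the first page. Each
 * successful call must be matched by a call to mobj_put().
 */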
460 struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie,
461 				    unsigned int internal_offs)
462 {
463 	struct mobj_ffa *mf = NULL;
464 	uint32_t exceptions = 0;
465 
466 	if (internal_offs >= SMALL_PAGE_SIZE)
467 		return NULL;
468 	exceptions = cpu_spin_lock_xsave(&shm_lock);
469 	mf = find_in_list(&shm_head, cmp_cookie, cookie);
470 	if (mf) {
471 		if (mf->page_offset == internal_offs) {
472 			if (!refcount_inc(&mf->mobj.refc)) {
473 				/*
474 				 * If the refcount is 0, some other thread has
475 				 * called mobj_put() on this mobj and the count
476 				 * reached 0, but we found the mobj before
477 				 * ffa_inactivate() got the lock. Let's reinitialize it.
478 				 */
479 				refcount_set(&mf->mobj.refc, 1);
480 				mf->inactive_refs++;
481 			}
482 			DMSG("cookie %#"PRIx64" active: refc %u:%u",
483 			     cookie, refcount_val(&mf->mobj.refc),
484 			     mf->inactive_refs);
485 		} else {
486 			EMSG("cookie %#"PRIx64" mismatching internal_offs got %#"PRIx16" expected %#x",
487 			     cookie, mf->page_offset, internal_offs);
488 			mf = NULL;
489 		}
490 	} else {
491 		mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
492 #if !defined(CFG_CORE_SEL1_SPMC)
493 		/* Try to retrieve it from the SPM at S-EL2 */
494 		if (mf) {
495 			DMSG("cookie %#"PRIx64" resurrecting", cookie);
496 		} else {
497 			DMSG("Populating mobj from rx buffer, cookie %#"PRIx64,
498 			     cookie);
499 			mf = thread_spmc_populate_mobj_from_rx(cookie);
500 		}
501 #endif
502 		if (mf) {
503 #if defined(CFG_CORE_SEL1_SPMC)
504 			if (!mf->registered_by_cookie) {
505 				mf->inactive_refs++;
506 				mf->registered_by_cookie = true;
507 			}
508 #endif
509 			assert(refcount_val(&mf->mobj.refc) == 0);
510 			refcount_set(&mf->mobj.refc, 1);
511 			refcount_set(&mf->mapcount, 0);
512 			mf->inactive_refs++;
513 
514 			/*
515 			 * mf->page_offset is offset into the first page.
516 			 * This offset is assigned from the internal_offs
517 			 * parameter to this function.
518 			 *
519 			 * While a mobj_ffa is active (ref_count > 0) this
520 			 * will not change, but when being pushed to the
521 			 * inactive list it can be changed again.
522 			 *
523 			 * So below we're backing out the old
524 			 * mf->page_offset and then assigning a new from
525 			 * internal_offset.
526 			 */
527 			mf->mobj.size += mf->page_offset;
528 			assert(!(mf->mobj.size & SMALL_PAGE_MASK));
529 			mf->mobj.size -= internal_offs;
530 			mf->page_offset = internal_offs;
531 
532 			SLIST_INSERT_HEAD(&shm_head, mf, link);
533 		}
534 	}
535 
536 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
537 
538 	if (!mf) {
539 		EMSG("Failed to get cookie %#"PRIx64" internal_offs %#x",
540 		     cookie, internal_offs);
541 		return NULL;
542 	}
543 	return &mf->mobj;
544 }
545 
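/*
 * Return the physical address at @offset, either exactly (@granule == 0)
 * or as the base of the containing page (@granule == SMALL_PAGE_SIZE).
 */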
546 static TEE_Result ffa_get_pa(struct mobj *mobj, size_t offset,
547 			     size_t granule, paddr_t *pa)
548 {
549 	struct mobj_ffa *mf = to_mobj_ffa(mobj);
550 	size_t full_offset = 0;
551 	paddr_t p = 0;
552 
553 	if (!pa)
554 		return TEE_ERROR_GENERIC;
555 
556 	if (offset >= mobj->size)
557 		return TEE_ERROR_GENERIC;
558 
559 	full_offset = offset + mf->page_offset;
560 	switch (granule) {
561 	case 0:
562 		p = mf->pages[full_offset / SMALL_PAGE_SIZE] +
563 		    (full_offset & SMALL_PAGE_MASK);
564 		break;
565 	case SMALL_PAGE_SIZE:
566 		p = mf->pages[full_offset / SMALL_PAGE_SIZE];
567 		break;
568 	default:
569 		return TEE_ERROR_GENERIC;
570 	}
571 	*pa = p;
572 
573 	return TEE_SUCCESS;
574 }
575 
576 static size_t ffa_get_phys_offs(struct mobj *mobj,
577 				size_t granule __maybe_unused)
578 {
579 	assert(granule >= mobj->phys_granule);
580 
581 	return to_mobj_ffa(mobj)->page_offset;
582 }
583 
584 static void *ffa_get_va(struct mobj *mobj, size_t offset, size_t len)
585 {
586 	struct mobj_ffa *mf = to_mobj_ffa(mobj);
587 
588 	if (!mf->mm || !mobj_check_offset_and_len(mobj, offset, len))
589 		return NULL;
590 
591 	return (void *)(tee_mm_get_smem(mf->mm) + offset + mf->page_offset);
592 }
593 
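/*
 * Called via mobj_put() when mobj.refc reaches 0: move the mobj back to
 * the inactive list, unmap it and drop one inactive reference.
 */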
594 static void ffa_inactivate(struct mobj *mobj)
595 {
596 	struct mobj_ffa *mf = to_mobj_ffa(mobj);
597 	uint32_t exceptions = 0;
598 
599 	exceptions = cpu_spin_lock_xsave(&shm_lock);
600 	/*
601 	 * If refcount isn't 0 some other thread has found this mobj in
602 	 * shm_head after the mobj_put() that put us here and before we got
603 	 * the lock.
604 	 */
605 	if (refcount_val(&mobj->refc)) {
606 		DMSG("cookie %#"PRIx64" was resurrected", mf->cookie);
607 		goto out;
608 	}
609 
610 	/*
611 	 * pop_from_list() can fail to find the mobj if we had just
612 	 * decreased the refcount to 0 in mobj_put() and was going to
613 	 * acquire the shm_lock but another thread found this mobj and
614 	 * reinitialized the refcount to 1. Then before we got cpu time the
615 	 * other thread called mobj_put() and deactivated the mobj again.
616 	 *
617 	 * However, we still have the inactive count that guarantees
618 	 * that the mobj can't be freed until it reaches 0.
619 	 * At this point the mobj is in the inactive list.
620 	 */
621 	if (pop_from_list(&shm_head, cmp_ptr, (vaddr_t)mf)) {
622 		unmap_helper(mf);
623 		SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
624 	}
625 out:
626 	if (!mf->inactive_refs)
627 		panic();
628 	mf->inactive_refs--;
629 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
630 }
631 
632 static TEE_Result ffa_get_mem_type(struct mobj *mobj __unused, uint32_t *mt)
633 {
634 	if (!mt)
635 		return TEE_ERROR_GENERIC;
636 
637 	*mt = TEE_MATTR_MEM_TYPE_CACHED;
638 
639 	return TEE_SUCCESS;
640 }
641 
642 static bool ffa_matches(struct mobj *mobj __maybe_unused, enum buf_is_attr attr)
643 {
644 	assert(mobj->ops == &mobj_ffa_ops);
645 
646 	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
647 }
648 
649 static uint64_t ffa_get_cookie(struct mobj *mobj)
650 {
651 	return to_mobj_ffa(mobj)->cookie;
652 }
653 
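/*
 * Map the shared memory into OP-TEE's virtual address space when mapcount
 * goes from 0 to 1; otherwise only increase mapcount.
 */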
654 static TEE_Result ffa_inc_map(struct mobj *mobj)
655 {
656 	TEE_Result res = TEE_SUCCESS;
657 	struct mobj_ffa *mf = to_mobj_ffa(mobj);
658 	uint32_t exceptions = 0;
659 	size_t sz = 0;
660 
661 	while (true) {
662 		if (refcount_inc(&mf->mapcount))
663 			return TEE_SUCCESS;
664 
665 		exceptions = cpu_spin_lock_xsave(&shm_lock);
666 
667 		if (!refcount_val(&mf->mapcount))
668 			break; /* continue to reinitialize */
669 		/*
670 		 * If another thread beat us to initialize mapcount,
671 		 * restart to make sure we still increase it.
672 		 */
673 		cpu_spin_unlock_xrestore(&shm_lock, exceptions);
674 	}
675 
676 	/*
677 	 * If we have beaten another thread calling ffa_dec_map()
678 	 * to the lock, we only need to reinitialize mapcount to 1.
679 	 */
680 	if (!mf->mm) {
681 		sz = ROUNDUP(mobj->size + mf->page_offset, SMALL_PAGE_SIZE);
682 		mf->mm = tee_mm_alloc(&tee_mm_shm, sz);
683 		if (!mf->mm) {
684 			res = TEE_ERROR_OUT_OF_MEMORY;
685 			goto out;
686 		}
687 
688 		res = core_mmu_map_pages(tee_mm_get_smem(mf->mm), mf->pages,
689 					 sz / SMALL_PAGE_SIZE,
690 					 MEM_AREA_NSEC_SHM);
691 		if (res) {
692 			tee_mm_free(mf->mm);
693 			mf->mm = NULL;
694 			goto out;
695 		}
696 	}
697 
698 	refcount_set(&mf->mapcount, 1);
699 out:
700 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
701 
702 	return res;
703 }
704 
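/* Unmap the shared memory when mapcount reaches 0 */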
705 static TEE_Result ffa_dec_map(struct mobj *mobj)
706 {
707 	struct mobj_ffa *mf = to_mobj_ffa(mobj);
708 	uint32_t exceptions = 0;
709 
710 	if (!refcount_dec(&mf->mapcount))
711 		return TEE_SUCCESS;
712 
713 	exceptions = cpu_spin_lock_xsave(&shm_lock);
714 	if (!refcount_val(&mf->mapcount))
715 		unmap_helper(mf);
716 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
717 
718 	return TEE_SUCCESS;
719 }
720 
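/*
 * Set up the tee_mm_shm virtual address space pool (MEM_AREA_SHM_VASPACE)
 * used when mapping the shared memory objects handled in this file.
 */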
721 static TEE_Result mapped_shm_init(void)
722 {
723 	vaddr_t pool_start = 0;
724 	vaddr_t pool_end = 0;
725 
726 	core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
727 	if (!pool_start || !pool_end)
728 		panic("Can't find region for shmem pool");
729 
730 	if (!tee_mm_init(&tee_mm_shm, pool_start, pool_end - pool_start,
731 			 SMALL_PAGE_SHIFT,
732 			 TEE_MM_POOL_NO_FLAGS))
733 		panic("Could not create shmem pool");
734 
735 	DMSG("Shared memory address range: %#"PRIxVA", %#"PRIxVA,
736 	     pool_start, pool_end);
737 	return TEE_SUCCESS;
738 }
739 
740 static const struct mobj_ops mobj_ffa_ops = {
741 	.get_pa = ffa_get_pa,
742 	.get_phys_offs = ffa_get_phys_offs,
743 	.get_va = ffa_get_va,
744 	.get_mem_type = ffa_get_mem_type,
745 	.matches = ffa_matches,
746 	.free = ffa_inactivate,
747 	.get_cookie = ffa_get_cookie,
748 	.inc_map = ffa_inc_map,
749 	.dec_map = ffa_dec_map,
750 };
751 
752 preinit(mapped_shm_init);
753