// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2020, Linaro Limited
 */

#include <assert.h>
#include <bitstring.h>
#include <config.h>
#include <ffa.h>
#include <initcall.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/thread_spmc.h>
#include <kernel/virtualization.h>
#include <mm/mobj.h>
#include <sys/queue.h>

/*
 * Life cycle of struct mobj_ffa
 *
 * SPMC at S-EL1 (CFG_CORE_SEL1_SPMC=y)
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * During FFA_MEM_SHARE the mobj is allocated in mobj_ffa_sel1_spmc_new()
 * and finally added to the inactive list at the end of add_mem_share()
 * once successfully filled in.
 *	registered_by_cookie = false
 *	mobj.refc.val = 0
 *	inactive_refs = 0
 *
 * During FFA_MEM_RECLAIM the mobj is reclaimed/freed using
 * mobj_ffa_sel1_spmc_reclaim().  This will always succeed if the normal
 * world only calls it when all other threads are done with the shared
 * memory object. However, there are some conditions that must be met to
 * make sure that this is the case:
 *	mobj not in the active list, else -> return TEE_ERROR_BUSY
 *	mobj in the inactive list, else -> return TEE_ERROR_ITEM_NOT_FOUND
 *	mobj inactive_refs is 0, else -> return TEE_ERROR_BUSY
 *
 * A mobj is activated using mobj_ffa_get_by_cookie() which, unless the
 * mobj is already active:
 * - moves the mobj into the active list
 * - if not registered_by_cookie ->
 *	sets registered_by_cookie and increases inactive_refs
 * - sets mobj.refc.val to 1
 * - increases inactive_refs
 *
 * A previously activated mobj is made ready for reclaim using
 * mobj_ffa_unregister_by_cookie() which only succeeds if the mobj is in
 * the inactive list and registered_by_cookie is set, and then:
 * - clears registered_by_cookie
 * - decreases inactive_refs
 *
 * Each successful call to mobj_ffa_get_by_cookie() must be matched by a
 * call to mobj_put(). When mobj.refc.val reaches 0:
 * - the mobj is moved to the inactive list
 * - inactive_refs is decreased
 *
 * SPMC at S-EL2/EL3 (CFG_CORE_SEL1_SPMC=n)
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * A mobj is activated/allocated using mobj_ffa_get_by_cookie() which:
 * - if the mobj is already active, only increases mobj.refc.val and
 *   inactive_refs
 * - if the mobj is found in the inactive list:
 *	sets mobj.refc.val to 1, increases inactive_refs and moves it
 *	into the active list
 * - if the mobj isn't found, creates it using
 *   thread_spmc_populate_mobj_from_rx() and then:
 *	sets mobj.refc.val to 1, increases inactive_refs and moves it
 *	into the active list
 *
 * A previously activated mobj is relinquished using
 * mobj_ffa_unregister_by_cookie() which only succeeds if the mobj is in
 * the inactive list and inactive_refs is 0
 */
struct mobj_ffa {
	struct mobj mobj;
	SLIST_ENTRY(mobj_ffa) link;
	uint64_t cookie;
	unsigned int inactive_refs;
#ifdef CFG_CORE_SEL1_SPMC
	bool registered_by_cookie;
#endif
};

struct mobj_ffa_shm {
	struct mobj_ffa mf;
	tee_mm_entry_t *mm;
	struct refcount mapcount;
	uint16_t page_offset;
	paddr_t pages[];
};

SLIST_HEAD(mobj_ffa_head, mobj_ffa);

#ifdef CFG_CORE_SEL1_SPMC
#ifdef CFG_NS_VIRTUALIZATION
static bitstr_t *get_shm_bits(void)
{
	return virt_get_shm_bits();
}
#else
static bitstr_t bit_decl(__shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT);

static bitstr_t *get_shm_bits(void)
{
	return __shm_bits;
}
#endif
#endif

static struct mobj_ffa_head shm_head = SLIST_HEAD_INITIALIZER(shm_head);
static struct mobj_ffa_head shm_inactive_head =
	SLIST_HEAD_INITIALIZER(shm_inactive_head);

static unsigned int shm_lock = SPINLOCK_UNLOCK;

static const struct mobj_ops mobj_ffa_shm_ops;

static bool __maybe_unused is_mobj_ffa_shm(struct mobj *mobj)
{
	return mobj->ops == &mobj_ffa_shm_ops;
}

static struct mobj_ffa_shm *to_mobj_ffa_shm(struct mobj *mobj)
{
	assert(is_mobj_ffa_shm(mobj));
	return container_of(mobj, struct mobj_ffa_shm, mf.mobj);
}

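/*
 * Compute the allocation size of a struct mobj_ffa_shm covering
 * @num_pages pages. Returns 0 on arithmetic overflow.
 */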
static size_t shm_size(size_t num_pages)
{
	size_t s = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_ffa_shm), s, &s))
		return 0;
	return s;
}

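/*
 * Allocate and minimally initialize a struct mobj_ffa_shm with room for
 * @num_pages physical page addresses. Returns NULL if @num_pages is 0, on
 * arithmetic overflow or on out of memory.
 */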
static struct mobj_ffa_shm *ffa_shm_new(unsigned int num_pages)
{
	struct mobj_ffa_shm *m = NULL;
	size_t s = 0;

	if (!num_pages)
		return NULL;

	s = shm_size(num_pages);
	if (!s)
		return NULL;
	m = calloc(1, s);
	if (!m)
		return NULL;

	m->mf.mobj.ops = &mobj_ffa_shm_ops;
	m->mf.mobj.size = num_pages * SMALL_PAGE_SIZE;
	m->mf.mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&m->mf.mobj.refc, 0);
	m->mf.inactive_refs = 0;

	return m;
}

#ifdef CFG_CORE_SEL1_SPMC
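/*
 * Allocate a new shared memory object for @num_pages pages. If @cookie is
 * OPTEE_MSG_FMEM_INVALID_GLOBAL_ID a new cookie is allocated from the
 * SPMC-owned range, else the hypervisor-assigned @cookie is recorded for
 * the current guest.
 */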
struct mobj_ffa *mobj_ffa_sel1_spmc_new(uint64_t cookie,
					unsigned int num_pages)
{
	struct mobj_ffa_shm *m = NULL;
	bitstr_t *shm_bits = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID) {
		if (!(cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT))
			return NULL;
		if (virt_add_cookie_to_current_guest(cookie))
			return NULL;
	}

	m = ffa_shm_new(num_pages);
	if (!m) {
		if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
			virt_remove_cookie(cookie);
		return NULL;
	}

	if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID) {
		m->mf.cookie = cookie;
		return &m->mf;
	}

	shm_bits = get_shm_bits();
	exceptions = cpu_spin_lock_xsave(&shm_lock);
	bit_ffc(shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT, &i);
	if (i != -1) {
		bit_set(shm_bits, i);
		m->mf.cookie = i;
		m->mf.cookie |= FFA_MEMORY_HANDLE_NON_SECURE_BIT;
		/*
		 * Encode the partition ID into the handle so we know which
		 * partition to switch to when reclaiming a handle.
		 */
		m->mf.cookie |= SHIFT_U64(virt_get_current_guest_id(),
					  FFA_MEMORY_HANDLE_PRTN_SHIFT);
	}
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (i == -1) {
		free(m);
		return NULL;
	}

	return &m->mf;
}
#endif /*CFG_CORE_SEL1_SPMC*/

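/* Number of small pages covered by @mf, rounded up. */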
static size_t get_page_count(struct mobj_ffa *mf)
{
	return ROUNDUP_DIV(mf->mobj.size, SMALL_PAGE_SIZE);
}

static bool cmp_cookie(struct mobj_ffa *mf, uint64_t cookie)
{
	return mf->cookie == cookie;
}

static bool cmp_ptr(struct mobj_ffa *mf, uint64_t ptr)
{
	return mf == (void *)(vaddr_t)ptr;
}

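/*
 * Remove and return the first element in @head matching @val according to
 * @cmp_func, or NULL if no element matches.
 */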
static struct mobj_ffa *pop_from_list(struct mobj_ffa_head *head,
				      bool (*cmp_func)(struct mobj_ffa *mf,
						       uint64_t val),
				      uint64_t val)
{
	struct mobj_ffa *mf = SLIST_FIRST(head);
	struct mobj_ffa *p = NULL;

	if (!mf)
		return NULL;

	if (cmp_func(mf, val)) {
		SLIST_REMOVE_HEAD(head, link);
		return mf;
	}

	while (true) {
		p = SLIST_NEXT(mf, link);
		if (!p)
			return NULL;
		if (cmp_func(p, val)) {
			SLIST_REMOVE_AFTER(mf, link);
			return p;
		}
		mf = p;
	}
}

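/*
 * Return the first element in @head matching @val according to @cmp_func
 * without removing it, or NULL if no element matches.
 */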
static struct mobj_ffa *find_in_list(struct mobj_ffa_head *head,
				     bool (*cmp_func)(struct mobj_ffa *mf,
						      uint64_t val),
				     uint64_t val)
{
	struct mobj_ffa *mf = NULL;

	SLIST_FOREACH(mf, head, link)
		if (cmp_func(mf, val))
			return mf;

	return NULL;
}

#if defined(CFG_CORE_SEL1_SPMC)
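/*
 * Free @mf and, unless the cookie was assigned by a hypervisor, release
 * the cookie index back to the SPMC-owned bitmap. The mobj must already
 * be unmapped.
 */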
void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mf)
{
	struct mobj_ffa_shm *m = NULL;

	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION) ||
	    !(mf->cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT)) {
		uint64_t mask = FFA_MEMORY_HANDLE_NON_SECURE_BIT;
		bitstr_t *shm_bits = get_shm_bits();
		uint32_t exceptions = 0;
		int64_t i = 0;

		if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
			mask |= SHIFT_U64(FFA_MEMORY_HANDLE_PRTN_MASK,
					  FFA_MEMORY_HANDLE_PRTN_SHIFT);
		i = mf->cookie & ~mask;
		assert(i >= 0 && i < SPMC_CORE_SEL1_MAX_SHM_COUNT);

		exceptions = cpu_spin_lock_xsave(&shm_lock);
		assert(bit_test(shm_bits, i));
		bit_clear(shm_bits, i);
		cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	}

	m = to_mobj_ffa_shm(&mf->mobj);
	assert(!m->mm);
	free(m);
}
#else /* !defined(CFG_CORE_SEL1_SPMC) */
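/*
 * Allocate a new shared memory object for @num_pages pages identified by
 * the @cookie assigned by the SPMC at S-EL2/EL3.
 */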
struct mobj_ffa *mobj_ffa_spmc_new(uint64_t cookie, unsigned int num_pages)
{
	struct mobj_ffa_shm *m = NULL;

	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
	m = ffa_shm_new(num_pages);
	if (!m)
		return NULL;
	m->mf.cookie = cookie;

	return &m->mf;
}

void mobj_ffa_spmc_delete(struct mobj_ffa *mf)
{
	free(to_mobj_ffa_shm(&mf->mobj));
}
#endif /* !defined(CFG_CORE_SEL1_SPMC) */

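/*
 * Fill in the physical address array of @mf with @num_pages contiguous
 * pages starting at @pa, beginning at index *@idx. On success *@idx is
 * advanced by @num_pages.
 */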
TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mf, unsigned int *idx,
				 paddr_t pa, unsigned int num_pages)
{
	struct mobj_ffa_shm *mfs = to_mobj_ffa_shm(&mf->mobj);
	size_t tot_page_count = get_page_count(mf);
	unsigned int n = 0;

	if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
	    !core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))
		return TEE_ERROR_BAD_PARAMETERS;

	for (n = 0; n < num_pages; n++)
		mfs->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;

	(*idx) += n;
	return TEE_SUCCESS;
}

uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mf)
{
	return mf->cookie;
}

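/*
 * Insert a fully initialized @mf into the inactive list and return its
 * cookie. The mobj must not already be linked into any list.
 */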
uint64_t mobj_ffa_push_to_inactive(struct mobj_ffa *mf)
{
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	assert(!find_in_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf));
	assert(!find_in_list(&shm_inactive_head, cmp_cookie, mf->cookie));
	assert(!find_in_list(&shm_head, cmp_cookie, mf->cookie));
	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return mf->cookie;
}

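/* Unmap @m from the shared memory virtual address space, if mapped. */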
static void unmap_helper(struct mobj_ffa_shm *m)
{
	if (m->mm) {
		core_mmu_unmap_pages(tee_mm_get_smem(m->mm),
				     get_page_count(&m->mf));
		tee_mm_free(m->mm);
		m->mm = NULL;
	}
}

#ifdef CFG_CORE_SEL1_SPMC
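/*
 * Called during FFA_MEM_RECLAIM to free the shared memory object
 * identified by @cookie. Fails with TEE_ERROR_BUSY while the mobj is
 * still active or referenced, and with TEE_ERROR_ITEM_NOT_FOUND if the
 * cookie is unknown.
 */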
TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * reclaimed.
	 */
	if (mf) {
		DMSG("cookie %#"PRIx64" busy refc %u",
		     cookie, refcount_val(&mf->mobj.refc));
		res = TEE_ERROR_BUSY;
		goto out;
	}

	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	if (!mf) {
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	/*
	 * The mobj has been registered via mobj_ffa_get_by_cookie() but
	 * not yet unregistered with mobj_ffa_unregister_by_cookie(), so
	 * it cannot be reclaimed yet.
	 */
	if (mf->inactive_refs) {
		DMSG("cookie %#"PRIx64" busy inactive_refs %u",
		     cookie, mf->inactive_refs);
		res = TEE_ERROR_BUSY;
		goto out;
	}

	if (!pop_from_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf))
		panic();
	res = TEE_SUCCESS;
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	if (!res) {
		mobj_ffa_sel1_spmc_delete(mf);
		virt_remove_cookie(cookie);
	}
	return res;
}
#endif /*CFG_CORE_SEL1_SPMC*/

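/*
 * Undo the registration done by mobj_ffa_get_by_cookie() so that the
 * shared memory object identified by @cookie can be reclaimed or
 * relinquished. Fails if the mobj is unknown, still active, or otherwise
 * still referenced.
 */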
TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * unregistered.
	 */
	if (mf) {
		EMSG("cookie %#"PRIx64" busy refc %u:%u",
		     cookie, refcount_val(&mf->mobj.refc), mf->inactive_refs);
		res = TEE_ERROR_BUSY;
		goto out;
	}
	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	/*
	 * The mobj either isn't found or has already been unregistered.
	 */
	if (!mf) {
		EMSG("cookie %#"PRIx64" not found", cookie);
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
#if defined(CFG_CORE_SEL1_SPMC)
	if (!mf->registered_by_cookie) {
		/*
		 * This is expected behaviour if the normal world has
		 * registered the memory but OP-TEE has not yet used the
		 * corresponding cookie with mobj_ffa_get_by_cookie(). It
		 * can be non-trivial for the normal world to predict if
		 * the cookie really has been used or not. So even if we
		 * return it as an error it will be ignored by
		 * handle_unregister_shm().
		 */
		EMSG("cookie %#"PRIx64" not registered refs %u:%u",
		     cookie, refcount_val(&mf->mobj.refc), mf->inactive_refs);
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	assert(mf->inactive_refs);
	mf->inactive_refs--;
	mf->registered_by_cookie = false;
#else
	if (mf->inactive_refs) {
		EMSG("cookie %#"PRIx64" busy refc %u:%u",
		     cookie, refcount_val(&mf->mobj.refc), mf->inactive_refs);
		res = TEE_ERROR_BUSY;
		goto out;
	}
	mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
	mobj_ffa_spmc_delete(mf);
	thread_spmc_relinquish(cookie);
#endif
	res = TEE_SUCCESS;

out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	return res;
}

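/*
 * Look up the shared memory object identified by @cookie and return an
 * active reference to its mobj, or NULL on failure. @internal_offs is the
 * offset into the first page; if the mobj is already active it must match
 * the current page offset. When the SPMC runs at S-EL2/EL3 the object is
 * created from the RX buffer if it isn't known yet.
 */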
struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie,
				    unsigned int internal_offs)
{
	struct mobj_ffa_shm *mfs = NULL;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;
	uint16_t offs = 0;

	if (internal_offs >= SMALL_PAGE_SIZE)
		return NULL;
	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	if (mf) {
		mfs = to_mobj_ffa_shm(&mf->mobj);
		offs = mfs->page_offset;
		if (offs == internal_offs) {
			if (!refcount_inc(&mf->mobj.refc)) {
				/*
				 * If refcount is 0 another thread has
				 * called mobj_put() so that it reached 0,
				 * but we found the mobj before
				 * ffa_shm_inactivate() got the lock.
				 * Let's reinitialize it.
				 */
				refcount_set(&mf->mobj.refc, 1);
				mf->inactive_refs++;
			}
			DMSG("cookie %#"PRIx64" active: refc %u:%u",
			     cookie, refcount_val(&mf->mobj.refc),
			     mf->inactive_refs);
		} else {
			EMSG("cookie %#"PRIx64" mismatching internal_offs got %#"PRIx16" expected %#x",
			     cookie, offs, internal_offs);
			mf = NULL;
		}
	} else {
		mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
#if !defined(CFG_CORE_SEL1_SPMC)
		/* Try to retrieve it from the SPM at S-EL2 */
		if (mf) {
			DMSG("cookie %#"PRIx64" resurrecting", cookie);
		} else {
			DMSG("Populating mobj from rx buffer, cookie %#"PRIx64,
			     cookie);
			mf = thread_spmc_populate_mobj_from_rx(cookie);
		}
#endif
		if (mf) {
#if defined(CFG_CORE_SEL1_SPMC)
			if (!mf->registered_by_cookie) {
				mf->inactive_refs++;
				mf->registered_by_cookie = true;
			}
#endif
			assert(refcount_val(&mf->mobj.refc) == 0);
			refcount_set(&mf->mobj.refc, 1);
			mf->inactive_refs++;
			mfs = to_mobj_ffa_shm(&mf->mobj);
			refcount_set(&mfs->mapcount, 0);

			/*
			 * mfs->page_offset is the offset into the first
			 * page. It is assigned from the internal_offs
			 * parameter to this function.
			 *
			 * While a mobj_ffa is active (refcount > 0) this
			 * will not change, but once it has been pushed to
			 * the inactive list it can be changed again.
			 *
			 * So below we're backing out the old
			 * mfs->page_offset and then assigning a new one
			 * from internal_offs.
			 */
			mf->mobj.size += mfs->page_offset;
			assert(!(mf->mobj.size & SMALL_PAGE_MASK));
			mf->mobj.size -= internal_offs;
			mfs->page_offset = internal_offs;

			SLIST_INSERT_HEAD(&shm_head, mf, link);
		}
	}

	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (!mf) {
		EMSG("Failed to get cookie %#"PRIx64" internal_offs %#x",
		     cookie, internal_offs);
		return NULL;
	}
	return &mf->mobj;
}

static TEE_Result ffa_shm_get_pa(struct mobj *mobj, size_t offset,
				 size_t granule, paddr_t *pa)
{
	struct mobj_ffa_shm *m = to_mobj_ffa_shm(mobj);
	size_t full_offset = 0;
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offset >= mobj->size)
		return TEE_ERROR_GENERIC;

	full_offset = offset + m->page_offset;
	switch (granule) {
	case 0:
		p = m->pages[full_offset / SMALL_PAGE_SIZE] +
		    (full_offset & SMALL_PAGE_MASK);
		break;
	case SMALL_PAGE_SIZE:
		p = m->pages[full_offset / SMALL_PAGE_SIZE];
		break;
	default:
		return TEE_ERROR_GENERIC;
	}
	*pa = p;

	return TEE_SUCCESS;
}

static size_t ffa_shm_get_phys_offs(struct mobj *mobj,
				    size_t granule __maybe_unused)
{
	assert(granule >= mobj->phys_granule);

	return to_mobj_ffa_shm(mobj)->page_offset;
}

static void *ffa_shm_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_ffa_shm *m = to_mobj_ffa_shm(mobj);

	if (!m->mm || !mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return (void *)(tee_mm_get_smem(m->mm) + offset + m->page_offset);
}

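/*
 * mobj "free" callback, called when mobj.refc drops to 0. Unless the mobj
 * has been resurrected by another thread, it is unmapped and moved back
 * to the inactive list, and one inactive reference is dropped.
 */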
static void ffa_shm_inactivate(struct mobj *mobj)
{
	struct mobj_ffa_shm *m = to_mobj_ffa_shm(mobj);
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	/*
	 * If refcount isn't 0 some other thread has found this mobj in
	 * shm_head after the mobj_put() that put us here and before we got
	 * the lock.
	 */
	if (refcount_val(&mobj->refc)) {
		DMSG("cookie %#"PRIx64" was resurrected", m->mf.cookie);
		goto out;
	}

	/*
	 * pop_from_list() can fail to find the mobj if we had just
	 * decreased the refcount to 0 in mobj_put() and were about to
	 * acquire the shm_lock when another thread found this mobj and
	 * reinitialized the refcount to 1. Then before we got cpu time the
	 * other thread called mobj_put() and deactivated the mobj again.
	 *
	 * However, we still have the inactive count that guarantees
	 * that the mobj can't be freed until it reaches 0.
	 * At this point the mobj is in the inactive list.
	 */
	if (pop_from_list(&shm_head, cmp_ptr, (vaddr_t)&m->mf)) {
		unmap_helper(m);
		SLIST_INSERT_HEAD(&shm_inactive_head, &m->mf, link);
	}
out:
	if (!m->mf.inactive_refs)
		panic();
	m->mf.inactive_refs--;
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
}

static TEE_Result ffa_shm_get_mem_type(struct mobj *mobj __unused, uint32_t *mt)
{
	if (!mt)
		return TEE_ERROR_GENERIC;

	*mt = TEE_MATTR_MEM_TYPE_CACHED;

	return TEE_SUCCESS;
}

static bool ffa_shm_matches(struct mobj *mobj __maybe_unused,
			    enum buf_is_attr attr)
{
	assert(is_mobj_ffa_shm(mobj));

	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

static uint64_t ffa_shm_get_cookie(struct mobj *mobj)
{
	return to_mobj_ffa_shm(mobj)->mf.cookie;
}

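/*
 * Map the shared memory object into the core virtual address space the
 * first time a mapping is requested, and count the additional users.
 */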
static TEE_Result ffa_shm_inc_map(struct mobj *mobj)
{
	struct mobj_ffa_shm *m = to_mobj_ffa_shm(mobj);
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;
	size_t sz = 0;

	while (true) {
		if (refcount_inc(&m->mapcount))
			return TEE_SUCCESS;

		exceptions = cpu_spin_lock_xsave(&shm_lock);

		if (!refcount_val(&m->mapcount))
			break; /* continue to reinitialize */
		/*
		 * If another thread beat us to initialize mapcount,
		 * restart to make sure we still increase it.
		 */
		cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	}

	/*
	 * If we have beaten another thread calling ffa_shm_dec_map()
	 * to the lock we only need to reinitialize mapcount to 1.
	 */
	if (!m->mm) {
		sz = ROUNDUP(mobj->size + m->page_offset, SMALL_PAGE_SIZE);
		m->mm = tee_mm_alloc(&core_virt_shm_pool, sz);
		if (!m->mm) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto out;
		}

		res = core_mmu_map_pages(tee_mm_get_smem(m->mm), m->pages,
					 sz / SMALL_PAGE_SIZE,
					 MEM_AREA_NSEC_SHM);
		if (res) {
			tee_mm_free(m->mm);
			m->mm = NULL;
			goto out;
		}
	}

	refcount_set(&m->mapcount, 1);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return res;
}

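/*
 * Drop one mapping reference and unmap the shared memory object when the
 * last user is gone.
 */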
static TEE_Result ffa_shm_dec_map(struct mobj *mobj)
{
	struct mobj_ffa_shm *m = to_mobj_ffa_shm(mobj);
	uint32_t exceptions = 0;

	if (!refcount_dec(&m->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	if (!refcount_val(&m->mapcount))
		unmap_helper(m);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return TEE_SUCCESS;
}

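/*
 * Create the virtual address space pool used when mapping FF-A shared
 * memory objects, backed by the MEM_AREA_SHM_VASPACE region.
 */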
static TEE_Result mapped_shm_init(void)
{
	vaddr_t pool_start = 0;
	vaddr_t pool_end = 0;

	core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
	if (!pool_start || !pool_end)
		panic("Can't find region for shmem pool");

	if (!tee_mm_init(&core_virt_shm_pool, pool_start, pool_end - pool_start,
			 SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("Could not create shmem pool");

	DMSG("Shared memory address range: %#"PRIxVA", %#"PRIxVA,
	     pool_start, pool_end);
	return TEE_SUCCESS;
}

static const struct mobj_ops mobj_ffa_shm_ops = {
	.get_pa = ffa_shm_get_pa,
	.get_phys_offs = ffa_shm_get_phys_offs,
	.get_va = ffa_shm_get_va,
	.get_mem_type = ffa_shm_get_mem_type,
	.matches = ffa_shm_matches,
	.free = ffa_shm_inactivate,
	.get_cookie = ffa_shm_get_cookie,
	.inc_map = ffa_shm_inc_map,
	.dec_map = ffa_shm_dec_map,
};

preinit(mapped_shm_init);