xref: /optee_os/core/arch/arm/mm/mobj_ffa.c (revision ba2a6adb764f1310ad3c3091d89de84274f86b02)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2020, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <bitstring.h>
8 #include <config.h>
9 #include <ffa.h>
10 #include <initcall.h>
11 #include <kernel/refcount.h>
12 #include <kernel/spinlock.h>
13 #include <kernel/thread_spmc.h>
14 #include <kernel/virtualization.h>
15 #include <mm/mobj.h>
16 #include <sys/queue.h>
17 
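/*
 * struct mobj_ffa - memory object backed by FF-A shared memory
 * @mobj:	generic mobj header, ops point at mobj_ffa_ops below
 * @link:	list link, the object sits on either shm_head (active)
 *		or shm_inactive_head (inactive)
 * @cookie:	FF-A memory handle identifying the share
 * @mm:		VA-space allocation in tee_mm_shm while the pages are
 *		mapped, NULL when unmapped
 * @mapcount:	number of inc_map calls without a matching dec_map
 * @page_offset: offset into the first page of the shared buffer
 * @registered_by_cookie: set once retrieved via mobj_ffa_get_by_cookie()
 *		(SEL1 SPMC only)
 * @unregistered_by_cookie: set once mobj_ffa_unregister_by_cookie() has
 *		been called on an inactive object (SEL1 SPMC only)
 * @pages:	physical address of each page, length is the page count
 *		derived from mobj.size
 */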
18 struct mobj_ffa {
19 	struct mobj mobj;
20 	SLIST_ENTRY(mobj_ffa) link;
21 	uint64_t cookie;
22 	tee_mm_entry_t *mm;
23 	struct refcount mapcount;
24 	uint16_t page_offset;
25 #ifdef CFG_CORE_SEL1_SPMC
26 	bool registered_by_cookie;
27 	bool unregistered_by_cookie;
28 #endif
29 	paddr_t pages[];
30 };
31 
32 SLIST_HEAD(mobj_ffa_head, mobj_ffa);
33 
34 #ifdef CFG_CORE_SEL1_SPMC
35 #define NUM_SHMS	64
36 static bitstr_t bit_decl(shm_bits, NUM_SHMS);
37 #endif
38 
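/*
 * Each mobj_ffa sits on one of two lists: shm_head holds objects that
 * are currently active (in use via a struct mobj reference) while
 * shm_inactive_head holds objects that are shared with OP-TEE but not
 * in use. Both lists (and, with CFG_CORE_SEL1_SPMC, the shm_bits
 * allocator above) are protected by shm_lock.
 */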
39 static struct mobj_ffa_head shm_head = SLIST_HEAD_INITIALIZER(shm_head);
40 static struct mobj_ffa_head shm_inactive_head =
41 	SLIST_HEAD_INITIALIZER(shm_inactive_head);
42 
43 static unsigned int shm_lock = SPINLOCK_UNLOCK;
44 
45 static const struct mobj_ops mobj_ffa_ops;
46 
47 static struct mobj_ffa *to_mobj_ffa(struct mobj *mobj)
48 {
49 	assert(mobj->ops == &mobj_ffa_ops);
50 	return container_of(mobj, struct mobj_ffa, mobj);
51 }
52 
53 static size_t shm_size(size_t num_pages)
54 {
55 	size_t s = 0;
56 
57 	if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
58 		return 0;
59 	if (ADD_OVERFLOW(sizeof(struct mobj_ffa), s, &s))
60 		return 0;
61 	return s;
62 }
63 
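/*
 * Allocates a zeroed mobj_ffa with room for num_pages physical
 * addresses. The mobj starts with refcount 0, that is, inactive; the
 * pages are filled in later with mobj_ffa_add_pages_at().
 */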
64 static struct mobj_ffa *ffa_new(unsigned int num_pages)
65 {
66 	struct mobj_ffa *mf = NULL;
67 	size_t s = 0;
68 
69 	if (!num_pages)
70 		return NULL;
71 
72 	s = shm_size(num_pages);
73 	if (!s)
74 		return NULL;
75 	mf = calloc(1, s);
76 	if (!mf)
77 		return NULL;
78 
79 	mf->mobj.ops = &mobj_ffa_ops;
80 	mf->mobj.size = num_pages * SMALL_PAGE_SIZE;
81 	mf->mobj.phys_granule = SMALL_PAGE_SIZE;
82 	refcount_set(&mf->mobj.refc, 0);
83 
84 	return mf;
85 }
86 
87 #ifdef CFG_CORE_SEL1_SPMC
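/*
 * Creates a new mobj_ffa and assigns its cookie (the FF-A memory
 * handle). A caller-supplied cookie (!= OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
 * must have FFA_MEMORY_HANDLE_HYPERVISOR_BIT set and is recorded for
 * the current guest. Otherwise a free index is allocated from shm_bits
 * and combined with FFA_MEMORY_HANDLE_NON_SECURE_BIT and the current
 * guest ID so the handle can be routed back to the right partition on
 * reclaim.
 */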
88 struct mobj_ffa *mobj_ffa_sel1_spmc_new(uint64_t cookie,
89 					unsigned int num_pages)
90 {
91 	struct mobj_ffa *mf = NULL;
92 	uint32_t exceptions = 0;
93 	int i = 0;
94 
95 	if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID) {
96 		if (!(cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT))
97 			return NULL;
98 		if (virt_add_cookie_to_current_guest(cookie))
99 			return NULL;
100 	}
101 
102 	mf = ffa_new(num_pages);
103 	if (!mf) {
104 		if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
105 			virt_remove_cookie(cookie);
106 		return NULL;
107 	}
108 
109 	if (cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID) {
110 		mf->cookie = cookie;
111 		return mf;
112 	}
113 
114 	exceptions = cpu_spin_lock_xsave(&shm_lock);
115 	bit_ffc(shm_bits, NUM_SHMS, &i);
116 	if (i != -1) {
117 		bit_set(shm_bits, i);
118 		mf->cookie = i;
119 		mf->cookie |= FFA_MEMORY_HANDLE_NON_SECURE_BIT;
120 		/*
121 		 * Encode the partition ID into the handle so we know which
122 		 * partition to switch to when reclaiming a handle.
123 		 */
124 		mf->cookie |= SHIFT_U64(virt_get_current_guest_id(),
125 					FFA_MEMORY_HANDLE_PRTN_SHIFT);
126 	}
127 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
128 
129 	if (i == -1) {
130 		free(mf);
131 		return NULL;
132 	}
133 
134 	return mf;
135 }
136 #endif /*CFG_CORE_SEL1_SPMC*/
137 
138 static size_t get_page_count(struct mobj_ffa *mf)
139 {
140 	return ROUNDUP(mf->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
141 }
142 
143 static bool cmp_cookie(struct mobj_ffa *mf, uint64_t cookie)
144 {
145 	return mf->cookie == cookie;
146 }
147 
148 static bool cmp_ptr(struct mobj_ffa *mf, uint64_t ptr)
149 {
150 	return mf == (void *)(vaddr_t)ptr;
151 }
152 
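/*
 * List helpers: pop_from_list() unlinks and returns the first entry for
 * which cmp_func() matches val, find_in_list() only looks it up. The
 * callers are expected to hold shm_lock.
 */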
153 static struct mobj_ffa *pop_from_list(struct mobj_ffa_head *head,
154 				      bool (*cmp_func)(struct mobj_ffa *mf,
155 						       uint64_t val),
156 				      uint64_t val)
157 {
158 	struct mobj_ffa *mf = SLIST_FIRST(head);
159 	struct mobj_ffa *p = NULL;
160 
161 	if (!mf)
162 		return NULL;
163 
164 	if (cmp_func(mf, val)) {
165 		SLIST_REMOVE_HEAD(head, link);
166 		return mf;
167 	}
168 
169 	while (true) {
170 		p = SLIST_NEXT(mf, link);
171 		if (!p)
172 			return NULL;
173 		if (cmp_func(p, val)) {
174 			SLIST_REMOVE_AFTER(mf, link);
175 			return p;
176 		}
177 		mf = p;
178 	}
179 }
180 
181 static struct mobj_ffa *find_in_list(struct mobj_ffa_head *head,
182 				     bool (*cmp_func)(struct mobj_ffa *mf,
183 						      uint64_t val),
184 				     uint64_t val)
185 {
186 	struct mobj_ffa *mf = NULL;
187 
188 	SLIST_FOREACH(mf, head, link)
189 		if (cmp_func(mf, val))
190 			return mf;
191 
192 	return NULL;
193 }
194 
195 #if defined(CFG_CORE_SEL1_SPMC)
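/*
 * Frees a mobj_ffa created by mobj_ffa_sel1_spmc_new(). If the cookie
 * was allocated from shm_bits the corresponding bit is cleared again;
 * hypervisor-assigned cookies (FFA_MEMORY_HANDLE_HYPERVISOR_BIT set
 * with NS virtualization enabled) never came from shm_bits so there is
 * nothing to clear for them.
 */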
196 void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mf)
197 {
198 
199 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION) ||
200 	    !(mf->cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT)) {
201 		uint64_t mask = FFA_MEMORY_HANDLE_NON_SECURE_BIT;
202 		uint32_t exceptions = 0;
203 		int64_t i = 0;
204 
205 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
206 			mask |= SHIFT_U64(FFA_MEMORY_HANDLE_PRTN_MASK,
207 					  FFA_MEMORY_HANDLE_PRTN_SHIFT);
208 		i = mf->cookie & ~mask;
209 		assert(i >= 0 && i < NUM_SHMS);
210 
211 		exceptions = cpu_spin_lock_xsave(&shm_lock);
212 		assert(bit_test(shm_bits, i));
213 		bit_clear(shm_bits, i);
214 		cpu_spin_unlock_xrestore(&shm_lock, exceptions);
215 	}
216 
217 	assert(!mf->mm);
218 	free(mf);
219 }
220 #else /* !defined(CFG_CORE_SEL1_SPMC) */
221 struct mobj_ffa *mobj_ffa_spmc_new(uint64_t cookie, unsigned int num_pages)
222 {
223 	struct mobj_ffa *mf = NULL;
224 
225 	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
226 	mf = ffa_new(num_pages);
227 	if (mf)
228 		mf->cookie = cookie;
229 	return mf;
230 }
231 
232 void mobj_ffa_spmc_delete(struct mobj_ffa *mf)
233 {
234 	free(mf);
235 }
236 #endif /* !defined(CFG_CORE_SEL1_SPMC) */
237 
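/*
 * Records the physical pages of a contiguous region starting at pa in
 * mf->pages[], beginning at index *idx, and advances *idx past them.
 * Unless the SPMC runs at S-EL2 the region must be non-secure memory.
 */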
238 TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mf, unsigned int *idx,
239 				 paddr_t pa, unsigned int num_pages)
240 {
241 	unsigned int n = 0;
242 	size_t tot_page_count = get_page_count(mf);
243 
244 	if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
245 		return TEE_ERROR_BAD_PARAMETERS;
246 
247 	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
248 	    !core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))
249 		return TEE_ERROR_BAD_PARAMETERS;
250 
251 	for (n = 0; n < num_pages; n++)
252 		mf->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;
253 
254 	(*idx) += n;
255 	return TEE_SUCCESS;
256 }
257 
258 uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mf)
259 {
260 	return mf->cookie;
261 }
262 
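/*
 * Inserts a fully initialized mobj_ffa on the inactive list, making it
 * available to mobj_ffa_get_by_cookie(), and returns its cookie.
 */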
263 uint64_t mobj_ffa_push_to_inactive(struct mobj_ffa *mf)
264 {
265 	uint32_t exceptions = 0;
266 
267 	exceptions = cpu_spin_lock_xsave(&shm_lock);
268 	assert(!find_in_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf));
269 	assert(!find_in_list(&shm_inactive_head, cmp_cookie, mf->cookie));
270 	assert(!find_in_list(&shm_head, cmp_cookie, mf->cookie));
271 	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
272 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
273 
274 	return mf->cookie;
275 }
276 
277 static void unmap_helper(struct mobj_ffa *mf)
278 {
279 	if (mf->mm) {
280 		core_mmu_unmap_pages(tee_mm_get_smem(mf->mm),
281 				     get_page_count(mf));
282 		tee_mm_free(mf->mm);
283 		mf->mm = NULL;
284 	}
285 }
286 
287 #ifdef CFG_CORE_SEL1_SPMC
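/*
 * Handles reclaim of a memory handle by the normal world. The share can
 * only be reclaimed while the mobj is inactive and not currently
 * registered via mobj_ffa_get_by_cookie(), otherwise TEE_ERROR_BUSY is
 * returned.
 */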
288 TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie)
289 {
290 	TEE_Result res = TEE_SUCCESS;
291 	struct mobj_ffa *mf = NULL;
292 	uint32_t exceptions = 0;
293 
294 	exceptions = cpu_spin_lock_xsave(&shm_lock);
295 	mf = find_in_list(&shm_head, cmp_cookie, cookie);
296 	/*
297 	 * If the mobj is found here it's still active and cannot be
298 	 * reclaimed.
299 	 */
300 	if (mf) {
301 		DMSG("cookie %#"PRIx64" busy refc %u",
302 		     cookie, refcount_val(&mf->mobj.refc));
303 		res = TEE_ERROR_BUSY;
304 		goto out;
305 	}
306 
307 	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
308 	if (!mf) {
309 		res = TEE_ERROR_ITEM_NOT_FOUND;
310 		goto out;
311 	}
312 	/*
313 	 * If the mobj has been registered via mobj_ffa_get_by_cookie()
314 	 * but not yet unregistered with mobj_ffa_unregister_by_cookie().
315 	 */
316 	if (mf->registered_by_cookie && !mf->unregistered_by_cookie) {
317 		DMSG("cookie %#"PRIx64" busy", cookie);
318 		res = TEE_ERROR_BUSY;
319 		goto out;
320 	}
321 
322 	if (!pop_from_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf))
323 		panic();
324 	res = TEE_SUCCESS;
325 out:
326 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
327 	if (!res) {
328 		mobj_ffa_sel1_spmc_delete(mf);
329 		virt_remove_cookie(cookie);
330 	}
331 	return res;
332 }
333 #endif /*CFG_CORE_SEL1_SPMC*/
334 
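/*
 * Drops OP-TEE's use of the shared memory object identified by cookie
 * once it is inactive. With a SEL1 SPMC the object is only marked as
 * unregistered and kept until the normal world reclaims it; otherwise
 * (external SPMC) the object is deleted and the memory relinquished
 * with thread_spmc_relinquish().
 */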
335 TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie)
336 {
337 	TEE_Result res = TEE_SUCCESS;
338 	struct mobj_ffa *mf = NULL;
339 	uint32_t exceptions = 0;
340 
341 	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
342 	exceptions = cpu_spin_lock_xsave(&shm_lock);
343 	mf = find_in_list(&shm_head, cmp_cookie, cookie);
344 	/*
345 	 * If the mobj is found here it's still active and cannot be
346 	 * unregistered.
347 	 */
348 	if (mf) {
349 		DMSG("cookie %#"PRIx64" busy refc %u",
350 		     cookie, refcount_val(&mf->mobj.refc));
351 		res = TEE_ERROR_BUSY;
352 		goto out;
353 	}
354 	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
355 	/*
356 	 * If the mobj isn't found or if it has already been unregistered.
357 	 */
358 #if defined(CFG_CORE_SEL1_SPMC)
359 	if (!mf || mf->unregistered_by_cookie) {
360 		res = TEE_ERROR_ITEM_NOT_FOUND;
361 		goto out;
362 	}
363 	mf->unregistered_by_cookie = true;
364 #else
365 	if (!mf) {
366 		res = TEE_ERROR_ITEM_NOT_FOUND;
367 		goto out;
368 	}
369 	mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
370 	mobj_ffa_spmc_delete(mf);
371 	thread_spmc_relinquish(cookie);
372 #endif
373 	res = TEE_SUCCESS;
374 
375 out:
376 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
377 	return res;
378 }
379 
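/*
 * Looks up the mobj for a cookie and returns it with an elevated
 * reference count. An object found on the active list just gets its
 * refcount bumped; an inactive object is reactivated with refcount 1,
 * its size and page_offset adjusted for internal_offs, and moved to the
 * active list. Without a SEL1 SPMC the object may also be created here
 * from the FF-A RX buffer.
 *
 * Illustrative use from a caller (sketch only, error handling omitted):
 *
 *	struct mobj *mobj = mobj_ffa_get_by_cookie(cookie, offs);
 *
 *	if (mobj) {
 *		access the shared buffer through the mobj API
 *		mobj_put(mobj);
 *	}
 *
 * The final mobj_put() brings the refcount back to 0 and ends up in
 * ffa_inactivate() below.
 */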
380 struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie,
381 				    unsigned int internal_offs)
382 {
383 	struct mobj_ffa *mf = NULL;
384 	uint32_t exceptions = 0;
385 
386 	if (internal_offs >= SMALL_PAGE_SIZE)
387 		return NULL;
388 	exceptions = cpu_spin_lock_xsave(&shm_lock);
389 	mf = find_in_list(&shm_head, cmp_cookie, cookie);
390 	if (mf) {
391 		if (mf->page_offset == internal_offs) {
392 			if (!refcount_inc(&mf->mobj.refc)) {
393 				/*
394 				 * If refcount is 0, another thread has
395 				 * called mobj_put() and the count reached 0,
396 				 * but we found this mobj before ffa_inactivate()
397 				 * got the lock. Reinitialize the refcount to 1.
398 				 */
399 				refcount_set(&mf->mobj.refc, 1);
400 			}
401 			DMSG("cookie %#"PRIx64" active: refc %u",
402 			     cookie, refcount_val(&mf->mobj.refc));
403 		} else {
404 			EMSG("cookie %#"PRIx64" mismatching internal_offs got %#"PRIx16" expected %#x",
405 			     cookie, mf->page_offset, internal_offs);
406 			mf = NULL;
407 		}
408 	} else {
409 		mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
410 #if !defined(CFG_CORE_SEL1_SPMC)
411 		/* Try to retrieve it from the SPM at S-EL2 */
412 		if (mf) {
413 			DMSG("cookie %#"PRIx64" resurrecting", cookie);
414 		} else {
415 			EMSG("Populating mobj from rx buffer, cookie %#"PRIx64,
416 			     cookie);
417 			mf = thread_spmc_populate_mobj_from_rx(cookie);
418 		}
419 #endif
420 		if (mf) {
421 #if defined(CFG_CORE_SEL1_SPMC)
422 			mf->unregistered_by_cookie = false;
423 			mf->registered_by_cookie = true;
424 #endif
425 			assert(refcount_val(&mf->mobj.refc) == 0);
426 			refcount_set(&mf->mobj.refc, 1);
427 			refcount_set(&mf->mapcount, 0);
428 
429 			/*
430 			 * mf->page_offset is offset into the first page.
431 			 * This offset is assigned from the internal_offs
432 			 * parameter to this function.
433 			 *
434 			 * While a mobj_ffa is active (refc > 0) this
435 			 * will not change, but once it has been pushed to
436 			 * the inactive list it can be changed again.
437 			 *
438 			 * So below we're backing out the old
439 			 * mf->page_offset and then assigning a new one
440 			 * from internal_offs.
441 			 */
442 			mf->mobj.size += mf->page_offset;
443 			assert(!(mf->mobj.size & SMALL_PAGE_MASK));
444 			mf->mobj.size -= internal_offs;
445 			mf->page_offset = internal_offs;
446 
447 			SLIST_INSERT_HEAD(&shm_head, mf, link);
448 		}
449 	}
450 
451 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
452 
453 	if (!mf) {
454 		EMSG("Failed to get cookie %#"PRIx64" internal_offs %#x",
455 		     cookie, internal_offs);
456 		return NULL;
457 	}
458 	return &mf->mobj;
459 }
460 
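/*
 * mobj_ops implementation. ffa_get_pa() translates an offset within the
 * mobj to a physical address via the pages[] array: granule 0 yields
 * the exact byte address, SMALL_PAGE_SIZE yields the page address.
 */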
461 static TEE_Result ffa_get_pa(struct mobj *mobj, size_t offset,
462 			     size_t granule, paddr_t *pa)
463 {
464 	struct mobj_ffa *mf = to_mobj_ffa(mobj);
465 	size_t full_offset = 0;
466 	paddr_t p = 0;
467 
468 	if (!pa)
469 		return TEE_ERROR_GENERIC;
470 
471 	if (offset >= mobj->size)
472 		return TEE_ERROR_GENERIC;
473 
474 	full_offset = offset + mf->page_offset;
475 	switch (granule) {
476 	case 0:
477 		p = mf->pages[full_offset / SMALL_PAGE_SIZE] +
478 		    (full_offset & SMALL_PAGE_MASK);
479 		break;
480 	case SMALL_PAGE_SIZE:
481 		p = mf->pages[full_offset / SMALL_PAGE_SIZE];
482 		break;
483 	default:
484 		return TEE_ERROR_GENERIC;
485 	}
486 	*pa = p;
487 
488 	return TEE_SUCCESS;
489 }
490 
491 static size_t ffa_get_phys_offs(struct mobj *mobj,
492 				size_t granule __maybe_unused)
493 {
494 	assert(granule >= mobj->phys_granule);
495 
496 	return to_mobj_ffa(mobj)->page_offset;
497 }
498 
499 static void *ffa_get_va(struct mobj *mobj, size_t offset, size_t len)
500 {
501 	struct mobj_ffa *mf = to_mobj_ffa(mobj);
502 
503 	if (!mf->mm || !mobj_check_offset_and_len(mobj, offset, len))
504 		return NULL;
505 
506 	return (void *)(tee_mm_get_smem(mf->mm) + offset + mf->page_offset);
507 }
508 
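/*
 * Called through mobj_ffa_ops.free when the reference count drops to
 * zero. The refcount is re-checked under shm_lock since
 * mobj_ffa_get_by_cookie() may have resurrected the object in the
 * meantime; otherwise the object is unmapped and moved to the inactive
 * list.
 */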
509 static void ffa_inactivate(struct mobj *mobj)
510 {
511 	struct mobj_ffa *mf = to_mobj_ffa(mobj);
512 	uint32_t exceptions = 0;
513 
514 	exceptions = cpu_spin_lock_xsave(&shm_lock);
515 	/*
516 	 * If refcount isn't 0, some other thread has found this mobj in
517 	 * shm_head after the mobj_put() that brought us here, but before
518 	 * we got the lock.
519 	 */
520 	if (refcount_val(&mobj->refc)) {
521 		DMSG("cookie %#"PRIx64" was resurrected", mf->cookie);
522 		goto out;
523 	}
524 
525 	DMSG("cookie %#"PRIx64, mf->cookie);
526 	if (!pop_from_list(&shm_head, cmp_ptr, (vaddr_t)mf))
527 		panic();
528 	unmap_helper(mf);
529 	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
530 out:
531 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
532 }
533 
534 static TEE_Result ffa_get_mem_type(struct mobj *mobj __unused, uint32_t *mt)
535 {
536 	if (!mt)
537 		return TEE_ERROR_GENERIC;
538 
539 	*mt = TEE_MATTR_MEM_TYPE_CACHED;
540 
541 	return TEE_SUCCESS;
542 }
543 
544 static bool ffa_matches(struct mobj *mobj __maybe_unused, enum buf_is_attr attr)
545 {
546 	assert(mobj->ops == &mobj_ffa_ops);
547 
548 	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
549 }
550 
551 static uint64_t ffa_get_cookie(struct mobj *mobj)
552 {
553 	return to_mobj_ffa(mobj)->cookie;
554 }
555 
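/*
 * ffa_inc_map()/ffa_dec_map() lazily map the pages into the
 * MEM_AREA_NSEC_SHM virtual address space. The first inc_map allocates
 * VA space from tee_mm_shm and maps the pages; the last dec_map unmaps
 * them again. mapcount tracks the balance and shm_lock serializes the
 * slow paths.
 */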
556 static TEE_Result ffa_inc_map(struct mobj *mobj)
557 {
558 	TEE_Result res = TEE_SUCCESS;
559 	struct mobj_ffa *mf = to_mobj_ffa(mobj);
560 	uint32_t exceptions = 0;
561 	size_t sz = 0;
562 
563 	while (true) {
564 		if (refcount_inc(&mf->mapcount))
565 			return TEE_SUCCESS;
566 
567 		exceptions = cpu_spin_lock_xsave(&shm_lock);
568 
569 		if (!refcount_val(&mf->mapcount))
570 			break; /* continue to reinitialize */
571 		/*
572 		 * If another thread beat us to initialize mapcount,
573 		 * restart to make sure we still increase it.
574 		 */
575 		cpu_spin_unlock_xrestore(&shm_lock, exceptions);
576 	}
577 
578 	/*
579 	 * If we have beaten another thread calling ffa_dec_map()
580 	 * to the lock, we only need to reinitialize mapcount to 1.
581 	 */
582 	if (!mf->mm) {
583 		sz = ROUNDUP(mobj->size + mf->page_offset, SMALL_PAGE_SIZE);
584 		mf->mm = tee_mm_alloc(&tee_mm_shm, sz);
585 		if (!mf->mm) {
586 			res = TEE_ERROR_OUT_OF_MEMORY;
587 			goto out;
588 		}
589 
590 		res = core_mmu_map_pages(tee_mm_get_smem(mf->mm), mf->pages,
591 					 sz / SMALL_PAGE_SIZE,
592 					 MEM_AREA_NSEC_SHM);
593 		if (res) {
594 			tee_mm_free(mf->mm);
595 			mf->mm = NULL;
596 			goto out;
597 		}
598 	}
599 
600 	refcount_set(&mf->mapcount, 1);
601 out:
602 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
603 
604 	return res;
605 }
606 
607 static TEE_Result ffa_dec_map(struct mobj *mobj)
608 {
609 	struct mobj_ffa *mf = to_mobj_ffa(mobj);
610 	uint32_t exceptions = 0;
611 
612 	if (!refcount_dec(&mf->mapcount))
613 		return TEE_SUCCESS;
614 
615 	exceptions = cpu_spin_lock_xsave(&shm_lock);
616 	if (!refcount_val(&mf->mapcount))
617 		unmap_helper(mf);
618 	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
619 
620 	return TEE_SUCCESS;
621 }
622 
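/*
 * Boot-time initialization: carves out the MEM_AREA_SHM_VASPACE region
 * as the tee_mm_shm pool that ffa_inc_map() allocates virtual address
 * space from. Registered with preinit() below.
 */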
623 static TEE_Result mapped_shm_init(void)
624 {
625 	vaddr_t pool_start = 0;
626 	vaddr_t pool_end = 0;
627 
628 	core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
629 	if (!pool_start || !pool_end)
630 		panic("Can't find region for shmem pool");
631 
632 	if (!tee_mm_init(&tee_mm_shm, pool_start, pool_end - pool_start,
633 			 SMALL_PAGE_SHIFT,
634 			 TEE_MM_POOL_NO_FLAGS))
635 		panic("Could not create shmem pool");
636 
637 	DMSG("Shared memory address range: %#"PRIxVA", %#"PRIxVA,
638 	     pool_start, pool_end);
639 	return TEE_SUCCESS;
640 }
641 
642 static const struct mobj_ops mobj_ffa_ops = {
643 	.get_pa = ffa_get_pa,
644 	.get_phys_offs = ffa_get_phys_offs,
645 	.get_va = ffa_get_va,
646 	.get_mem_type = ffa_get_mem_type,
647 	.matches = ffa_matches,
648 	.free = ffa_inactivate,
649 	.get_cookie = ffa_get_cookie,
650 	.inc_map = ffa_inc_map,
651 	.dec_map = ffa_dec_map,
652 };
653 
654 preinit(mapped_shm_init);
655