// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2020, Linaro Limited
 */

#include <assert.h>
#include <bitstring.h>
#include <ffa.h>
#include <initcall.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/thread_spmc.h>
#include <mm/mobj.h>
#include <sys/queue.h>

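/*
 * struct mobj_ffa - mobj for FF-A shared memory
 * @mobj:	generic mobj, using mobj_ffa_ops below
 * @link:	list link, the object sits on either shm_head (active) or
 *		shm_inactive_head (inactive)
 * @cookie:	FF-A memory handle identifying the shared memory object
 * @mm:		virtual address range when the pages are mapped, else NULL
 * @mapcount:	number of mappings, the pages are unmapped when it reaches 0
 * @page_offset: offset into the first page where the buffer starts
 * @registered_by_cookie:   set when retrieved with mobj_ffa_get_by_cookie()
 * @unregistered_by_cookie: set by mobj_ffa_unregister_by_cookie()
 * @pages:	physical address of each page of the share
 */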
struct mobj_ffa {
	struct mobj mobj;
	SLIST_ENTRY(mobj_ffa) link;
	uint64_t cookie;
	tee_mm_entry_t *mm;
	struct refcount mapcount;
	uint16_t page_offset;
#ifdef CFG_CORE_SEL1_SPMC
	bool registered_by_cookie;
	bool unregistered_by_cookie;
#endif
	paddr_t pages[];
};

SLIST_HEAD(mobj_ffa_head, mobj_ffa);

#ifdef CFG_CORE_SEL1_SPMC
#define NUM_SHMS	64
static bitstr_t bit_decl(shm_bits, NUM_SHMS);
#endif

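/*
 * A mobj_ffa is either on the active list (shm_head) while its reference
 * count is above zero, or on the inactive list (shm_inactive_head) while
 * the cookie is known but the mobj is currently unused. Both lists and the
 * SEL1 SPMC cookie bitmap are protected by shm_lock.
 */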
static struct mobj_ffa_head shm_head = SLIST_HEAD_INITIALIZER(shm_head);
static struct mobj_ffa_head shm_inactive_head =
	SLIST_HEAD_INITIALIZER(shm_inactive_head);

static unsigned int shm_lock = SPINLOCK_UNLOCK;

static const struct mobj_ops mobj_ffa_ops;

static struct mobj_ffa *to_mobj_ffa(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_ffa_ops);
	return container_of(mobj, struct mobj_ffa, mobj);
}

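/*
 * Size in bytes needed for a struct mobj_ffa tracking @num_pages pages,
 * or 0 on arithmetic overflow.
 */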
static size_t shm_size(size_t num_pages)
{
	size_t s = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_ffa), s, &s))
		return 0;
	return s;
}

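/*
 * Allocate and initialize a mobj_ffa covering @num_pages pages. The cookie
 * and the physical pages are filled in by the callers.
 */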
static struct mobj_ffa *ffa_new(unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	size_t s = 0;

	if (!num_pages)
		return NULL;

	s = shm_size(num_pages);
	if (!s)
		return NULL;
	mf = calloc(1, s);
	if (!mf)
		return NULL;

	mf->mobj.ops = &mobj_ffa_ops;
	mf->mobj.size = num_pages * SMALL_PAGE_SIZE;
	mf->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&mf->mobj.refc, 0);

	return mf;
}

#ifdef CFG_CORE_SEL1_SPMC
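/*
 * When OP-TEE itself acts as the SPMC at S-EL1 it also allocates the
 * memory handles (cookies): take a free index from the shm_bits bitmap
 * and use it as the cookie of the new mobj.
 */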
struct mobj_ffa *mobj_ffa_sel1_spmc_new(unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	mf = ffa_new(num_pages);
	if (!mf)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	bit_ffc(shm_bits, NUM_SHMS, &i);
	if (i != -1) {
		bit_set(shm_bits, i);
		/*
		 * Set bit 44 as well so that one of the upper 32 bits of
		 * the cookie is used too, for testing.
		 */
		mf->cookie = i | FFA_MEMORY_HANDLE_NONE_SECURE_BIT;
	}
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (i == -1) {
		free(mf);
		return NULL;
	}

	return mf;
}
#endif /*CFG_CORE_SEL1_SPMC*/


static size_t get_page_count(struct mobj_ffa *mf)
{
	return ROUNDUP(mf->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
}

static bool cmp_cookie(struct mobj_ffa *mf, uint64_t cookie)
{
	return mf->cookie == cookie;
}

static bool cmp_ptr(struct mobj_ffa *mf, uint64_t ptr)
{
	return mf == (void *)(vaddr_t)ptr;
}

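/*
 * pop_from_list() and find_in_list() scan a list for the entry matching
 * @val according to @cmp_func (cmp_cookie or cmp_ptr); the former also
 * unlinks the matching entry. Callers must hold shm_lock.
 */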
static struct mobj_ffa *pop_from_list(struct mobj_ffa_head *head,
				      bool (*cmp_func)(struct mobj_ffa *mf,
						       uint64_t val),
				      uint64_t val)
{
	struct mobj_ffa *mf = SLIST_FIRST(head);
	struct mobj_ffa *p = NULL;

	if (!mf)
		return NULL;

	if (cmp_func(mf, val)) {
		SLIST_REMOVE_HEAD(head, link);
		return mf;
	}

	while (true) {
		p = SLIST_NEXT(mf, link);
		if (!p)
			return NULL;
		if (cmp_func(p, val)) {
			SLIST_REMOVE_AFTER(mf, link);
			return p;
		}
		mf = p;
	}
}

static struct mobj_ffa *find_in_list(struct mobj_ffa_head *head,
				     bool (*cmp_func)(struct mobj_ffa *mf,
						      uint64_t val),
				     uint64_t val)
{
	struct mobj_ffa *mf = NULL;

	SLIST_FOREACH(mf, head, link)
		if (cmp_func(mf, val))
			return mf;

	return NULL;
}

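/*
 * With CFG_CORE_SEL1_SPMC the cookie was allocated from the shm_bits
 * bitmap in mobj_ffa_sel1_spmc_new() and is released on delete. Without
 * it the cookie is owned by the external SPMC, so new objects are created
 * with a caller supplied cookie and deleted without touching any bitmap.
 */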
#if defined(CFG_CORE_SEL1_SPMC)
void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mf)
{
	int i = mf->cookie & ~BIT64(44);
	uint32_t exceptions = 0;

	assert(i >= 0 && i < NUM_SHMS);

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	assert(bit_test(shm_bits, i));
	bit_clear(shm_bits, i);
	assert(!mf->mm);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	free(mf);
}
#else /* !defined(CFG_CORE_SEL1_SPMC) */
struct mobj_ffa *mobj_ffa_spmc_new(uint64_t cookie, unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;

	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
	mf = ffa_new(num_pages);
	if (mf)
		mf->cookie = cookie;
	return mf;
}

void mobj_ffa_spmc_delete(struct mobj_ffa *mf)
{
	free(mf);
}
#endif /* !defined(CFG_CORE_SEL1_SPMC) */

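/*
 * Fill in the physical address of @num_pages contiguous pages starting at
 * @pa, beginning at index *@idx in the page array. *@idx is advanced so
 * the function can be called repeatedly while parsing a memory share
 * descriptor.
 */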
TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mf, unsigned int *idx,
				 paddr_t pa, unsigned int num_pages)
{
	unsigned int n = 0;
	size_t tot_page_count = get_page_count(mf);

	if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
	    !core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))
		return TEE_ERROR_BAD_PARAMETERS;

	for (n = 0; n < num_pages; n++)
		mf->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;

	(*idx) += n;
	return TEE_SUCCESS;
}

uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mf)
{
	return mf->cookie;
}

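/*
 * Insert a fully initialized mobj_ffa on the inactive list and return its
 * cookie. From this point on the object is reachable via
 * mobj_ffa_get_by_cookie().
 */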
uint64_t mobj_ffa_push_to_inactive(struct mobj_ffa *mf)
{
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	assert(!find_in_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf));
	assert(!find_in_list(&shm_inactive_head, cmp_cookie, mf->cookie));
	assert(!find_in_list(&shm_head, cmp_cookie, mf->cookie));
	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return mf->cookie;
}

static void unmap_helper(struct mobj_ffa *mf)
{
	if (mf->mm) {
		core_mmu_unmap_pages(tee_mm_get_smem(mf->mm),
				     get_page_count(mf));
		tee_mm_free(mf->mm);
		mf->mm = NULL;
	}
}

#ifdef CFG_CORE_SEL1_SPMC
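/*
 * Called when the normal world asks to reclaim a memory share. The share
 * can only be reclaimed while the mobj is inactive and not retained via
 * mobj_ffa_get_by_cookie(); on success the mobj is deleted and its cookie
 * released.
 */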
TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * reclaimed.
	 */
	if (mf) {
		DMSG("cookie %#"PRIx64" busy refc %u",
		     cookie, refcount_val(&mf->mobj.refc));
		res = TEE_ERROR_BUSY;
		goto out;
	}

	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	if (!mf) {
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	/*
	 * If the mobj has been registered via mobj_ffa_get_by_cookie()
	 * but not unregistered yet with mobj_ffa_unregister_by_cookie().
	 */
	if (mf->registered_by_cookie && !mf->unregistered_by_cookie) {
		DMSG("cookie %#"PRIx64" busy", cookie);
		res = TEE_ERROR_BUSY;
		goto out;
	}

	if (!pop_from_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf))
		panic();
	res = TEE_SUCCESS;
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	if (!res)
		mobj_ffa_sel1_spmc_delete(mf);
	return res;
}
#endif /*CFG_CORE_SEL1_SPMC*/

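/*
 * Drop the registration made with mobj_ffa_get_by_cookie(). With
 * CFG_CORE_SEL1_SPMC the mobj stays on the inactive list so the share can
 * be reclaimed later, without it the mobj is freed and the memory is
 * relinquished to the SPMC.
 */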
TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * unregistered.
	 */
	if (mf) {
		DMSG("cookie %#"PRIx64" busy refc %u",
		     cookie, refcount_val(&mf->mobj.refc));
		res = TEE_ERROR_BUSY;
		goto out;
	}
	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	/*
	 * If the mobj isn't found or if it already has been unregistered.
	 */
#if defined(CFG_CORE_SEL1_SPMC)
	if (!mf || mf->unregistered_by_cookie) {
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	mf->unregistered_by_cookie = true;
#else
	if (!mf) {
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
	mobj_ffa_spmc_delete(mf);
	thread_spmc_relinquish(cookie);
#endif
	res = TEE_SUCCESS;

out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	return res;
}

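/*
 * Look up the mobj identified by @cookie and return it with an active
 * reference, moving it from the inactive to the active list if needed.
 * @internal_offs is the offset into the first page at which the shared
 * buffer starts.
 *
 * A rough sketch of the intended call sequence (the surrounding steps are
 * illustrative, not taken from this file):
 *
 *	mobj = mobj_ffa_get_by_cookie(cookie, offs);
 *	... access the buffer via mobj_inc_map()/mobj_get_va() ...
 *	mobj_put(mobj);				 // may inactivate the mobj
 *	mobj_ffa_unregister_by_cookie(cookie);	 // done with the cookie
 */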
struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie,
				    unsigned int internal_offs)
{
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	if (internal_offs >= SMALL_PAGE_SIZE)
		return NULL;
	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	if (mf) {
		if (mf->page_offset == internal_offs) {
			if (!refcount_inc(&mf->mobj.refc)) {
				/*
				 * If the refcount is 0 another thread has
				 * called mobj_put() and brought it to 0,
				 * but we found the mobj before
				 * ffa_inactivate() got the lock. Simply
				 * reinitialize the refcount.
				 */
				refcount_set(&mf->mobj.refc, 1);
			}
			DMSG("cookie %#"PRIx64" active: refc %d",
			     cookie, refcount_val(&mf->mobj.refc));
		} else {
			EMSG("cookie %#"PRIx64" mismatching internal_offs got %#"PRIx16" expected %#x",
			     cookie, mf->page_offset, internal_offs);
			mf = NULL;
		}
	} else {
		mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
#if !defined(CFG_CORE_SEL1_SPMC)
		/* Try to retrieve it from the SPM at S-EL2 */
		if (mf) {
			DMSG("cookie %#"PRIx64" resurrecting", cookie);
		} else {
			EMSG("Populating mobj from rx buffer, cookie %#"PRIx64,
			     cookie);
			mf = thread_spmc_populate_mobj_from_rx(cookie);
		}
#endif
		if (mf) {
#if defined(CFG_CORE_SEL1_SPMC)
			mf->unregistered_by_cookie = false;
			mf->registered_by_cookie = true;
#endif
			assert(refcount_val(&mf->mobj.refc) == 0);
			refcount_set(&mf->mobj.refc, 1);
			refcount_set(&mf->mapcount, 0);

			/*
			 * mf->page_offset is the offset into the first
			 * page. It is assigned from the internal_offs
			 * parameter of this function.
			 *
			 * While a mobj_ffa is active (refcount > 0) it
			 * does not change, but it may be given a new
			 * value each time the mobj is taken off the
			 * inactive list.
			 *
			 * So back out the old mf->page_offset from the
			 * size and then apply the new internal_offs.
			 */
			mf->mobj.size += mf->page_offset;
			assert(!(mf->mobj.size & SMALL_PAGE_MASK));
			mf->mobj.size -= internal_offs;
			mf->page_offset = internal_offs;

			SLIST_INSERT_HEAD(&shm_head, mf, link);
		}
	}

	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (!mf) {
		EMSG("Failed to get cookie %#"PRIx64" internal_offs %#x",
		     cookie, internal_offs);
		return NULL;
	}
	return &mf->mobj;
}

static TEE_Result ffa_get_pa(struct mobj *mobj, size_t offset,
			     size_t granule, paddr_t *pa)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	size_t full_offset = 0;
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offset >= mobj->size)
		return TEE_ERROR_GENERIC;

	full_offset = offset + mf->page_offset;
	switch (granule) {
	case 0:
		p = mf->pages[full_offset / SMALL_PAGE_SIZE] +
		    (full_offset & SMALL_PAGE_MASK);
		break;
	case SMALL_PAGE_SIZE:
		p = mf->pages[full_offset / SMALL_PAGE_SIZE];
		break;
	default:
		return TEE_ERROR_GENERIC;
	}
	*pa = p;

	return TEE_SUCCESS;
}

static size_t ffa_get_phys_offs(struct mobj *mobj,
				size_t granule __maybe_unused)
{
	assert(granule >= mobj->phys_granule);

	return to_mobj_ffa(mobj)->page_offset;
}

static void *ffa_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);

	if (!mf->mm || !mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return (void *)(tee_mm_get_smem(mf->mm) + offset + mf->page_offset);
}

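/*
 * Called through mobj_ffa_ops.free when the reference count drops to 0:
 * unless another thread has resurrected the mobj in the meantime, move it
 * from the active to the inactive list and unmap its pages.
 */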
static void ffa_inactivate(struct mobj *mobj)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	/*
	 * If refcount isn't 0 some other thread has found this mobj in
	 * shm_head after the mobj_put() that put us here and before we got
	 * the lock.
	 */
	if (refcount_val(&mobj->refc)) {
		DMSG("cookie %#"PRIx64" was resurrected", mf->cookie);
		goto out;
	}

	DMSG("cookie %#"PRIx64, mf->cookie);
	if (!pop_from_list(&shm_head, cmp_ptr, (vaddr_t)mf))
		panic();
	unmap_helper(mf);
	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
}

static TEE_Result ffa_get_mem_type(struct mobj *mobj __unused, uint32_t *mt)
{
	if (!mt)
		return TEE_ERROR_GENERIC;

	*mt = TEE_MATTR_MEM_TYPE_CACHED;

	return TEE_SUCCESS;
}

static bool ffa_matches(struct mobj *mobj __maybe_unused, enum buf_is_attr attr)
{
	assert(mobj->ops == &mobj_ffa_ops);

	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

static uint64_t ffa_get_cookie(struct mobj *mobj)
{
	return to_mobj_ffa(mobj)->cookie;
}

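/*
 * Map the pages of the mobj into the SHM virtual address space the first
 * time a mapping is needed. The fast path increments mapcount without
 * taking the lock; the slow path takes shm_lock and maps the pages while
 * mapcount is still 0.
 */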
static TEE_Result ffa_inc_map(struct mobj *mobj)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;
	size_t sz = 0;

	while (true) {
		if (refcount_inc(&mf->mapcount))
			return TEE_SUCCESS;

		exceptions = cpu_spin_lock_xsave(&shm_lock);

		if (!refcount_val(&mf->mapcount))
			break; /* continue to reinitialize */
		/*
		 * If another thread beat us to initialize mapcount,
		 * restart to make sure we still increase it.
		 */
		cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	}

	/*
	 * If we have beaten another thread calling ffa_dec_map() to the
	 * lock, the pages are still mapped and we only need to
	 * reinitialize mapcount to 1.
	 */
	if (!mf->mm) {
		sz = ROUNDUP(mobj->size + mf->page_offset, SMALL_PAGE_SIZE);
		mf->mm = tee_mm_alloc(&tee_mm_shm, sz);
		if (!mf->mm) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto out;
		}

		res = core_mmu_map_pages(tee_mm_get_smem(mf->mm), mf->pages,
					 sz / SMALL_PAGE_SIZE,
					 MEM_AREA_NSEC_SHM);
		if (res) {
			tee_mm_free(mf->mm);
			mf->mm = NULL;
			goto out;
		}
	}

	refcount_set(&mf->mapcount, 1);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return res;
}

static TEE_Result ffa_dec_map(struct mobj *mobj)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;

	if (!refcount_dec(&mf->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	if (!refcount_val(&mf->mapcount))
		unmap_helper(mf);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return TEE_SUCCESS;
}

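/*
 * Reserve the MEM_AREA_SHM_VASPACE virtual address range as the pool from
 * which shared memory mappings are allocated by ffa_inc_map().
 */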
static TEE_Result mapped_shm_init(void)
{
	vaddr_t pool_start = 0;
	vaddr_t pool_end = 0;

	core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
	if (!pool_start || !pool_end)
		panic("Can't find region for shmem pool");

	if (!tee_mm_init(&tee_mm_shm, pool_start, pool_end - pool_start,
			 SMALL_PAGE_SHIFT, TEE_MM_POOL_NO_FLAGS))
		panic("Could not create shmem pool");

	DMSG("Shared memory address range: %#"PRIxVA", %#"PRIxVA,
	     pool_start, pool_end);
	return TEE_SUCCESS;
}

static const struct mobj_ops mobj_ffa_ops = {
	.get_pa = ffa_get_pa,
	.get_phys_offs = ffa_get_phys_offs,
	.get_va = ffa_get_va,
	.get_mem_type = ffa_get_mem_type,
	.matches = ffa_matches,
	.free = ffa_inactivate,
	.get_cookie = ffa_get_cookie,
	.inc_map = ffa_inc_map,
	.dec_map = ffa_dec_map,
};

preinit(mapped_shm_init);