// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2020, Linaro Limited
 */

#include <assert.h>
#include <bitstring.h>
#include <ffa.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/thread_spmc.h>
#include <mm/mobj.h>
#include <sys/queue.h>

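/*
 * A struct mobj_ffa represents a memory region shared over FF-A and is
 * identified by a 64-bit cookie (the FF-A memory handle). The physical
 * pages are recorded in the flexible array member @pages and mapped on
 * demand via the @mm virtual address range, with @mapcount counting how
 * many users currently need the mapping.
 */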
struct mobj_ffa {
	struct mobj mobj;
	SLIST_ENTRY(mobj_ffa) link;
	uint64_t cookie;
	tee_mm_entry_t *mm;
	struct refcount mapcount;
	uint16_t page_offset;
#ifdef CFG_CORE_SEL1_SPMC
	bool registered_by_cookie;
	bool unregistered_by_cookie;
#endif
	paddr_t pages[];
};

SLIST_HEAD(mobj_ffa_head, mobj_ffa);

#ifdef CFG_CORE_SEL1_SPMC
#define NUM_SHMS	64
static bitstr_t bit_decl(shm_bits, NUM_SHMS);
#endif

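/*
 * Objects with at least one active user live on shm_head; objects that
 * have been shared with us but currently have no user live on
 * shm_inactive_head. Both lists, and the shm_bits allocator above (with
 * CFG_CORE_SEL1_SPMC), are protected by the shm_lock spinlock below.
 */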
static struct mobj_ffa_head shm_head = SLIST_HEAD_INITIALIZER(shm_head);
static struct mobj_ffa_head shm_inactive_head =
	SLIST_HEAD_INITIALIZER(shm_inactive_head);

static unsigned int shm_lock = SPINLOCK_UNLOCK;

const struct mobj_ops mobj_ffa_ops;

static struct mobj_ffa *to_mobj_ffa(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_ffa_ops);
	return container_of(mobj, struct mobj_ffa, mobj);
}

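/*
 * Returns the allocation size needed for a struct mobj_ffa tracking
 * @num_pages pages, or 0 on arithmetic overflow.
 */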
static size_t shm_size(size_t num_pages)
{
	size_t s = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_ffa), s, &s))
		return 0;
	return s;
}

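/*
 * Allocates and minimally initializes a mobj_ffa. The cookie and the
 * page list are filled in later by the callers.
 */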
static struct mobj_ffa *ffa_new(unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	size_t s = 0;

	if (!num_pages)
		return NULL;

	s = shm_size(num_pages);
	if (!s)
		return NULL;
	mf = calloc(1, s);
	if (!mf)
		return NULL;

	mf->mobj.ops = &mobj_ffa_ops;
	mf->mobj.size = num_pages * SMALL_PAGE_SIZE;
	mf->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&mf->mobj.refc, 0);

	return mf;
}

#ifdef CFG_CORE_SEL1_SPMC
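/*
 * When OP-TEE itself acts as the S-EL1 SPMC it also allocates the
 * memory handles (cookies): a handle is a free index in the shm_bits
 * bitmap, which limits the number of simultaneous sharings to NUM_SHMS.
 */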
struct mobj_ffa *mobj_ffa_sel1_spmc_new(unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	mf = ffa_new(num_pages);
	if (!mf)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	bit_ffc(shm_bits, NUM_SHMS, &i);
	if (i != -1) {
		bit_set(shm_bits, i);
		/*
		 * Setting bit 44 to use one of the upper 32 bits too for
		 * testing.
		 */
		mf->cookie = i | FFA_MEMORY_HANDLE_NON_SECURE_BIT;
	}
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (i == -1) {
		free(mf);
		return NULL;
	}

	return mf;
}
#endif /*CFG_CORE_SEL1_SPMC*/

static size_t get_page_count(struct mobj_ffa *mf)
{
	return ROUNDUP(mf->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
}

static bool cmp_cookie(struct mobj_ffa *mf, uint64_t cookie)
{
	return mf->cookie == cookie;
}

static bool cmp_ptr(struct mobj_ffa *mf, uint64_t ptr)
{
	return mf == (void *)(vaddr_t)ptr;
}

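/*
 * Removes and returns the first element of @head matching @cmp_func, or
 * NULL if there's no match. Must be called with shm_lock held.
 */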
static struct mobj_ffa *pop_from_list(struct mobj_ffa_head *head,
				      bool (*cmp_func)(struct mobj_ffa *mf,
						       uint64_t val),
				      uint64_t val)
{
	struct mobj_ffa *mf = SLIST_FIRST(head);
	struct mobj_ffa *p = NULL;

	if (!mf)
		return NULL;

	if (cmp_func(mf, val)) {
		SLIST_REMOVE_HEAD(head, link);
		return mf;
	}

	while (true) {
		p = SLIST_NEXT(mf, link);
		if (!p)
			return NULL;
		if (cmp_func(p, val)) {
			SLIST_REMOVE_AFTER(mf, link);
			return p;
		}
		mf = p;
	}
}

static struct mobj_ffa *find_in_list(struct mobj_ffa_head *head,
				     bool (*cmp_func)(struct mobj_ffa *mf,
						      uint64_t val),
				     uint64_t val)
{
	struct mobj_ffa *mf = NULL;

	SLIST_FOREACH(mf, head, link)
		if (cmp_func(mf, val))
			return mf;

	return NULL;
}

#if defined(CFG_CORE_SEL1_SPMC)
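/*
 * Frees the mobj and returns its cookie index to the shm_bits bitmap.
 * The mobj must already be off both lists and unmapped.
 */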
void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mf)
{
	int i = mf->cookie & ~FFA_MEMORY_HANDLE_NON_SECURE_BIT;
	uint32_t exceptions = 0;

	assert(i >= 0 && i < NUM_SHMS);

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	assert(bit_test(shm_bits, i));
	bit_clear(shm_bits, i);
	assert(!mf->mm);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	free(mf);
}
#else /* !defined(CFG_CORE_SEL1_SPMC) */
struct mobj_ffa *mobj_ffa_spmc_new(uint64_t cookie, unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;

	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
	mf = ffa_new(num_pages);
	if (mf)
		mf->cookie = cookie;
	return mf;
}

void mobj_ffa_spmc_delete(struct mobj_ffa *mf)
{
	free(mf);
}
#endif /* !defined(CFG_CORE_SEL1_SPMC) */

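/*
 * Records @num_pages physical pages starting at @pa in the mobj's page
 * array, beginning at index *@idx which is advanced accordingly. The
 * pages must be non-secure memory.
 */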
TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mf, unsigned int *idx,
				 paddr_t pa, unsigned int num_pages)
{
	unsigned int n = 0;
	size_t tot_page_count = get_page_count(mf);

	if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))
		return TEE_ERROR_BAD_PARAMETERS;

	for (n = 0; n < num_pages; n++)
		mf->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;

	(*idx) += n;
	return TEE_SUCCESS;
}

uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mf)
{
	return mf->cookie;
}

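/*
 * Inserts a newly created mobj on the inactive list, making it
 * available for lookup by cookie. Returns the cookie for convenience.
 */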
uint64_t mobj_ffa_push_to_inactive(struct mobj_ffa *mf)
{
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	assert(!find_in_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf));
	assert(!find_in_list(&shm_inactive_head, cmp_cookie, mf->cookie));
	assert(!find_in_list(&shm_head, cmp_cookie, mf->cookie));
	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return mf->cookie;
}

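/* Tears down the mapping, if any. Must be called with shm_lock held. */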
static void unmap_helper(struct mobj_ffa *mf)
{
	if (mf->mm) {
		core_mmu_unmap_pages(tee_mm_get_smem(mf->mm),
				     get_page_count(mf));
		tee_mm_free(mf->mm);
		mf->mm = NULL;
	}
}

#ifdef CFG_CORE_SEL1_SPMC
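/*
 * Handles a request from the normal world to reclaim memory identified
 * by @cookie: it succeeds only if the mobj is inactive and not held via
 * mobj_ffa_get_by_cookie(), in which case the mobj is deleted.
 */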
TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * reclaimed.
	 */
	if (mf) {
		DMSG("cookie %#"PRIx64" busy refc %u",
		     cookie, refcount_val(&mf->mobj.refc));
		res = TEE_ERROR_BUSY;
		goto out;
	}

	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	if (!mf) {
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	/*
	 * A mobj that has been registered via mobj_ffa_get_by_cookie()
	 * but not yet unregistered with mobj_ffa_unregister_by_cookie()
	 * is still in use and cannot be reclaimed.
	 */
	if (mf->registered_by_cookie && !mf->unregistered_by_cookie) {
		DMSG("cookie %#"PRIx64" busy", cookie);
		res = TEE_ERROR_BUSY;
		goto out;
	}

	if (!pop_from_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf))
		panic();
	res = TEE_SUCCESS;
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	if (!res)
		mobj_ffa_sel1_spmc_delete(mf);
	return res;
}
#endif /*CFG_CORE_SEL1_SPMC*/

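/*
 * Drops the registration established by mobj_ffa_get_by_cookie(). With
 * an S-EL2 SPMC the mobj is deleted and the memory relinquished; with
 * the S-EL1 SPMC it stays on the inactive list until reclaimed.
 */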
TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * unregistered.
	 */
	if (mf) {
		DMSG("cookie %#"PRIx64" busy refc %u",
		     cookie, refcount_val(&mf->mobj.refc));
		res = TEE_ERROR_BUSY;
		goto out;
	}
	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	/*
	 * Bail out if the mobj isn't found or if it has already been
	 * unregistered.
	 */
#if defined(CFG_CORE_SEL1_SPMC)
	if (!mf || mf->unregistered_by_cookie) {
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	mf->unregistered_by_cookie = true;
#else
	if (!mf) {
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
	mobj_ffa_spmc_delete(mf);
	thread_spmc_relinquish(cookie);
#endif
	res = TEE_SUCCESS;

out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	return res;
}

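/*
 * Looks up a shared memory object by cookie and returns it with an
 * elevated refcount, resurrecting it from the inactive list if needed.
 * A sketch of typical usage (illustrative only, not code from this
 * file):
 *
 *	struct mobj *mobj = mobj_ffa_get_by_cookie(cookie, 0);
 *
 *	if (mobj) {
 *		... map and access the buffer through the mobj ops ...
 *		mobj_put(mobj);
 *	}
 *
 * The final mobj_put() drops the refcount to 0, which in turn triggers
 * ffa_inactivate() below.
 */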
struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie,
				    unsigned int internal_offs)
{
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	if (internal_offs >= SMALL_PAGE_SIZE)
		return NULL;
	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	if (mf) {
		if (mf->page_offset == internal_offs) {
			if (!refcount_inc(&mf->mobj.refc)) {
				/*
				 * If refcount is 0, some other thread
				 * has called mobj_put() and brought the
				 * count to 0, but we found the mobj
				 * before ffa_inactivate() got the lock
				 * to remove it. Let's reinitialize it.
				 */
				refcount_set(&mf->mobj.refc, 1);
			}
			DMSG("cookie %#"PRIx64" active: refc %u",
			     cookie, refcount_val(&mf->mobj.refc));
		} else {
			EMSG("cookie %#"PRIx64" mismatching internal_offs got %#"PRIx16" expected %#x",
			     cookie, mf->page_offset, internal_offs);
			mf = NULL;
		}
	} else {
		mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
#if !defined(CFG_CORE_SEL1_SPMC)
		/* Try to retrieve it from the SPM at S-EL2 */
		if (mf) {
			DMSG("cookie %#"PRIx64" resurrecting", cookie);
		} else {
			EMSG("Populating mobj from rx buffer, cookie %#"PRIx64,
			     cookie);
			mf = thread_spmc_populate_mobj_from_rx(cookie);
		}
#endif
		if (mf) {
#if defined(CFG_CORE_SEL1_SPMC)
			mf->unregistered_by_cookie = false;
			mf->registered_by_cookie = true;
#endif
			assert(refcount_val(&mf->mobj.refc) == 0);
			refcount_set(&mf->mobj.refc, 1);
			refcount_set(&mf->mapcount, 0);

			/*
			 * mf->page_offset is the offset into the first
			 * page, assigned from the internal_offs
			 * parameter to this function.
			 *
			 * While a mobj_ffa is active (refcount > 0) this
			 * offset doesn't change, but it may be assigned
			 * a new value each time the mobj leaves the
			 * inactive list.
			 *
			 * So below we back out the old mf->page_offset
			 * and then assign a new one from internal_offs.
			 */
			mf->mobj.size += mf->page_offset;
			assert(!(mf->mobj.size & SMALL_PAGE_MASK));
			mf->mobj.size -= internal_offs;
			mf->page_offset = internal_offs;

			SLIST_INSERT_HEAD(&shm_head, mf, link);
		}
	}

	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (!mf) {
		EMSG("Failed to get cookie %#"PRIx64" internal_offs %#x",
		     cookie, internal_offs);
		return NULL;
	}
	return &mf->mobj;
}

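/*
 * Translates an offset within the mobj to a physical address. A granule
 * of 0 yields the exact address; SMALL_PAGE_SIZE yields the page base.
 */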
static TEE_Result ffa_get_pa(struct mobj *mobj, size_t offset,
			     size_t granule, paddr_t *pa)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	size_t full_offset = 0;
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offset >= mobj->size)
		return TEE_ERROR_GENERIC;

	full_offset = offset + mf->page_offset;
	switch (granule) {
	case 0:
		p = mf->pages[full_offset / SMALL_PAGE_SIZE] +
		    (full_offset & SMALL_PAGE_MASK);
		break;
	case SMALL_PAGE_SIZE:
		p = mf->pages[full_offset / SMALL_PAGE_SIZE];
		break;
	default:
		return TEE_ERROR_GENERIC;
	}
	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(ffa_get_pa);

static size_t ffa_get_phys_offs(struct mobj *mobj,
				size_t granule __maybe_unused)
{
	assert(granule >= mobj->phys_granule);

	return to_mobj_ffa(mobj)->page_offset;
}

static void *ffa_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);

	if (!mf->mm || !mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return (void *)(tee_mm_get_smem(mf->mm) + offset + mf->page_offset);
}

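/*
 * Backend for mobj_put(): runs when the refcount drops to 0 and moves
 * the mobj back to the inactive list, unless another thread resurrected
 * it before we took the lock.
 */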
static void ffa_inactivate(struct mobj *mobj)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	/*
	 * If refcount isn't 0 some other thread has found this mobj in
	 * shm_head after the mobj_put() that put us here and before we got
	 * the lock.
	 */
	if (refcount_val(&mobj->refc)) {
		DMSG("cookie %#"PRIx64" was resurrected", mf->cookie);
		goto out;
	}

	DMSG("cookie %#"PRIx64, mf->cookie);
	if (!pop_from_list(&shm_head, cmp_ptr, (vaddr_t)mf))
		panic();
	unmap_helper(mf);
	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
}

static TEE_Result ffa_get_mem_type(struct mobj *mobj __unused, uint32_t *mt)
{
	if (!mt)
		return TEE_ERROR_GENERIC;

	*mt = TEE_MATTR_MEM_TYPE_CACHED;

	return TEE_SUCCESS;
}

static bool ffa_matches(struct mobj *mobj __maybe_unused, enum buf_is_attr attr)
{
	assert(mobj->ops == &mobj_ffa_ops);

	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

static uint64_t ffa_get_cookie(struct mobj *mobj)
{
	return to_mobj_ffa(mobj)->cookie;
}

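/*
 * Maps the pages into the SHM virtual address space on the 0 -> 1
 * mapcount transition. The lock-free fast path works because mapcount
 * is only ever reinitialized from 0 with shm_lock held.
 */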
static TEE_Result ffa_inc_map(struct mobj *mobj)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;
	size_t sz = 0;

	while (true) {
		if (refcount_inc(&mf->mapcount))
			return TEE_SUCCESS;

		exceptions = cpu_spin_lock_xsave(&shm_lock);

		if (!refcount_val(&mf->mapcount))
			break; /* continue to reinitialize */
		/*
		 * If another thread beat us to initialize mapcount,
		 * restart to make sure we still increase it.
		 */
		cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	}

	/*
	 * If we have beaten another thread calling ffa_dec_map() to the
	 * lock, the pages are still mapped and we only need to
	 * reinitialize mapcount to 1.
	 */
	if (!mf->mm) {
		sz = ROUNDUP(mobj->size + mf->page_offset, SMALL_PAGE_SIZE);
		mf->mm = tee_mm_alloc(&tee_mm_shm, sz);
		if (!mf->mm) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto out;
		}

		res = core_mmu_map_pages(tee_mm_get_smem(mf->mm), mf->pages,
					 sz / SMALL_PAGE_SIZE,
					 MEM_AREA_NSEC_SHM);
		if (res) {
			tee_mm_free(mf->mm);
			mf->mm = NULL;
			goto out;
		}
	}

	refcount_set(&mf->mapcount, 1);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return res;
}

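/* Unmaps the pages once the last mapping reference is dropped. */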
static TEE_Result ffa_dec_map(struct mobj *mobj)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;

	if (!refcount_dec(&mf->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	if (!refcount_val(&mf->mapcount))
		unmap_helper(mf);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return TEE_SUCCESS;
}

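/*
 * Initializes the tee_mm_shm pool from the MEM_AREA_SHM_VASPACE range;
 * ffa_inc_map() allocates its virtual address ranges from this pool.
 */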
static TEE_Result mapped_shm_init(void)
{
	vaddr_t pool_start = 0;
	vaddr_t pool_end = 0;

	core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
	if (!pool_start || !pool_end)
		panic("Can't find region for shmem pool");

	if (!tee_mm_init(&tee_mm_shm, pool_start, pool_end, SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("Could not create shmem pool");

	DMSG("Shared memory address range: %#"PRIxVA", %#"PRIxVA,
	     pool_start, pool_end);
	return TEE_SUCCESS;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_ffa_ops
__weak __relrodata_unpaged("mobj_ffa_ops") = {
	.get_pa = ffa_get_pa,
	.get_phys_offs = ffa_get_phys_offs,
	.get_va = ffa_get_va,
	.get_mem_type = ffa_get_mem_type,
	.matches = ffa_matches,
	.free = ffa_inactivate,
	.get_cookie = ffa_get_cookie,
	.inc_map = ffa_inc_map,
	.dec_map = ffa_dec_map,
};

preinit(mapped_shm_init);