xref: /optee_os/core/arch/arm/mm/mobj_ffa.c (revision 21c96e485246348896690a8b6ff5a6496daca303)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2020, Linaro Limited
 */

#include <assert.h>
#include <bitstring.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/thread_spmc.h>
#include <mm/mobj.h>
#include <sys/queue.h>

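/*
 * struct mobj_ffa - mobj for FF-A shared memory
 * @mobj:	generic mobj, its refcount tracks whether the object is
 *		active
 * @link:	list element, the mobj is always in either the active or
 *		the inactive list
 * @cookie:	the cookie (FF-A global handle) identifying this object
 * @mm:		tee_mm entry for the virtual address range while mapped
 * @mapcount:	refcount of inc_map calls without a matching dec_map
 * @page_offset: offset into the first physical page
 * @pages:	flexible array with the physical address of each page
 */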
struct mobj_ffa {
	struct mobj mobj;
	SLIST_ENTRY(mobj_ffa) link;
	uint64_t cookie;
	tee_mm_entry_t *mm;
	struct refcount mapcount;
	uint16_t page_offset;
#ifdef CFG_CORE_SEL1_SPMC
	bool registered_by_cookie;
	bool unregistered_by_cookie;
#endif
	paddr_t pages[];
};

SLIST_HEAD(mobj_ffa_head, mobj_ffa);

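/*
 * With a S-EL1 SPMC the cookie values are allocated here, from a
 * fixed-size bitmap allowing at most NUM_SHMS objects at a time.
 */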
#ifdef CFG_CORE_SEL1_SPMC
#define NUM_SHMS	64
static bitstr_t bit_decl(shm_bits, NUM_SHMS);
#endif

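/*
 * An active mobj (refcount > 0) is kept in shm_head, an inactive mobj
 * in shm_inactive_head. Both lists, the bitmap above and the list
 * membership of each mobj are protected by shm_lock.
 */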
static struct mobj_ffa_head shm_head = SLIST_HEAD_INITIALIZER(shm_head);
static struct mobj_ffa_head shm_inactive_head =
	SLIST_HEAD_INITIALIZER(shm_inactive_head);

static unsigned int shm_lock = SPINLOCK_UNLOCK;

const struct mobj_ops mobj_ffa_ops;

static struct mobj_ffa *to_mobj_ffa(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_ffa_ops);
	return container_of(mobj, struct mobj_ffa, mobj);
}

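/*
 * Returns the allocation size needed for a struct mobj_ffa covering
 * @num_pages pages, that is, sizeof(struct mobj_ffa) plus the trailing
 * pages[] array. Returns 0 on arithmetic overflow.
 */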
static size_t shm_size(size_t num_pages)
{
	size_t s = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_ffa), s, &s))
		return 0;
	return s;
}

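/*
 * Allocates and minimally initializes a struct mobj_ffa. The cookie and
 * the pages[] array are filled in by the callers below.
 */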
static struct mobj_ffa *ffa_new(unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	size_t s = 0;

	if (!num_pages)
		return NULL;

	s = shm_size(num_pages);
	if (!s)
		return NULL;
	mf = calloc(1, s);
	if (!mf)
		return NULL;

	mf->mobj.ops = &mobj_ffa_ops;
	mf->mobj.size = num_pages * SMALL_PAGE_SIZE;
	mf->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&mf->mobj.refc, 0);

	return mf;
}

#ifdef CFG_CORE_SEL1_SPMC
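/*
 * Allocates a new mobj and assigns it a cookie from the shm_bits
 * bitmap. Fails if all NUM_SHMS cookies are already in use.
 */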
struct mobj_ffa *mobj_ffa_sel1_spmc_new(unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	mf = ffa_new(num_pages);
	if (!mf)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	bit_ffc(shm_bits, NUM_SHMS, &i);
	if (i != -1) {
		bit_set(shm_bits, i);
		/*
		 * Set bit 44 in the cookie so that one of the upper 32
		 * bits is exercised too, for testing.
		 */
		mf->cookie = i | BIT64(44);
	}
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (i == -1) {
		free(mf);
		return NULL;
	}

	return mf;
}
#endif /*CFG_CORE_SEL1_SPMC*/

static size_t get_page_count(struct mobj_ffa *mf)
{
	return ROUNDUP(mf->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
}

static bool cmp_cookie(struct mobj_ffa *mf, uint64_t cookie)
{
	return mf->cookie == cookie;
}

static bool cmp_ptr(struct mobj_ffa *mf, uint64_t ptr)
{
	return mf == (void *)(vaddr_t)ptr;
}

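/*
 * pop_from_list() removes and returns the first element in @head
 * matching @cmp_func, or NULL if there's no match. Both it and
 * find_in_list() below are called with shm_lock held.
 */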
static struct mobj_ffa *pop_from_list(struct mobj_ffa_head *head,
				      bool (*cmp_func)(struct mobj_ffa *mf,
						       uint64_t val),
				      uint64_t val)
{
	struct mobj_ffa *mf = SLIST_FIRST(head);
	struct mobj_ffa *p = NULL;

	if (!mf)
		return NULL;

	if (cmp_func(mf, val)) {
		SLIST_REMOVE_HEAD(head, link);
		return mf;
	}

	while (true) {
		p = SLIST_NEXT(mf, link);
		if (!p)
			return NULL;
		if (cmp_func(p, val)) {
			SLIST_REMOVE_AFTER(mf, link);
			return p;
		}
		mf = p;
	}
}

static struct mobj_ffa *find_in_list(struct mobj_ffa_head *head,
				     bool (*cmp_func)(struct mobj_ffa *mf,
						      uint64_t val),
				     uint64_t val)
{
	struct mobj_ffa *mf = NULL;

	SLIST_FOREACH(mf, head, link)
		if (cmp_func(mf, val))
			return mf;

	return NULL;
}

#ifdef CFG_CORE_SEL1_SPMC
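/*
 * Frees the mobj and releases its cookie back to the shm_bits bitmap.
 * The mobj must be unmapped (mf->mm == NULL) when this is called.
 */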
void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mf)
{
	int i = mf->cookie & ~BIT64(44);
	uint32_t exceptions = 0;

	assert(i >= 0 && i < NUM_SHMS);

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	assert(bit_test(shm_bits, i));
	bit_clear(shm_bits, i);
	assert(!mf->mm);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	free(mf);
}
#endif /*CFG_CORE_SEL1_SPMC*/

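/*
 * With a S-EL2 SPMC the cookie is supplied by the caller (it originates
 * from the SPMC), so no local cookie allocation is needed here.
 */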
#ifdef CFG_CORE_SEL2_SPMC
struct mobj_ffa *mobj_ffa_sel2_spmc_new(uint64_t cookie,
					unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;

	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
	mf = ffa_new(num_pages);
	if (mf)
		mf->cookie = cookie;
	return mf;
}

void mobj_ffa_sel2_spmc_delete(struct mobj_ffa *mf)
{
	free(mf);
}
#endif /*CFG_CORE_SEL2_SPMC*/

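/*
 * Records the physical address of @num_pages contiguous pages starting
 * at @pa in mf->pages[], beginning at index *@idx, and advances *@idx
 * accordingly. The pages must be non-secure memory.
 *
 * A minimal usage sketch (pa0 and pa1 are hypothetical page-aligned
 * addresses of two non-contiguous ranges backing a three-page mobj):
 *
 *	unsigned int idx = 0;
 *
 *	if (mobj_ffa_add_pages_at(mf, &idx, pa0, 2))	// idx becomes 2
 *		...error handling...
 *	if (mobj_ffa_add_pages_at(mf, &idx, pa1, 1))	// idx becomes 3
 *		...error handling...
 */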
TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mf, unsigned int *idx,
				 paddr_t pa, unsigned int num_pages)
{
	unsigned int n = 0;
	size_t tot_page_count = get_page_count(mf);

	if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))
		return TEE_ERROR_BAD_PARAMETERS;

	for (n = 0; n < num_pages; n++)
		mf->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;

	(*idx) += n;
	return TEE_SUCCESS;
}

uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mf)
{
	return mf->cookie;
}

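/*
 * Inserts a fully initialized mobj into the inactive list, making it
 * available for activation with mobj_ffa_get_by_cookie(). Returns the
 * cookie of the mobj.
 */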
uint64_t mobj_ffa_push_to_inactive(struct mobj_ffa *mf)
{
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	assert(!find_in_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf));
	assert(!find_in_list(&shm_inactive_head, cmp_cookie, mf->cookie));
	assert(!find_in_list(&shm_head, cmp_cookie, mf->cookie));
	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return mf->cookie;
}

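/* Unmaps the mobj from the shared memory VA space, if it is mapped */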
static void unmap_helper(struct mobj_ffa *mf)
{
	if (mf->mm) {
		core_mmu_unmap_pages(tee_mm_get_smem(mf->mm),
				     get_page_count(mf));
		tee_mm_free(mf->mm);
		mf->mm = NULL;
	}
}

#ifdef CFG_CORE_SEL1_SPMC
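/*
 * Called when the owner wants the shared memory back. Succeeds only if
 * the mobj is inactive and not still registered via
 * mobj_ffa_get_by_cookie(), in which case the mobj is deleted and its
 * cookie released.
 */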
TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * reclaimed.
	 */
	if (mf) {
		DMSG("cookie %#"PRIx64" busy refc %u",
		     cookie, refcount_val(&mf->mobj.refc));
		res = TEE_ERROR_BUSY;
		goto out;
	}

	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	if (!mf) {
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	/*
	 * If the mobj has been registered via mobj_ffa_get_by_cookie()
	 * but not yet unregistered with mobj_ffa_unregister_by_cookie(),
	 * it's still in use and cannot be reclaimed.
	 */
	if (mf->registered_by_cookie && !mf->unregistered_by_cookie) {
		DMSG("cookie %#"PRIx64" busy", cookie);
		res = TEE_ERROR_BUSY;
		goto out;
	}

	if (!pop_from_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf))
		panic();
	res = TEE_SUCCESS;
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	if (!res)
		mobj_ffa_sel1_spmc_delete(mf);
	return res;
}
#endif /*CFG_CORE_SEL1_SPMC*/

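/*
 * Ends a registration made with mobj_ffa_get_by_cookie(). With a S-EL1
 * SPMC the mobj is only flagged as unregistered so that it can be
 * reclaimed later, with a S-EL2 SPMC the mobj is deleted and the memory
 * relinquished to the SPMC. Fails with TEE_ERROR_BUSY if the mobj is
 * still active.
 */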
TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * unregistered.
	 */
	if (mf) {
		DMSG("cookie %#"PRIx64" busy refc %u",
		     cookie, refcount_val(&mf->mobj.refc));
		res = TEE_ERROR_BUSY;
		goto out;
	}
	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	/*
	 * If the mobj isn't found or if it has already been unregistered.
	 */
#ifdef CFG_CORE_SEL2_SPMC
	if (!mf) {
#else
	if (!mf || mf->unregistered_by_cookie) {
#endif
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}

#ifdef CFG_CORE_SEL2_SPMC
	mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
	mobj_ffa_sel2_spmc_delete(mf);
	thread_spmc_relinquish(cookie);
#else
	mf->unregistered_by_cookie = true;
#endif
	res = TEE_SUCCESS;

out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	return res;
}

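/*
 * Looks up the mobj matching @cookie and activates it: if it's already
 * active the refcount is increased, otherwise it's moved from the
 * inactive list to the active list with the refcount set to 1 and
 * page_offset set to @internal_offs.
 *
 * The returned mobj is put back with mobj_put(), which via
 * ffa_inactivate() below moves it to the inactive list again once the
 * refcount reaches 0. A minimal usage sketch (hypothetical cookie
 * value):
 *
 *	struct mobj *mobj = mobj_ffa_get_by_cookie(cookie, 0);
 *
 *	if (!mobj)
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 *	...use the mobj...
 *	mobj_put(mobj);
 */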
struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie,
				    unsigned int internal_offs)
{
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	if (internal_offs >= SMALL_PAGE_SIZE)
		return NULL;
	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	if (mf) {
		if (mf->page_offset == internal_offs) {
			if (!refcount_inc(&mf->mobj.refc)) {
				/*
				 * If refcount is 0, some other thread
				 * has called mobj_put() and the count
				 * reached 0, but we found the mobj
				 * before ffa_inactivate() got the lock.
				 * Let's reinitialize the refcount to
				 * resurrect it.
				 */
				refcount_set(&mf->mobj.refc, 1);
			}
			DMSG("cookie %#"PRIx64" active: refc %u",
			     cookie, refcount_val(&mf->mobj.refc));
		} else {
			EMSG("cookie %#"PRIx64" mismatching internal_offs got %#"PRIx16" expected %#x",
			     cookie, mf->page_offset, internal_offs);
			mf = NULL;
		}
	} else {
		mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
#if defined(CFG_CORE_SEL2_SPMC)
		/* Try to retrieve it from the SPM at S-EL2 */
		if (mf) {
			DMSG("cookie %#"PRIx64" resurrecting", cookie);
		} else {
			EMSG("Populating mobj from rx buffer, cookie %#"PRIx64,
			     cookie);
			mf = thread_spmc_populate_mobj_from_rx(cookie);
		}
#endif
		if (mf) {
#if defined(CFG_CORE_SEL1_SPMC)
			mf->unregistered_by_cookie = false;
			mf->registered_by_cookie = true;
#endif
			assert(refcount_val(&mf->mobj.refc) == 0);
			refcount_set(&mf->mobj.refc, 1);
			refcount_set(&mf->mapcount, 0);

			/*
			 * mf->page_offset is the offset into the first
			 * page. It is assigned from the internal_offs
			 * parameter to this function.
			 *
			 * While a mobj_ffa is active (refcount > 0) it
			 * will not change, but when the mobj is pushed
			 * to the inactive list it can be changed again.
			 *
			 * So below we're backing out the old
			 * mf->page_offset and then assigning a new one
			 * from internal_offs.
			 */
			mf->mobj.size += mf->page_offset;
			assert(!(mf->mobj.size & SMALL_PAGE_MASK));
			mf->mobj.size -= internal_offs;
			mf->page_offset = internal_offs;

			SLIST_INSERT_HEAD(&shm_head, mf, link);
		}
	}

	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (!mf) {
		EMSG("Failed to get cookie %#"PRIx64" internal_offs %#x",
		     cookie, internal_offs);
		return NULL;
	}
	return &mf->mobj;
}

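/*
 * Translates @offset within the mobj to a physical address, rounded
 * down to @granule which must be 0 (no rounding) or SMALL_PAGE_SIZE.
 */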
static TEE_Result ffa_get_pa(struct mobj *mobj, size_t offset,
			     size_t granule, paddr_t *pa)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	size_t full_offset = 0;
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offset >= mobj->size)
		return TEE_ERROR_GENERIC;

	full_offset = offset + mf->page_offset;
	switch (granule) {
	case 0:
		p = mf->pages[full_offset / SMALL_PAGE_SIZE] +
		    (full_offset & SMALL_PAGE_MASK);
		break;
	case SMALL_PAGE_SIZE:
		p = mf->pages[full_offset / SMALL_PAGE_SIZE];
		break;
	default:
		return TEE_ERROR_GENERIC;
	}
	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(ffa_get_pa);

static size_t ffa_get_phys_offs(struct mobj *mobj,
				size_t granule __maybe_unused)
{
	assert(granule >= mobj->phys_granule);

	return to_mobj_ffa(mobj)->page_offset;
}

static void *ffa_get_va(struct mobj *mobj, size_t offset)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);

	if (!mf->mm || offset >= mobj->size)
		return NULL;

	return (void *)(tee_mm_get_smem(mf->mm) + offset + mf->page_offset);
}

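/*
 * Called via mobj_put() when the refcount reaches 0. Unless the mobj
 * has been resurrected by a concurrent mobj_ffa_get_by_cookie(), it's
 * unmapped and moved to the inactive list where it stays until it's
 * activated again or reclaimed.
 */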
static void ffa_inactivate(struct mobj *mobj)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	/*
	 * If refcount isn't 0 some other thread has found this mobj in
	 * shm_head after the mobj_put() that put us here and before we
	 * got the lock.
	 */
	if (refcount_val(&mobj->refc)) {
		DMSG("cookie %#"PRIx64" was resurrected", mf->cookie);
		goto out;
	}

	DMSG("cookie %#"PRIx64, mf->cookie);
	if (!pop_from_list(&shm_head, cmp_ptr, (vaddr_t)mf))
		panic();
	unmap_helper(mf);
	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
}

static TEE_Result ffa_get_cattr(struct mobj *mobj __unused, uint32_t *cattr)
{
	if (!cattr)
		return TEE_ERROR_GENERIC;

	*cattr = TEE_MATTR_CACHE_CACHED;

	return TEE_SUCCESS;
}

static bool ffa_matches(struct mobj *mobj __maybe_unused, enum buf_is_attr attr)
{
	assert(mobj->ops == &mobj_ffa_ops);

	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

static uint64_t ffa_get_cookie(struct mobj *mobj)
{
	return to_mobj_ffa(mobj)->cookie;
}

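/*
 * Maps the mobj into the shared memory VA space the first time it's
 * needed. The fast path increases mapcount without taking the spinlock,
 * the slow path takes the lock and checks mapcount again since another
 * thread may have mapped the mobj in the meantime.
 */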
static TEE_Result ffa_inc_map(struct mobj *mobj)
{
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;
	struct mobj_ffa *mf = to_mobj_ffa(mobj);

	if (refcount_inc(&mf->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&shm_lock);

	if (refcount_val(&mf->mapcount))
		goto out;

	mf->mm = tee_mm_alloc(&tee_mm_shm, mf->mobj.size);
	if (!mf->mm) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out;
	}

	res = core_mmu_map_pages(tee_mm_get_smem(mf->mm), mf->pages,
				 get_page_count(mf), MEM_AREA_NSEC_SHM);
	if (res) {
		tee_mm_free(mf->mm);
		mf->mm = NULL;
		goto out;
	}

	refcount_set(&mf->mapcount, 1);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return res;
}

static TEE_Result ffa_dec_map(struct mobj *mobj)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;

	if (!refcount_dec(&mf->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	unmap_helper(mf);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return TEE_SUCCESS;
}

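/*
 * Creates the tee_mm_shm pool from the MEM_AREA_SHM_VASPACE virtual
 * address range, from which ffa_inc_map() above allocates mappings.
 */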
static TEE_Result mapped_shm_init(void)
{
	vaddr_t pool_start = 0;
	vaddr_t pool_end = 0;

	core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
	if (!pool_start || !pool_end)
		panic("Can't find region for shmem pool");

	if (!tee_mm_init(&tee_mm_shm, pool_start, pool_end, SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("Could not create shmem pool");

	DMSG("Shared memory address range: %#"PRIxVA", %#"PRIxVA,
	     pool_start, pool_end);
	return TEE_SUCCESS;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_ffa_ops __weak __rodata_unpaged("mobj_ffa_ops") = {
	.get_pa = ffa_get_pa,
	.get_phys_offs = ffa_get_phys_offs,
	.get_va = ffa_get_va,
	.get_cattr = ffa_get_cattr,
	.matches = ffa_matches,
	.free = ffa_inactivate,
	.get_cookie = ffa_get_cookie,
	.inc_map = ffa_inc_map,
	.dec_map = ffa_dec_map,
};

preinit(mapped_shm_init);