// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2020, Linaro Limited
 */

#include <assert.h>
#include <bitstring.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/thread_spmc.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <optee_msg.h>
#include <stdlib.h>
#include <sys/queue.h>
#include <trace.h>
#include <util.h>

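/*
 * A mobj_ffa represents memory shared with OP-TEE over FF-A, identified
 * by a 64-bit cookie (the global handle normal world uses to refer to
 * the shared memory). @mm holds the virtual address range while the
 * pages are mapped, @mapcount counts inc_map()/dec_map() calls and
 * @page_offset is the offset into the first page where the buffer
 * starts. @pages holds the physical address of each page.
 */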
struct mobj_ffa {
	struct mobj mobj;
	SLIST_ENTRY(mobj_ffa) link;
	uint64_t cookie;
	tee_mm_entry_t *mm;
	struct refcount mapcount;
	uint16_t page_offset;
#ifdef CFG_CORE_SEL1_SPMC
	bool registered_by_cookie;
	bool unregistered_by_cookie;
#endif
	paddr_t pages[];
};

SLIST_HEAD(mobj_ffa_head, mobj_ffa);

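/*
 * With a S-EL1 SPMC the cookie values are allocated by OP-TEE itself,
 * tracked in a bitmap limited to NUM_SHMS concurrent shared memory
 * objects.
 */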
#ifdef CFG_CORE_SEL1_SPMC
#define NUM_SHMS	64
static bitstr_t bit_decl(shm_bits, NUM_SHMS);
#endif

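/*
 * Shared memory objects are kept in two lists: shm_head holds the
 * active mobjs (currently held via mobj_ffa_get_by_cookie()) and
 * shm_inactive_head holds the rest. Both lists and the cookie bitmap
 * are protected by shm_lock.
 */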
static struct mobj_ffa_head shm_head = SLIST_HEAD_INITIALIZER(shm_head);
static struct mobj_ffa_head shm_inactive_head =
	SLIST_HEAD_INITIALIZER(shm_inactive_head);

static unsigned int shm_lock = SPINLOCK_UNLOCK;

const struct mobj_ops mobj_ffa_ops;

static struct mobj_ffa *to_mobj_ffa(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_ffa_ops);
	return container_of(mobj, struct mobj_ffa, mobj);
}

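/*
 * Size of the allocation needed for a mobj_ffa covering num_pages
 * pages, or 0 on overflow.
 */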
static size_t shm_size(size_t num_pages)
{
	size_t s = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_ffa), s, &s))
		return 0;
	return s;
}

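/*
 * Allocate and initialize a mobj_ffa. The physical pages are added
 * later with mobj_ffa_add_pages_at().
 */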
static struct mobj_ffa *ffa_new(unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	size_t s = 0;

	if (!num_pages)
		return NULL;

	s = shm_size(num_pages);
	if (!s)
		return NULL;
	mf = calloc(1, s);
	if (!mf)
		return NULL;

	mf->mobj.ops = &mobj_ffa_ops;
	mf->mobj.size = num_pages * SMALL_PAGE_SIZE;
	mf->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&mf->mobj.refc, 0);

	return mf;
}

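/*
 * With a S-EL1 SPMC: allocate a mobj_ffa together with a free cookie
 * from the bitmap, or return NULL if all cookies are in use.
 */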
#ifdef CFG_CORE_SEL1_SPMC
struct mobj_ffa *mobj_ffa_sel1_spmc_new(unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	mf = ffa_new(num_pages);
	if (!mf)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	bit_ffc(shm_bits, NUM_SHMS, &i);
	if (i != -1) {
		bit_set(shm_bits, i);
		/*
		 * Setting bit 44 to use one of the upper 32 bits too for
		 * testing.
		 */
		mf->cookie = i | BIT64(44);
	}
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (i == -1) {
		free(mf);
		return NULL;
	}

	return mf;
}
#endif /*CFG_CORE_SEL1_SPMC*/

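/* Number of pages covered by the mobj */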
static size_t get_page_count(struct mobj_ffa *mf)
{
	return ROUNDUP(mf->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
}

static bool cmp_cookie(struct mobj_ffa *mf, uint64_t cookie)
{
	return mf->cookie == cookie;
}

static bool cmp_ptr(struct mobj_ffa *mf, uint64_t ptr)
{
	return mf == (void *)(vaddr_t)ptr;
}

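/*
 * List helpers: pop_from_list() unlinks and returns the first element
 * matching cmp_func(), find_in_list() only returns it. Both are called
 * with shm_lock held.
 */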
static struct mobj_ffa *pop_from_list(struct mobj_ffa_head *head,
				      bool (*cmp_func)(struct mobj_ffa *mf,
						       uint64_t val),
				      uint64_t val)
{
	struct mobj_ffa *mf = SLIST_FIRST(head);
	struct mobj_ffa *p = NULL;

	if (!mf)
		return NULL;

	if (cmp_func(mf, val)) {
		SLIST_REMOVE_HEAD(head, link);
		return mf;
	}

	while (true) {
		p = SLIST_NEXT(mf, link);
		if (!p)
			return NULL;
		if (cmp_func(p, val)) {
			SLIST_REMOVE_AFTER(mf, link);
			return p;
		}
		mf = p;
	}
}

static struct mobj_ffa *find_in_list(struct mobj_ffa_head *head,
				     bool (*cmp_func)(struct mobj_ffa *mf,
						      uint64_t val),
				     uint64_t val)
{
	struct mobj_ffa *mf = NULL;

	SLIST_FOREACH(mf, head, link)
		if (cmp_func(mf, val))
			return mf;

	return NULL;
}

#ifdef CFG_CORE_SEL1_SPMC
void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mf)
{
	int i = mf->cookie & ~BIT64(44);
	uint32_t exceptions = 0;

	assert(i >= 0 && i < NUM_SHMS);

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	assert(bit_test(shm_bits, i));
	bit_clear(shm_bits, i);
	assert(!mf->mm);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	free(mf);
}
#endif /*CFG_CORE_SEL1_SPMC*/

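/*
 * With a S-EL2 SPMC the cookie is supplied by the caller rather than
 * allocated from a local bitmap, so creating and deleting a mobj only
 * wraps ffa_new() and free().
 */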
#ifdef CFG_CORE_SEL2_SPMC
struct mobj_ffa *mobj_ffa_sel2_spmc_new(uint64_t cookie,
					unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;

	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
	mf = ffa_new(num_pages);
	if (mf)
		mf->cookie = cookie;
	return mf;
}

void mobj_ffa_sel2_spmc_delete(struct mobj_ffa *mf)
{
	free(mf);
}
#endif /*CFG_CORE_SEL2_SPMC*/

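/*
 * Record num_pages physical pages starting at pa in the mobj, beginning
 * at index *idx. The pages must be non-secure memory and fit within the
 * size given when the mobj was created. *idx is advanced on success.
 */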
TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mf, unsigned int *idx,
				 paddr_t pa, unsigned int num_pages)
{
	unsigned int n = 0;
	size_t tot_page_count = get_page_count(mf);

	if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))
		return TEE_ERROR_BAD_PARAMETERS;

	for (n = 0; n < num_pages; n++)
		mf->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;

	(*idx) += n;
	return TEE_SUCCESS;
}

uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mf)
{
	return mf->cookie;
}

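/*
 * Insert a newly created mobj into the inactive list and return its
 * cookie. The mobj must not already be present in any list.
 */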
uint64_t mobj_ffa_push_to_inactive(struct mobj_ffa *mf)
{
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	assert(!find_in_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf));
	assert(!find_in_list(&shm_inactive_head, cmp_cookie, mf->cookie));
	assert(!find_in_list(&shm_head, cmp_cookie, mf->cookie));
	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return mf->cookie;
}

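/* Unmap the pages and release the virtual address range, if mapped */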
static void unmap_helper(struct mobj_ffa *mf)
{
	if (mf->mm) {
		core_mmu_unmap_pages(tee_mm_get_smem(mf->mm),
				     get_page_count(mf));
		tee_mm_free(mf->mm);
		mf->mm = NULL;
	}
}

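/*
 * Free the shared memory object identified by cookie so that normal
 * world can reclaim the memory. This only succeeds if the mobj is
 * inactive and not currently registered via mobj_ffa_get_by_cookie().
 */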
#ifdef CFG_CORE_SEL1_SPMC
TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * reclaimed.
	 */
	if (mf) {
		DMSG("cookie %#"PRIx64" busy refc %u",
		     cookie, refcount_val(&mf->mobj.refc));
		res = TEE_ERROR_BUSY;
		goto out;
	}

	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	if (!mf) {
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	/*
	 * If the mobj has been registered via mobj_ffa_get_by_cookie()
	 * but not unregistered yet with mobj_ffa_unregister_by_cookie().
	 */
	if (mf->registered_by_cookie && !mf->unregistered_by_cookie) {
		DMSG("cookie %#"PRIx64" busy", cookie);
		res = TEE_ERROR_BUSY;
		goto out;
	}

	if (!pop_from_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf))
		panic();
	res = TEE_SUCCESS;
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	if (!res)
		mobj_ffa_sel1_spmc_delete(mf);
	return res;
}
#endif /*CFG_CORE_SEL1_SPMC*/

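/*
 * Drop the registration made by mobj_ffa_get_by_cookie(): with a S-EL2
 * SPMC the inactive mobj is deleted and the memory relinquished, with a
 * S-EL1 SPMC it's only marked as unregistered so that it can be
 * reclaimed later.
 */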
TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * unregistered.
	 */
	if (mf) {
		DMSG("cookie %#"PRIx64" busy refc %u",
		     cookie, refcount_val(&mf->mobj.refc));
		res = TEE_ERROR_BUSY;
		goto out;
	}
	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	/*
	 * If the mobj isn't found or if it already has been unregistered.
	 */
#ifdef CFG_CORE_SEL2_SPMC
	if (!mf) {
#else
	if (!mf || mf->unregistered_by_cookie) {
#endif
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}

#ifdef CFG_CORE_SEL2_SPMC
	mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
	mobj_ffa_sel2_spmc_delete(mf);
	thread_spmc_relinquish(cookie);
#else
	mf->unregistered_by_cookie = true;
#endif
	res = TEE_SUCCESS;

out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	return res;
}

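/*
 * Look up a shared memory object by cookie and return it with an
 * increased refcount. An inactive mobj is moved to the active list
 * (and, with a S-EL2 SPMC, may first be retrieved from the SPMC via
 * the RX buffer). internal_offs is the offset into the first page at
 * which the buffer starts and must match the mobj if it's already
 * active.
 */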
struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie,
				    unsigned int internal_offs)
{
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	if (internal_offs >= SMALL_PAGE_SIZE)
		return NULL;
	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	if (mf) {
		if (mf->page_offset == internal_offs) {
			if (!refcount_inc(&mf->mobj.refc)) {
				/*
				 * The refcount is 0, meaning some other
				 * thread has called mobj_put() and the
				 * refcount reached 0, but we found the
				 * mobj before ffa_inactivate() got the
				 * lock. Let's reinitialize it.
				 */
				refcount_set(&mf->mobj.refc, 1);
			}
			DMSG("cookie %#"PRIx64" active: refc %d",
			     cookie, refcount_val(&mf->mobj.refc));
		} else {
			EMSG("cookie %#"PRIx64" mismatching internal_offs got %#"PRIx16" expected %#x",
			     cookie, mf->page_offset, internal_offs);
			mf = NULL;
		}
	} else {
		mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
#if defined(CFG_CORE_SEL2_SPMC)
		/* Try to retrieve it from the SPM at S-EL2 */
		if (mf) {
			DMSG("cookie %#"PRIx64" resurrecting", cookie);
		} else {
			EMSG("Populating mobj from rx buffer, cookie %#"PRIx64,
			     cookie);
			mf = thread_spmc_populate_mobj_from_rx(cookie);
		}
#endif
		if (mf) {
#if defined(CFG_CORE_SEL1_SPMC)
			mf->unregistered_by_cookie = false;
			mf->registered_by_cookie = true;
#endif
			assert(refcount_val(&mf->mobj.refc) == 0);
			refcount_set(&mf->mobj.refc, 1);
			refcount_set(&mf->mapcount, 0);
			mf->mobj.size += mf->page_offset;
			assert(!(mf->mobj.size & SMALL_PAGE_MASK));
			mf->mobj.size -= internal_offs;
			mf->page_offset = internal_offs;
			SLIST_INSERT_HEAD(&shm_head, mf, link);
		}
	}

	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (!mf) {
		EMSG("Failed to get cookie %#"PRIx64" internal_offs %#x",
		     cookie, internal_offs);
		return NULL;
	}
	return &mf->mobj;
}

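/*
 * mobj_ops implementation: translate an offset into the mobj to a
 * physical address, honoring the requested granule (0 or
 * SMALL_PAGE_SIZE).
 */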
static TEE_Result ffa_get_pa(struct mobj *mobj, size_t offset,
			     size_t granule, paddr_t *pa)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	size_t full_offset = 0;
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offset >= mobj->size)
		return TEE_ERROR_GENERIC;

	full_offset = offset + mf->page_offset;
	switch (granule) {
	case 0:
		p = mf->pages[full_offset / SMALL_PAGE_SIZE] +
		    (full_offset & SMALL_PAGE_MASK);
		break;
	case SMALL_PAGE_SIZE:
		p = mf->pages[full_offset / SMALL_PAGE_SIZE];
		break;
	default:
		return TEE_ERROR_GENERIC;
	}
	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(ffa_get_pa);

static size_t ffa_get_phys_offs(struct mobj *mobj,
				size_t granule __maybe_unused)
{
	assert(granule >= mobj->phys_granule);

	return to_mobj_ffa(mobj)->page_offset;
}

static void *ffa_get_va(struct mobj *mobj, size_t offset)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);

	if (!mf->mm || offset >= mobj->size)
		return NULL;

	return (void *)(tee_mm_get_smem(mf->mm) + offset + mf->page_offset);
}

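/*
 * Called via the .free hook when the last reference is dropped with
 * mobj_put(): unless the mobj was resurrected by a concurrent
 * mobj_ffa_get_by_cookie(), move it from the active to the inactive
 * list and unmap it.
 */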
static void ffa_inactivate(struct mobj *mobj)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	/*
	 * If refcount isn't 0 some other thread has found this mobj in
	 * shm_head after the mobj_put() that put us here and before we got
	 * the lock.
	 */
	if (refcount_val(&mobj->refc)) {
		DMSG("cookie %#"PRIx64" was resurrected", mf->cookie);
		goto out;
	}

	DMSG("cookie %#"PRIx64, mf->cookie);
	if (!pop_from_list(&shm_head, cmp_ptr, (vaddr_t)mf))
		panic();
	unmap_helper(mf);
	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
}

static TEE_Result ffa_get_cattr(struct mobj *mobj __unused, uint32_t *cattr)
{
	if (!cattr)
		return TEE_ERROR_GENERIC;

	*cattr = TEE_MATTR_CACHE_CACHED;

	return TEE_SUCCESS;
}

static bool ffa_matches(struct mobj *mobj __maybe_unused, enum buf_is_attr attr)
{
	assert(mobj->ops == &mobj_ffa_ops);

	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

static uint64_t ffa_get_cookie(struct mobj *mobj)
{
	return to_mobj_ffa(mobj)->cookie;
}

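/*
 * Map the pages into the core's shared memory virtual address space the
 * first time a mapping is requested; later calls only increase
 * mapcount.
 */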
static TEE_Result ffa_inc_map(struct mobj *mobj)
{
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;
	struct mobj_ffa *mf = to_mobj_ffa(mobj);

	if (refcount_inc(&mf->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&shm_lock);

	if (refcount_val(&mf->mapcount))
		goto out;

	mf->mm = tee_mm_alloc(&tee_mm_shm, mf->mobj.size);
	if (!mf->mm) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out;
	}

	res = core_mmu_map_pages(tee_mm_get_smem(mf->mm), mf->pages,
				 get_page_count(mf), MEM_AREA_NSEC_SHM);
	if (res) {
		tee_mm_free(mf->mm);
		mf->mm = NULL;
		goto out;
	}

	refcount_set(&mf->mapcount, 1);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return res;
}

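/* Unmap the pages again when the last mapping reference is dropped */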
static TEE_Result ffa_dec_map(struct mobj *mobj)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;

	if (!refcount_dec(&mf->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	unmap_helper(mf);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return TEE_SUCCESS;
}

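/*
 * Initialize the virtual address space pool (tee_mm_shm) used when
 * mapping shared memory objects.
 */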
static TEE_Result mapped_shm_init(void)
{
	vaddr_t pool_start = 0;
	vaddr_t pool_end = 0;

	core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
	if (!pool_start || !pool_end)
		panic("Can't find region for shmem pool");

	if (!tee_mm_init(&tee_mm_shm, pool_start, pool_end, SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("Could not create shmem pool");

	DMSG("Shared memory address range: %#"PRIxVA", %#"PRIxVA,
	     pool_start, pool_end);
	return TEE_SUCCESS;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_ffa_ops __weak __rodata_unpaged("mobj_ffa_ops") = {
	.get_pa = ffa_get_pa,
	.get_phys_offs = ffa_get_phys_offs,
	.get_va = ffa_get_va,
	.get_cattr = ffa_get_cattr,
	.matches = ffa_matches,
	.free = ffa_inactivate,
	.get_cookie = ffa_get_cookie,
	.inc_map = ffa_inc_map,
	.dec_map = ffa_dec_map,
};

service_init(mapped_shm_init);