xref: /optee_os/core/arch/arm/mm/mobj_ffa.c (revision c1bdf4fc0c68e4555eaddbc7c1944d48d4637287)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2020, Linaro Limited
 */

#include <assert.h>
#include <bitstring.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <mm/mobj.h>
#include <sys/queue.h>

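/*
 * An FF-A shared memory object, identified by its @cookie. @mm is the
 * virtual address range while the object is mapped and @mapcount counts
 * the mappings. @page_offset is the offset into the first page at which
 * the shared buffer starts. @registered_by_cookie and
 * @unregistered_by_cookie record whether the cookie has been claimed
 * with mobj_ffa_get_by_cookie() and later released with
 * mobj_ffa_unregister_by_cookie(). @pages is a flexible array holding
 * the physical address of each page.
 */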
struct mobj_ffa {
	struct mobj mobj;
	SLIST_ENTRY(mobj_ffa) link;
	uint64_t cookie;
	tee_mm_entry_t *mm;
	struct refcount mapcount;
	uint16_t page_offset;
	bool registered_by_cookie;
	bool unregistered_by_cookie;
	paddr_t pages[];
};

SLIST_HEAD(mobj_ffa_head, mobj_ffa);

#ifdef CFG_CORE_SEL1_SPMC
#define NUM_SHMS	64
static bitstr_t bit_decl(shm_bits, NUM_SHMS);
#endif

static struct mobj_ffa_head shm_head = SLIST_HEAD_INITIALIZER(shm_head);
static struct mobj_ffa_head shm_inactive_head =
	SLIST_HEAD_INITIALIZER(shm_inactive_head);

static unsigned int shm_lock = SPINLOCK_UNLOCK;

static const struct mobj_ops mobj_ffa_ops __rodata_unpaged;

static struct mobj_ffa *to_mobj_ffa(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_ffa_ops);
	return container_of(mobj, struct mobj_ffa, mobj);
}

static size_t shm_size(size_t num_pages)
{
	size_t s = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_ffa), s, &s))
		return 0;
	return s;
}

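/*
 * Allocates a descriptor with room for @num_pages physical addresses.
 * The pages[] entries are left zeroed and are expected to be filled in
 * later with mobj_ffa_add_pages_at().
 */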
static struct mobj_ffa *ffa_new(unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	size_t s = 0;

	if (!num_pages)
		return NULL;

	s = shm_size(num_pages);
	if (!s)
		return NULL;
	mf = calloc(1, s);
	if (!mf)
		return NULL;

	mf->mobj.ops = &mobj_ffa_ops;
	mf->mobj.size = num_pages * SMALL_PAGE_SIZE;
	mf->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&mf->mobj.refc, 0);

	return mf;
}

#ifdef CFG_CORE_SEL1_SPMC
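/*
 * With the SPMC at S-EL1 the cookie is allocated here, from a small
 * bitmap of NUM_SHMS entries. Returns NULL if the bitmap is exhausted
 * or if the descriptor cannot be allocated.
 */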
struct mobj_ffa *mobj_ffa_sel1_spmc_new(unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	mf = ffa_new(num_pages);
	if (!mf)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	bit_ffc(shm_bits, NUM_SHMS, &i);
	if (i != -1) {
		bit_set(shm_bits, i);
		/*
		 * Setting bit 44 to use one of the upper 32 bits too for
		 * testing.
		 */
		mf->cookie = i | BIT64(44);
	}
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (i == -1) {
		free(mf);
		return NULL;
	}

	return mf;
}
#endif /*CFG_CORE_SEL1_SPMC*/

static size_t get_page_count(struct mobj_ffa *mf)
{
	return ROUNDUP(mf->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
}

static bool cmp_cookie(struct mobj_ffa *mf, uint64_t cookie)
{
	return mf->cookie == cookie;
}

static bool cmp_ptr(struct mobj_ffa *mf, uint64_t ptr)
{
	return mf == (void *)(vaddr_t)ptr;
}

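/*
 * Removes and returns the first element in @head matching @cmp_func,
 * or NULL if there is no match. find_in_list() below is the
 * non-removing counterpart.
 */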
static struct mobj_ffa *pop_from_list(struct mobj_ffa_head *head,
				      bool (*cmp_func)(struct mobj_ffa *mf,
						       uint64_t val),
				      uint64_t val)
{
	struct mobj_ffa *mf = SLIST_FIRST(head);
	struct mobj_ffa *p = NULL;

	if (!mf)
		return NULL;

	if (cmp_func(mf, val)) {
		SLIST_REMOVE_HEAD(head, link);
		return mf;
	}

	while (true) {
		p = SLIST_NEXT(mf, link);
		if (!p)
			return NULL;
		if (cmp_func(p, val)) {
			SLIST_REMOVE_AFTER(mf, link);
			return p;
		}
		mf = p;
	}
}

static struct mobj_ffa *find_in_list(struct mobj_ffa_head *head,
				     bool (*cmp_func)(struct mobj_ffa *mf,
						      uint64_t val),
				     uint64_t val)
{
	struct mobj_ffa *mf = NULL;

	SLIST_FOREACH(mf, head, link)
		if (cmp_func(mf, val))
			return mf;

	return NULL;
}

#ifdef CFG_CORE_SEL1_SPMC
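/*
 * Releases the cookie back to the bitmap and frees the descriptor.
 * The object must already be unmapped (mf->mm == NULL).
 */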
void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mf)
{
	int i = mf->cookie & ~BIT64(44);
	uint32_t exceptions = 0;

	assert(i >= 0 && i < NUM_SHMS);

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	assert(bit_test(shm_bits, i));
	bit_clear(shm_bits, i);
	assert(!mf->mm);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	free(mf);
}
#endif /*CFG_CORE_SEL1_SPMC*/

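/*
 * Records @num_pages contiguous non-secure pages starting at @pa,
 * beginning at index *@idx in the pages[] array, and advances *@idx.
 * Fails if the range would overflow the array or doesn't lie in
 * non-secure memory.
 */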
TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mf, unsigned int *idx,
				 paddr_t pa, unsigned int num_pages)
{
	unsigned int n = 0;
	size_t tot_page_count = get_page_count(mf);

	if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))
		return TEE_ERROR_BAD_PARAMETERS;

	for (n = 0; n < num_pages; n++)
		mf->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;

	(*idx) += n;
	return TEE_SUCCESS;
}

uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mf)
{
	return mf->cookie;
}

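/*
 * Makes a fully initialized mobj available for lookup by cookie by
 * inserting it on the inactive list. The asserts guard against the
 * same object or cookie being inserted twice.
 */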
uint64_t mobj_ffa_push_to_inactive(struct mobj_ffa *mf)
{
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	assert(!find_in_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf));
	assert(!find_in_list(&shm_inactive_head, cmp_cookie, mf->cookie));
	assert(!find_in_list(&shm_head, cmp_cookie, mf->cookie));
	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return mf->cookie;
}

static void unmap_helper(struct mobj_ffa *mf)
{
	if (mf->mm) {
		core_mmu_unmap_pages(tee_mm_get_smem(mf->mm),
				     get_page_count(mf));
		tee_mm_free(mf->mm);
		mf->mm = NULL;
	}
}

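/*
 * Flags an inactive mobj as unregistered so that it may later be
 * reclaimed. Fails with TEE_ERROR_BUSY while the mobj is still on the
 * active list.
 */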
TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * unregistered.
	 */
	if (mf) {
		DMSG("cookie %#"PRIx64" busy refc %u",
		     cookie, refcount_val(&mf->mobj.refc));
		res = TEE_ERROR_BUSY;
		goto out;
	}
	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	/*
	 * Fail if the mobj isn't found or if it has already been
	 * unregistered.
	 */
	if (!mf || mf->unregistered_by_cookie) {
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	mf->unregistered_by_cookie = true;
	res = TEE_SUCCESS;

out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	return res;
}

#ifdef CFG_CORE_SEL1_SPMC
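/*
 * Called when the normal world asks to reclaim the memory behind
 * @cookie. This only succeeds once the mobj is inactive and either was
 * never claimed with mobj_ffa_get_by_cookie() or has been unregistered
 * since; the descriptor and cookie are then freed.
 */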
TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * reclaimed.
	 */
	if (mf) {
		DMSG("cookie %#"PRIx64" busy refc %u",
		     cookie, refcount_val(&mf->mobj.refc));
		res = TEE_ERROR_BUSY;
		goto out;
	}

	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	if (!mf) {
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	/*
	 * If the mobj has been registered via mobj_ffa_get_by_cookie()
	 * but not yet unregistered with mobj_ffa_unregister_by_cookie()
	 * it's still busy.
	 */
	if (mf->registered_by_cookie && !mf->unregistered_by_cookie) {
		DMSG("cookie %#"PRIx64" busy", cookie);
		res = TEE_ERROR_BUSY;
		goto out;
	}

	if (!pop_from_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf))
		panic();
	res = TEE_SUCCESS;
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	if (!res)
		mobj_ffa_sel1_spmc_delete(mf);
	return res;
}
#endif /*CFG_CORE_SEL1_SPMC*/

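/*
 * Looks up the mobj matching @cookie and takes a reference on it. An
 * already active mobj only matches if @internal_offs equals its page
 * offset; an inactive mobj is (re)initialized with the requested
 * offset and moved to the active list.
 */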
struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie, unsigned int internal_offs)
{
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	if (internal_offs >= SMALL_PAGE_SIZE)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&shm_lock);

	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	if (mf) {
		if (mf->page_offset == internal_offs) {
			if (!refcount_inc(&mf->mobj.refc)) {
				/*
				 * If refcount is 0 some other thread has
				 * called mobj_put() so that the refcount
				 * reached 0, but we found the mobj here
				 * before ffa_inactivate() got the lock.
				 * Let's reinitialize it.
				 */
				refcount_set(&mf->mobj.refc, 1);
			}
			DMSG("cookie %#"PRIx64" active: refc %d",
			     cookie, refcount_val(&mf->mobj.refc));
		} else {
			EMSG("cookie %#"PRIx64" mismatching internal_offs got %#"PRIx16" expected %#x",
			     cookie, mf->page_offset, internal_offs);
			mf = NULL;
		}
	} else {
		mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
		if (mf) {
			mf->unregistered_by_cookie = false;
			mf->registered_by_cookie = true;
			assert(refcount_val(&mf->mobj.refc) == 0);
			refcount_set(&mf->mobj.refc, 1);
			refcount_set(&mf->mapcount, 0);
			mf->mobj.size += mf->page_offset;
			assert(!(mf->mobj.size & SMALL_PAGE_MASK));
			mf->mobj.size -= internal_offs;
			mf->page_offset = internal_offs;
			SLIST_INSERT_HEAD(&shm_head, mf, link);
		}
	}

	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (!mf) {
		EMSG("Failed to get cookie %#"PRIx64" internal_offs %#x",
		     cookie, internal_offs);
		return NULL;
	}

	return &mf->mobj;
}

static TEE_Result ffa_get_pa(struct mobj *mobj, size_t offset,
			     size_t granule, paddr_t *pa)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	size_t full_offset = 0;
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offset >= mobj->size)
		return TEE_ERROR_GENERIC;

	full_offset = offset + mf->page_offset;
	switch (granule) {
	case 0:
		p = mf->pages[full_offset / SMALL_PAGE_SIZE] +
		    (full_offset & SMALL_PAGE_MASK);
		break;
	case SMALL_PAGE_SIZE:
		p = mf->pages[full_offset / SMALL_PAGE_SIZE];
		break;
	default:
		return TEE_ERROR_GENERIC;
	}
	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(ffa_get_pa);

static size_t ffa_get_phys_offs(struct mobj *mobj,
				size_t granule __maybe_unused)
{
	assert(granule >= mobj->phys_granule);

	return to_mobj_ffa(mobj)->page_offset;
}

static void *ffa_get_va(struct mobj *mobj, size_t offset)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);

	if (!mf->mm || offset >= mobj->size)
		return NULL;

	return (void *)(tee_mm_get_smem(mf->mm) + offset + mf->page_offset);
}

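/*
 * Called through mobj_ffa_ops.free when the last reference is dropped:
 * the mobj is unmapped and moved back to the inactive list rather than
 * freed, so the cookie stays valid until the memory is reclaimed.
 */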
static void ffa_inactivate(struct mobj *mobj)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	/*
	 * If refcount isn't 0 some other thread has found this mobj in
	 * shm_head after the mobj_put() that put us here and before we got
	 * the lock.
	 */
	if (refcount_val(&mobj->refc)) {
		DMSG("cookie %#"PRIx64" was resurrected", mf->cookie);
		goto out;
	}

	DMSG("cookie %#"PRIx64, mf->cookie);
	if (!pop_from_list(&shm_head, cmp_ptr, (vaddr_t)mf))
		panic();
	unmap_helper(mf);
	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
}

static TEE_Result ffa_get_cattr(struct mobj *mobj __unused, uint32_t *cattr)
{
	if (!cattr)
		return TEE_ERROR_GENERIC;

	*cattr = TEE_MATTR_CACHE_CACHED;

	return TEE_SUCCESS;
}

static bool ffa_matches(struct mobj *mobj __maybe_unused, enum buf_is_attr attr)
{
	assert(mobj->ops == &mobj_ffa_ops);

	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

static uint64_t ffa_get_cookie(struct mobj *mobj)
{
	return to_mobj_ffa(mobj)->cookie;
}

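/*
 * Maps the pages into the core virtual shared memory area on the first
 * call; later calls only increment the map count. The refcount_inc()
 * fast path avoids taking the spinlock when the object is already
 * mapped, and the refcount_val() recheck under the lock handles the
 * race where another thread mapped it first.
 */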
static TEE_Result ffa_inc_map(struct mobj *mobj)
{
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;
	struct mobj_ffa *mf = to_mobj_ffa(mobj);

	if (refcount_inc(&mf->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&shm_lock);

	if (refcount_val(&mf->mapcount))
		goto out;

	mf->mm = tee_mm_alloc(&tee_mm_shm, mf->mobj.size);
	if (!mf->mm) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out;
	}

	res = core_mmu_map_pages(tee_mm_get_smem(mf->mm), mf->pages,
				 get_page_count(mf), MEM_AREA_NSEC_SHM);
	if (res) {
		tee_mm_free(mf->mm);
		mf->mm = NULL;
		goto out;
	}

	refcount_set(&mf->mapcount, 1);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return res;
}

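/*
 * Counterpart of ffa_inc_map(): unmaps the pages when the map count
 * drops to zero.
 */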
static TEE_Result ffa_dec_map(struct mobj *mobj)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;

	if (!refcount_dec(&mf->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	unmap_helper(mf);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return TEE_SUCCESS;
}

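/*
 * Sets up the virtual address pool, tee_mm_shm, from which
 * ffa_inc_map() allocates mapping ranges.
 */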
static TEE_Result mapped_shm_init(void)
{
	vaddr_t pool_start = 0;
	vaddr_t pool_end = 0;

	core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
	if (!pool_start || !pool_end)
		panic("Can't find region for shmem pool");

	if (!tee_mm_init(&tee_mm_shm, pool_start, pool_end, SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("Could not create shmem pool");

	DMSG("Shared memory address range: %#"PRIxVA", %#"PRIxVA,
	     pool_start, pool_end);
	return TEE_SUCCESS;
}

static const struct mobj_ops mobj_ffa_ops __rodata_unpaged = {
	.get_pa = ffa_get_pa,
	.get_phys_offs = ffa_get_phys_offs,
	.get_va = ffa_get_va,
	.get_cattr = ffa_get_cattr,
	.matches = ffa_matches,
	.free = ffa_inactivate,
	.get_cookie = ffa_get_cookie,
	.inc_map = ffa_inc_map,
	.dec_map = ffa_dec_map,
};

service_init(mapped_shm_init);
551