xref: /optee_os/core/arch/arm/mm/mobj_ffa.c (revision 3688e132bde5eeeadfe5b53085c19d12a01ce433)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2020, Linaro Limited
 */

#include <assert.h>
#include <bitstring.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <stdlib.h>
#include <sys/queue.h>
#include <trace.h>
#include <util.h>

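/*
 * struct mobj_ffa - mobj for memory shared over FF-A
 * @mobj:			generic memory object
 * @link:			link in the active or inactive list below
 * @cookie:			FF-A cookie (memory handle) identifying this
 *				shared memory object
 * @mm:				core virtual address range while mapped,
 *				else NULL
 * @mapcount:			refcount of mobj_inc_map()/mobj_dec_map()
 * @page_offset:		offset into the first page of the buffer
 * @registered_by_cookie:	true once retrieved with
 *				mobj_ffa_get_by_cookie()
 * @unregistered_by_cookie:	true once released with
 *				mobj_ffa_unregister_by_cookie()
 * @pages:			physical address of each page of the buffer
 */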
struct mobj_ffa {
	struct mobj mobj;
	SLIST_ENTRY(mobj_ffa) link;
	uint64_t cookie;
	tee_mm_entry_t *mm;
	struct refcount mapcount;
	uint16_t page_offset;
	bool registered_by_cookie;
	bool unregistered_by_cookie;
	paddr_t pages[];
};

SLIST_HEAD(mobj_ffa_head, mobj_ffa);

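/*
 * With CFG_CORE_SEL1_SPMC the SPMC in OP-TEE allocates the cookie values
 * itself: shm_bits tracks which of the NUM_SHMS cookie slots are in use.
 *
 * shm_head holds active mobjs (currently retrieved with
 * mobj_ffa_get_by_cookie()), shm_inactive_head holds the rest. Both
 * lists, shm_bits and the mobjs themselves are protected by shm_lock.
 */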
#ifdef CFG_CORE_SEL1_SPMC
#define NUM_SHMS	64
static bitstr_t bit_decl(shm_bits, NUM_SHMS);
#endif

static struct mobj_ffa_head shm_head = SLIST_HEAD_INITIALIZER(shm_head);
static struct mobj_ffa_head shm_inactive_head =
	SLIST_HEAD_INITIALIZER(shm_inactive_head);

static unsigned int shm_lock = SPINLOCK_UNLOCK;

static const struct mobj_ops mobj_ffa_ops __rodata_unpaged;

static struct mobj_ffa *to_mobj_ffa(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_ffa_ops);
	return container_of(mobj, struct mobj_ffa, mobj);
}

static struct mobj_ffa *to_mobj_ffa_may_fail(struct mobj *mobj)
{
	if (!mobj || mobj->ops != &mobj_ffa_ops)
		return NULL;

	return container_of(mobj, struct mobj_ffa, mobj);
}

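/*
 * Returns the number of bytes to allocate for a struct mobj_ffa covering
 * @num_pages pages, or 0 on overflow. For example, with a 64-bit paddr_t,
 * shm_size(4) is sizeof(struct mobj_ffa) plus 4 * 8 bytes for the pages[]
 * array.
 */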
static size_t shm_size(size_t num_pages)
{
	size_t s = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_ffa), s, &s))
		return 0;
	return s;
}

static struct mobj_ffa *ffa_new(unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	size_t s = 0;

	if (!num_pages)
		return NULL;

	s = shm_size(num_pages);
	if (!s)
		return NULL;
	mf = calloc(1, s);
	if (!mf)
		return NULL;

	mf->mobj.ops = &mobj_ffa_ops;
	mf->mobj.size = num_pages * SMALL_PAGE_SIZE;
	mf->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&mf->mobj.refc, 0);

	return mf;
}

#ifdef CFG_CORE_SEL1_SPMC
struct mobj_ffa *mobj_ffa_sel1_spmc_new(unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	mf = ffa_new(num_pages);
	if (!mf)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	bit_ffc(shm_bits, NUM_SHMS, &i);
	if (i != -1) {
		bit_set(shm_bits, i);
		/*
		 * Add 1 to avoid a cookie value of 0. Set bit 44 so that
		 * one of the upper 32 bits is exercised too, for testing.
		 */
		mf->cookie = (i + 1) | BIT64(44);
	}
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (i == -1) {
		free(mf);
		return NULL;
	}

	return mf;
}
#endif /*CFG_CORE_SEL1_SPMC*/
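
/*
 * Illustration (not compiled): the cookie encoding used by
 * mobj_ffa_sel1_spmc_new() above and decoded again in
 * mobj_ffa_sel1_spmc_delete() below. Bit allocation index 5 gives
 * cookie 0x100000000006 and decodes back to 5.
 */
#if 0
static void cookie_encoding_example(void)
{
	int i = 5;				/* free bit found by bit_ffc() */
	uint64_t cookie = (i + 1) | BIT64(44);	/* 0x100000000006 */
	int j = (cookie - 1) & ~BIT64(44);	/* back to 5 */

	assert(j == i);
}
#endif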

static size_t get_page_count(struct mobj_ffa *mf)
{
	return ROUNDUP(mf->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
}

static bool cmp_cookie(struct mobj_ffa *mf, uint64_t cookie)
{
	return mf->cookie == cookie;
}

static bool cmp_ptr(struct mobj_ffa *mf, uint64_t ptr)
{
	return mf == (void *)(vaddr_t)ptr;
}

static struct mobj_ffa *pop_from_list(struct mobj_ffa_head *head,
				      bool (*cmp_func)(struct mobj_ffa *mf,
						       uint64_t val),
				      uint64_t val)
{
	struct mobj_ffa *mf = SLIST_FIRST(head);
	struct mobj_ffa *p = NULL;

	if (!mf)
		return NULL;

	if (cmp_func(mf, val)) {
		SLIST_REMOVE_HEAD(head, link);
		return mf;
	}

	while (true) {
		p = SLIST_NEXT(mf, link);
		if (!p)
			return NULL;
		if (cmp_func(p, val)) {
			SLIST_REMOVE_AFTER(mf, link);
			return p;
		}
		mf = p;
	}
}

static struct mobj_ffa *find_in_list(struct mobj_ffa_head *head,
				     bool (*cmp_func)(struct mobj_ffa *mf,
						      uint64_t val),
				     uint64_t val)
{
	struct mobj_ffa *mf = NULL;

	SLIST_FOREACH(mf, head, link)
		if (cmp_func(mf, val))
			return mf;

	return NULL;
}

#ifdef CFG_CORE_SEL1_SPMC
void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mf)
{
	int i = (mf->cookie - 1) & ~BIT64(44);
	uint32_t exceptions = 0;

	assert(i >= 0 && i < NUM_SHMS);

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	assert(bit_test(shm_bits, i));
	bit_clear(shm_bits, i);
	assert(!mf->mm);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	free(mf);
}
#endif /*CFG_CORE_SEL1_SPMC*/

TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mf, unsigned int *idx,
				 paddr_t pa, unsigned int num_pages)
{
	unsigned int n = 0;
	size_t tot_page_count = get_page_count(mf);

	if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))
		return TEE_ERROR_BAD_PARAMETERS;

	for (n = 0; n < num_pages; n++)
		mf->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;

	(*idx) += n;
	return TEE_SUCCESS;
}

uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mf)
{
	return mf->cookie;
}

uint64_t mobj_ffa_push_to_inactive(struct mobj_ffa *mf)
{
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	assert(!find_in_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf));
	assert(!find_in_list(&shm_inactive_head, cmp_cookie, mf->cookie));
	assert(!find_in_list(&shm_head, cmp_cookie, mf->cookie));
	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return mf->cookie;
}
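
/*
 * Illustration (not compiled): how the registration side of this file
 * is meant to be used when a memory share arrives from the normal
 * world (CFG_CORE_SEL1_SPMC configuration). The function name and the
 * two physically contiguous ranges are made up for the example, and
 * error handling is omitted.
 */
#if 0
static uint64_t share_two_ranges_example(paddr_t pa0, paddr_t pa1)
{
	struct mobj_ffa *mf = NULL;
	unsigned int idx = 0;

	/* Reserve a cookie and room for 3 pages */
	mf = mobj_ffa_sel1_spmc_new(3);

	/* Two pages from the first range, one from the second */
	mobj_ffa_add_pages_at(mf, &idx, pa0, 2);	/* idx becomes 2 */
	mobj_ffa_add_pages_at(mf, &idx, pa1, 1);	/* idx becomes 3 */

	/*
	 * Park the mobj on the inactive list; the returned cookie is
	 * what is later passed to mobj_ffa_get_by_cookie().
	 */
	return mobj_ffa_push_to_inactive(mf);
}
#endif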

static void unmap_helper(struct mobj_ffa *mf)
{
	if (mf->mm) {
		core_mmu_unmap_pages(tee_mm_get_smem(mf->mm),
				     get_page_count(mf));
		tee_mm_free(mf->mm);
		mf->mm = NULL;
	}
}

TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * unregistered.
	 */
	if (mf) {
		DMSG("cookie %#"PRIx64" busy refc %u",
		     cookie, refcount_val(&mf->mobj.refc));
		res = TEE_ERROR_BUSY;
		goto out;
	}
	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	/*
	 * The mobj isn't found, or it has already been unregistered.
	 */
	if (!mf || mf->unregistered_by_cookie) {
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	mf->unregistered_by_cookie = true;
	res = TEE_SUCCESS;

out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	return res;
}

#ifdef CFG_CORE_SEL1_SPMC
TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * reclaimed.
	 */
	if (mf) {
		DMSG("cookie %#"PRIx64" busy refc %u",
		     cookie, refcount_val(&mf->mobj.refc));
		res = TEE_ERROR_BUSY;
		goto out;
	}

	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	if (!mf) {
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	/*
	 * The mobj is still busy if it has been registered via
	 * mobj_ffa_get_by_cookie() but not yet unregistered with
	 * mobj_ffa_unregister_by_cookie().
	 */
	if (mf->registered_by_cookie && !mf->unregistered_by_cookie) {
		DMSG("cookie %#"PRIx64" busy", cookie);
		res = TEE_ERROR_BUSY;
		goto out;
	}

	if (!pop_from_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf))
		panic();
	res = TEE_SUCCESS;
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	if (!res)
		mobj_ffa_sel1_spmc_delete(mf);
	return res;
}
#endif /*CFG_CORE_SEL1_SPMC*/

struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie, unsigned int internal_offs)
{
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	if (internal_offs >= SMALL_PAGE_SIZE)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&shm_lock);

	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	if (mf) {
		if (mf->page_offset == internal_offs) {
			if (!refcount_inc(&mf->mobj.refc)) {
				/*
				 * The refcount is 0: another thread has
				 * called mobj_put() and brought it to 0,
				 * but we found the mobj before
				 * ffa_inactivate() got the lock. Let's
				 * reinitialize it.
				 */
				refcount_set(&mf->mobj.refc, 1);
			}
			DMSG("cookie %#"PRIx64" active: refc %u",
			     cookie, refcount_val(&mf->mobj.refc));
		} else {
			EMSG("cookie %#"PRIx64" mismatching internal_offs got %#"PRIx16" expected %#x",
			     cookie, mf->page_offset, internal_offs);
			mf = NULL;
		}
	} else {
		mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
		if (mf) {
			mf->unregistered_by_cookie = false;
			mf->registered_by_cookie = true;
			assert(refcount_val(&mf->mobj.refc) == 0);
			refcount_set(&mf->mobj.refc, 1);
			refcount_set(&mf->mapcount, 0);
			mf->mobj.size += mf->page_offset;
			assert(!(mf->mobj.size & SMALL_PAGE_MASK));
			mf->mobj.size -= internal_offs;
			mf->page_offset = internal_offs;
			SLIST_INSERT_HEAD(&shm_head, mf, link);
		}
	}

	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (!mf) {
		EMSG("Failed to get cookie %#"PRIx64" internal_offs %#x",
		     cookie, internal_offs);
		return NULL;
	}

	return &mf->mobj;
}
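
/*
 * Illustration (not compiled): a typical consumer of a cookie received
 * over FF-A, for instance when a normal world buffer is handed to a TA.
 * The function name and the surrounding error handling are hypothetical.
 */
#if 0
static TEE_Result use_shared_buffer_example(uint64_t cookie)
{
	struct mobj *mobj = NULL;
	TEE_Result res = TEE_SUCCESS;

	/* Activates the mobj: moves it to shm_head and sets refc to 1 */
	mobj = mobj_ffa_get_by_cookie(cookie, 0);
	if (!mobj)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/* Map the pages into the core virtual address space */
	res = mobj_inc_map(mobj);
	if (!res) {
		/* ... access the buffer, for instance via mobj_get_va() ... */
		mobj_dec_map(mobj);
	}

	/* Drops refc; at 0 ffa_inactivate() moves it back to inactive */
	mobj_put(mobj);

	return res;
}
#endif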

static TEE_Result ffa_get_pa(struct mobj *mobj, size_t offset,
			     size_t granule, paddr_t *pa)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	size_t full_offset = 0;
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offset >= mobj->size)
		return TEE_ERROR_GENERIC;

	full_offset = offset + mf->page_offset;
	switch (granule) {
	case 0:
		p = mf->pages[full_offset / SMALL_PAGE_SIZE] +
		    (full_offset & SMALL_PAGE_MASK);
		break;
	case SMALL_PAGE_SIZE:
		p = mf->pages[full_offset / SMALL_PAGE_SIZE];
		break;
	default:
		return TEE_ERROR_GENERIC;
	}
	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(ffa_get_pa);
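
/*
 * Note on ffa_get_pa() above: with granule 0 the exact physical address
 * of the byte at @offset is returned, with granule SMALL_PAGE_SIZE the
 * physical address of the containing page is returned, and any other
 * granule is rejected.
 */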

static size_t ffa_get_phys_offs(struct mobj *mobj,
				size_t granule __maybe_unused)
{
	assert(granule >= mobj->phys_granule);

	return to_mobj_ffa(mobj)->page_offset;
}

static void *ffa_get_va(struct mobj *mobj, size_t offset)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);

	if (!mf->mm || offset >= mobj->size)
		return NULL;

	return (void *)(tee_mm_get_smem(mf->mm) + offset + mf->page_offset);
}

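/*
 * Called through mobj_put() when the refcount drops to 0: unmaps the
 * buffer if needed and moves the mobj from the active list back to the
 * inactive list, unless another thread resurrected it in the meantime.
 */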
static void ffa_inactivate(struct mobj *mobj)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	/*
	 * If the refcount isn't 0, some other thread found this mobj in
	 * shm_head after the mobj_put() that brought us here, but before
	 * we got the lock.
	 */
	if (refcount_val(&mobj->refc)) {
		DMSG("cookie %#"PRIx64" was resurrected", mf->cookie);
		goto out;
	}

	DMSG("cookie %#"PRIx64, mf->cookie);
	if (!pop_from_list(&shm_head, cmp_ptr, (vaddr_t)mf))
		panic();
	unmap_helper(mf);
	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
}

static TEE_Result ffa_get_cattr(struct mobj *mobj __unused, uint32_t *cattr)
{
	if (!cattr)
		return TEE_ERROR_GENERIC;

	*cattr = TEE_MATTR_CACHE_CACHED;

	return TEE_SUCCESS;
}

static bool ffa_matches(struct mobj *mobj __maybe_unused, enum buf_is_attr attr)
{
	assert(mobj->ops == &mobj_ffa_ops);

	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

static uint64_t ffa_get_cookie(struct mobj *mobj)
{
	return to_mobj_ffa(mobj)->cookie;
}

static const struct mobj_ops mobj_ffa_ops __rodata_unpaged = {
	.get_pa = ffa_get_pa,
	.get_phys_offs = ffa_get_phys_offs,
	.get_va = ffa_get_va,
	.get_cattr = ffa_get_cattr,
	.matches = ffa_matches,
	.free = ffa_inactivate,
	.get_cookie = ffa_get_cookie,
};

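/*
 * Maps the pages of an FF-A mobj into the core shared memory virtual
 * address space on the first call; later calls only increase mapcount.
 * The second refcount_val() check under shm_lock handles the case where
 * another thread completed the mapping while we were taking the lock.
 */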
TEE_Result mobj_inc_map(struct mobj *mobj)
{
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;
	struct mobj_ffa *mf = to_mobj_ffa_may_fail(mobj);

	if (!mf)
		return TEE_ERROR_GENERIC;

	if (refcount_inc(&mf->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&shm_lock);

	if (refcount_val(&mf->mapcount))
		goto out;

	mf->mm = tee_mm_alloc(&tee_mm_shm, mf->mobj.size);
	if (!mf->mm) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out;
	}

	res = core_mmu_map_pages(tee_mm_get_smem(mf->mm), mf->pages,
				 get_page_count(mf), MEM_AREA_NSEC_SHM);
	if (res) {
		tee_mm_free(mf->mm);
		mf->mm = NULL;
		goto out;
	}

	refcount_set(&mf->mapcount, 1);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return res;
}

TEE_Result mobj_dec_map(struct mobj *mobj)
{
	struct mobj_ffa *mf = to_mobj_ffa_may_fail(mobj);
	uint32_t exceptions = 0;

	if (!mf)
		return TEE_ERROR_GENERIC;

	if (!refcount_dec(&mf->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	unmap_helper(mf);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return TEE_SUCCESS;
}

static TEE_Result mapped_shm_init(void)
{
	vaddr_t pool_start = 0;
	vaddr_t pool_end = 0;

	core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
	if (!pool_start || !pool_end)
		panic("Can't find region for shmem pool");

	if (!tee_mm_init(&tee_mm_shm, pool_start, pool_end, SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("Could not create shmem pool");

	DMSG("Shared memory address range: %#"PRIxVA", %#"PRIxVA,
	     pool_start, pool_end);
	return TEE_SUCCESS;
}

service_init(mapped_shm_init);