xref: /optee_os/core/arch/arm/mm/mobj_ffa.c (revision 9fc2442cc66c279cb962c90c4375746fc9b28bb9)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2020, Linaro Limited
 */

/*
 * Note: a few headers below (kernel/panic.h, mm/core_memprot.h,
 * mm/core_mmu.h, mm/tee_mm.h, stdlib.h, trace.h, util.h) are added for the
 * symbols this file uses (panic(), core_pbuf_is(), core_mmu_map_pages(),
 * tee_mm_*, calloc()/free(), DMSG()/EMSG(), ROUNDUP()/BIT64()).
 */
#include <assert.h>
#include <bitstring.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <stdlib.h>
#include <sys/queue.h>
#include <trace.h>
#include <util.h>

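/*
 * struct mobj_ffa - mobj for FF-A shared memory
 * @mobj:	 generic mobj, with ops set to mobj_ffa_ops
 * @link:	 linkage in shm_head or shm_inactive_head
 * @cookie:	 FF-A memory handle identifying this object
 * @mm:		 virtual memory allocation, non-NULL while mapped
 * @mapcount:	 number of inc_map() calls without a matching dec_map()
 * @page_offset: offset into the first page where the buffer starts
 * @registered_by_cookie:   activated via mobj_ffa_get_by_cookie()
 * @unregistered_by_cookie: mobj_ffa_unregister_by_cookie() called while
 *			    the object was inactive
 * @pages:	 physical address of each page of the buffer
 */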
struct mobj_ffa {
	struct mobj mobj;
	SLIST_ENTRY(mobj_ffa) link;
	uint64_t cookie;
	tee_mm_entry_t *mm;
	struct refcount mapcount;
	uint16_t page_offset;
	bool registered_by_cookie;
	bool unregistered_by_cookie;
	paddr_t pages[];
};

SLIST_HEAD(mobj_ffa_head, mobj_ffa);

#ifdef CFG_CORE_SEL1_SPMC
#define NUM_SHMS	64
static bitstr_t bit_decl(shm_bits, NUM_SHMS);
#endif

static struct mobj_ffa_head shm_head = SLIST_HEAD_INITIALIZER(shm_head);
static struct mobj_ffa_head shm_inactive_head =
	SLIST_HEAD_INITIALIZER(shm_inactive_head);

static unsigned int shm_lock = SPINLOCK_UNLOCK;

static const struct mobj_ops mobj_ffa_ops __rodata_unpaged;

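/* Returns the enclosing mobj_ffa, asserting that @mobj is of that type */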
static struct mobj_ffa *to_mobj_ffa(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_ffa_ops);
	return container_of(mobj, struct mobj_ffa, mobj);
}

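/*
 * Returns the allocation size needed for a struct mobj_ffa covering
 * @num_pages pages, or 0 on arithmetic overflow.
 */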
static size_t shm_size(size_t num_pages)
{
	size_t s = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_ffa), s, &s))
		return 0;
	return s;
}

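/*
 * Allocates a zero-initialized mobj_ffa; the cookie and the pages[]
 * array are filled in separately by the callers.
 */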
static struct mobj_ffa *ffa_new(unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	size_t s = 0;

	if (!num_pages)
		return NULL;

	s = shm_size(num_pages);
	if (!s)
		return NULL;
	mf = calloc(1, s);
	if (!mf)
		return NULL;

	mf->mobj.ops = &mobj_ffa_ops;
	mf->mobj.size = num_pages * SMALL_PAGE_SIZE;
	mf->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&mf->mobj.refc, 0);

	return mf;
}

#ifdef CFG_CORE_SEL1_SPMC
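/*
 * Allocates a new mobj_ffa and assigns it a unique cookie from the
 * bitmap of free cookie slots.
 */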
struct mobj_ffa *mobj_ffa_sel1_spmc_new(unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	mf = ffa_new(num_pages);
	if (!mf)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	bit_ffc(shm_bits, NUM_SHMS, &i);
	if (i != -1) {
		bit_set(shm_bits, i);
		/*
		 * + 1 to avoid a cookie value of 0. Bit 44 is also set so
		 * that one of the upper 32 bits gets exercised for
		 * testing.
		 */
		mf->cookie = (i + 1) | BIT64(44);
	}
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (i == -1) {
		free(mf);
		return NULL;
	}

	return mf;
}
#endif /*CFG_CORE_SEL1_SPMC*/

static size_t get_page_count(struct mobj_ffa *mf)
{
	return ROUNDUP(mf->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
}

static bool cmp_cookie(struct mobj_ffa *mf, uint64_t cookie)
{
	return mf->cookie == cookie;
}

static bool cmp_ptr(struct mobj_ffa *mf, uint64_t ptr)
{
	return mf == (void *)(vaddr_t)ptr;
}

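/*
 * Removes and returns the first entry in @head for which @cmp_func
 * returns true, or NULL if there's no match.
 */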
static struct mobj_ffa *pop_from_list(struct mobj_ffa_head *head,
				      bool (*cmp_func)(struct mobj_ffa *mf,
						       uint64_t val),
				      uint64_t val)
{
	struct mobj_ffa *mf = SLIST_FIRST(head);
	struct mobj_ffa *p = NULL;

	if (!mf)
		return NULL;

	if (cmp_func(mf, val)) {
		SLIST_REMOVE_HEAD(head, link);
		return mf;
	}

	while (true) {
		p = SLIST_NEXT(mf, link);
		if (!p)
			return NULL;
		if (cmp_func(p, val)) {
			SLIST_REMOVE_AFTER(mf, link);
			return p;
		}
		mf = p;
	}
}

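/*
 * Returns the first entry in @head for which @cmp_func returns true
 * without removing it, or NULL if there's no match.
 */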
static struct mobj_ffa *find_in_list(struct mobj_ffa_head *head,
				     bool (*cmp_func)(struct mobj_ffa *mf,
						      uint64_t val),
				     uint64_t val)
{
	struct mobj_ffa *mf = NULL;

	SLIST_FOREACH(mf, head, link)
		if (cmp_func(mf, val))
			return mf;

	return NULL;
}

#ifdef CFG_CORE_SEL1_SPMC
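/*
 * Releases the cookie slot in the bitmap and frees @mf, which must be
 * unmapped and not linked into any list.
 */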
void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mf)
{
	int i = (mf->cookie - 1) & ~BIT64(44);
	uint32_t exceptions = 0;

	assert(i >= 0 && i < NUM_SHMS);

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	assert(bit_test(shm_bits, i));
	bit_clear(shm_bits, i);
	assert(!mf->mm);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	free(mf);
}
#endif /*CFG_CORE_SEL1_SPMC*/

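/*
 * Records the physical address of @num_pages pages starting at @pa in
 * the pages[] array, beginning at index *@idx, and advances *@idx past
 * the added pages. The pages must be non-secure memory.
 */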
TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mf, unsigned int *idx,
				 paddr_t pa, unsigned int num_pages)
{
	unsigned int n = 0;
	size_t tot_page_count = get_page_count(mf);

	if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))
		return TEE_ERROR_BAD_PARAMETERS;

	for (n = 0; n < num_pages; n++)
		mf->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;

	(*idx) += n;
	return TEE_SUCCESS;
}

uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mf)
{
	return mf->cookie;
}

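/*
 * Inserts @mf into the inactive list, making it available for
 * activation with mobj_ffa_get_by_cookie(). Returns the cookie.
 */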
uint64_t mobj_ffa_push_to_inactive(struct mobj_ffa *mf)
{
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	assert(!find_in_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf));
	assert(!find_in_list(&shm_inactive_head, cmp_cookie, mf->cookie));
	assert(!find_in_list(&shm_head, cmp_cookie, mf->cookie));
	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return mf->cookie;
}

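/* Unmaps @mf and frees its virtual memory allocation, if mapped */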
static void unmap_helper(struct mobj_ffa *mf)
{
	if (mf->mm) {
		core_mmu_unmap_pages(tee_mm_get_smem(mf->mm),
				     get_page_count(mf));
		tee_mm_free(mf->mm);
		mf->mm = NULL;
	}
}

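/*
 * Marks an inactive mobj as unregistered so that it can be reclaimed.
 * Fails with TEE_ERROR_BUSY while the mobj is active and with
 * TEE_ERROR_ITEM_NOT_FOUND if it's unknown or already unregistered.
 */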
TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * unregistered.
	 */
	if (mf) {
		DMSG("cookie %#"PRIx64" busy refc %u",
		     cookie, refcount_val(&mf->mobj.refc));
		res = TEE_ERROR_BUSY;
		goto out;
	}
	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	/*
	 * Fail if the mobj isn't found, or if it has already been
	 * unregistered.
	 */
	if (!mf || mf->unregistered_by_cookie) {
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	mf->unregistered_by_cookie = true;
	res = TEE_SUCCESS;

out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	return res;
}

#ifdef CFG_CORE_SEL1_SPMC
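/*
 * Reclaims the shared memory object identified by @cookie, deleting it
 * on success. Only an inactive mobj that isn't currently registered
 * through mobj_ffa_get_by_cookie() can be reclaimed.
 */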
TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * reclaimed.
	 */
	if (mf) {
		DMSG("cookie %#"PRIx64" busy refc %u",
		     cookie, refcount_val(&mf->mobj.refc));
		res = TEE_ERROR_BUSY;
		goto out;
	}

	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	if (!mf) {
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	/*
	 * The mobj is busy if it has been registered via
	 * mobj_ffa_get_by_cookie() but not yet unregistered with
	 * mobj_ffa_unregister_by_cookie().
	 */
	if (mf->registered_by_cookie && !mf->unregistered_by_cookie) {
		DMSG("cookie %#"PRIx64" busy", cookie);
		res = TEE_ERROR_BUSY;
		goto out;
	}

	if (!pop_from_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf))
		panic();
	res = TEE_SUCCESS;
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	if (!res)
		mobj_ffa_sel1_spmc_delete(mf);
	return res;
}
#endif /*CFG_CORE_SEL1_SPMC*/

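/*
 * Returns the mobj for @cookie with an increased reference counter, or
 * NULL on failure. An inactive mobj is activated and moved to the
 * active list; an already active mobj must be requested with the same
 * @internal_offs as when it was activated.
 */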
struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie, unsigned int internal_offs)
{
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	if (internal_offs >= SMALL_PAGE_SIZE)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&shm_lock);

	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	if (mf) {
		if (mf->page_offset == internal_offs) {
			if (!refcount_inc(&mf->mobj.refc)) {
				/*
				 * If refcount is 0, some other thread has
				 * called mobj_put() and the refcount
				 * reached 0, but we found the mobj before
				 * ffa_inactivate() got the lock. Let's
				 * reinitialize it.
				 */
				refcount_set(&mf->mobj.refc, 1);
			}
			DMSG("cookie %#"PRIx64" active: refc %u",
			     cookie, refcount_val(&mf->mobj.refc));
		} else {
			EMSG("cookie %#"PRIx64" mismatching internal_offs got %#"PRIx16" expected %#x",
			     cookie, mf->page_offset, internal_offs);
			mf = NULL;
		}
	} else {
		mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
		if (mf) {
			mf->unregistered_by_cookie = false;
			mf->registered_by_cookie = true;
			assert(refcount_val(&mf->mobj.refc) == 0);
			refcount_set(&mf->mobj.refc, 1);
			refcount_set(&mf->mapcount, 0);
			mf->mobj.size += mf->page_offset;
			assert(!(mf->mobj.size & SMALL_PAGE_MASK));
			mf->mobj.size -= internal_offs;
			mf->page_offset = internal_offs;
			SLIST_INSERT_HEAD(&shm_head, mf, link);
		}
	}

	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (!mf) {
		EMSG("Failed to get cookie %#"PRIx64" internal_offs %#x",
		     cookie, internal_offs);
		return NULL;
	}

	return &mf->mobj;
}

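/*
 * mobj_ops get_pa(): returns the physical address at @offset, either
 * exact (@granule == 0) or as the base of the containing page
 * (@granule == SMALL_PAGE_SIZE), taking page_offset into account.
 */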
static TEE_Result ffa_get_pa(struct mobj *mobj, size_t offset,
			     size_t granule, paddr_t *pa)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	size_t full_offset = 0;
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offset >= mobj->size)
		return TEE_ERROR_GENERIC;

	full_offset = offset + mf->page_offset;
	switch (granule) {
	case 0:
		p = mf->pages[full_offset / SMALL_PAGE_SIZE] +
		    (full_offset & SMALL_PAGE_MASK);
		break;
	case SMALL_PAGE_SIZE:
		p = mf->pages[full_offset / SMALL_PAGE_SIZE];
		break;
	default:
		return TEE_ERROR_GENERIC;
	}
	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(ffa_get_pa);

static size_t ffa_get_phys_offs(struct mobj *mobj,
				size_t granule __maybe_unused)
{
	assert(granule >= mobj->phys_granule);

	return to_mobj_ffa(mobj)->page_offset;
}

static void *ffa_get_va(struct mobj *mobj, size_t offset)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);

	if (!mf->mm || offset >= mobj->size)
		return NULL;

	return (void *)(tee_mm_get_smem(mf->mm) + offset + mf->page_offset);
}

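/*
 * mobj_ops free(): called when the refcount reaches 0. Unmaps the mobj
 * and moves it back to the inactive list, unless another thread
 * resurrected it before we got the lock.
 */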
static void ffa_inactivate(struct mobj *mobj)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	/*
	 * If refcount isn't 0 some other thread has found this mobj in
	 * shm_head after the mobj_put() that put us here and before we got
	 * the lock.
	 */
	if (refcount_val(&mobj->refc)) {
		DMSG("cookie %#"PRIx64" was resurrected", mf->cookie);
		goto out;
	}

	DMSG("cookie %#"PRIx64, mf->cookie);
	if (!pop_from_list(&shm_head, cmp_ptr, (vaddr_t)mf))
		panic();
	unmap_helper(mf);
	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
}

static TEE_Result ffa_get_cattr(struct mobj *mobj __unused, uint32_t *cattr)
{
	if (!cattr)
		return TEE_ERROR_GENERIC;

	*cattr = TEE_MATTR_CACHE_CACHED;

	return TEE_SUCCESS;
}

static bool ffa_matches(struct mobj *mobj __maybe_unused, enum buf_is_attr attr)
{
	assert(mobj->ops == &mobj_ffa_ops);

	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

static uint64_t ffa_get_cookie(struct mobj *mobj)
{
	return to_mobj_ffa(mobj)->cookie;
}

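/*
 * mobj_ops inc_map(): maps the mobj into virtual memory on the first
 * call; later calls only increase the map count.
 */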
static TEE_Result ffa_inc_map(struct mobj *mobj)
{
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;
	struct mobj_ffa *mf = to_mobj_ffa(mobj);

	if (refcount_inc(&mf->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&shm_lock);

	if (refcount_val(&mf->mapcount))
		goto out;

	mf->mm = tee_mm_alloc(&tee_mm_shm, mf->mobj.size);
	if (!mf->mm) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out;
	}

	res = core_mmu_map_pages(tee_mm_get_smem(mf->mm), mf->pages,
				 get_page_count(mf), MEM_AREA_NSEC_SHM);
	if (res) {
		tee_mm_free(mf->mm);
		mf->mm = NULL;
		goto out;
	}

	refcount_set(&mf->mapcount, 1);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return res;
}

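/* mobj_ops dec_map(): unmaps the mobj when the map count drops to 0 */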
static TEE_Result ffa_dec_map(struct mobj *mobj)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;

	if (!refcount_dec(&mf->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	unmap_helper(mf);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return TEE_SUCCESS;
}

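/*
 * Initializes the virtual address pool used for mapping shared memory
 * from the MEM_AREA_SHM_VASPACE region; registered as a service init
 * call.
 */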
static TEE_Result mapped_shm_init(void)
{
	vaddr_t pool_start = 0;
	vaddr_t pool_end = 0;

	core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
	if (!pool_start || !pool_end)
		panic("Can't find region for shmem pool");

	if (!tee_mm_init(&tee_mm_shm, pool_start, pool_end, SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("Could not create shmem pool");

	DMSG("Shared memory address range: %#"PRIxVA", %#"PRIxVA,
	     pool_start, pool_end);
	return TEE_SUCCESS;
}

static const struct mobj_ops mobj_ffa_ops __rodata_unpaged = {
	.get_pa = ffa_get_pa,
	.get_phys_offs = ffa_get_phys_offs,
	.get_va = ffa_get_va,
	.get_cattr = ffa_get_cattr,
	.matches = ffa_matches,
	.free = ffa_inactivate,
	.get_cookie = ffa_get_cookie,
	.inc_map = ffa_inc_map,
	.dec_map = ffa_dec_map,
};

service_init(mapped_shm_init);