// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2024, Linaro Limited
 */

#include <assert.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/linker.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <optee_msg.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

static struct mutex shm_mu = MUTEX_INITIALIZER;
static struct condvar shm_cv = CONDVAR_INITIALIZER;
static size_t shm_release_waiters;

/*
 * mobj_reg_shm implementation. Describes shared memory provided by
 * normal world.
 */

struct mobj_reg_shm {
	struct mobj mobj;
	SLIST_ENTRY(mobj_reg_shm) next;
	uint64_t cookie;
	tee_mm_entry_t *mm;	/* Virtual address range when mapped, else NULL */
	paddr_t page_offset;	/* Offset into the first page */
	struct refcount mapcount;	/* Mapping reference counter */
	bool guarded;		/* Can only be freed via mobj_put() */
	bool releasing;		/* Release by cookie is in progress */
	bool release_frees;	/* Tell the releasing thread to do the free */
	paddr_t pages[];	/* Physical address of each registered page */
};

/*
 * struct mobj_protmem - describes protected memory lent by normal world
 */
struct mobj_protmem {
	struct mobj mobj;
	SLIST_ENTRY(mobj_protmem) next;
	uint64_t cookie;
	paddr_t pa;		/* Physical base address of the range */
	enum mobj_use_case use_case;
	bool releasing;		/* Release by cookie is in progress */
	bool release_frees;	/* Tell the releasing thread to do the free */
};

static size_t mobj_reg_shm_size(size_t nr_pages)
{
	size_t s = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), nr_pages, &s))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_reg_shm), s, &s))
		return 0;
	return s;
}
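
/*
 * Note: mobj_reg_shm_size(n) is simply sizeof(struct mobj_reg_shm) plus
 * room for n entries in the trailing pages[] array, for example
 * mobj_reg_shm_size(4) == sizeof(struct mobj_reg_shm) + 4 * sizeof(paddr_t).
 * A return value of 0 signals arithmetic overflow to the caller.
 */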

static SLIST_HEAD(reg_shm_head, mobj_reg_shm) reg_shm_list =
	SLIST_HEAD_INITIALIZER(reg_shm_head);

static unsigned int reg_shm_slist_lock = SPINLOCK_UNLOCK;
static unsigned int reg_shm_map_lock = SPINLOCK_UNLOCK;

/* Access is serialized with reg_shm_slist_lock */
static SLIST_HEAD(protmem_head, mobj_protmem) protmem_list =
	SLIST_HEAD_INITIALIZER(protmem_head);

static struct mobj_reg_shm *to_mobj_reg_shm(struct mobj *mobj);

static TEE_Result mobj_reg_shm_get_pa(struct mobj *mobj, size_t offst,
				      size_t granule, paddr_t *pa)
{
	struct mobj_reg_shm *mobj_reg_shm = to_mobj_reg_shm(mobj);
	size_t full_offset = 0;
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offst >= mobj->size)
		return TEE_ERROR_GENERIC;

	full_offset = offst + mobj_reg_shm->page_offset;
	switch (granule) {
	case 0:
		p = mobj_reg_shm->pages[full_offset / SMALL_PAGE_SIZE] +
		    (full_offset & SMALL_PAGE_MASK);
		break;
	case SMALL_PAGE_SIZE:
		p = mobj_reg_shm->pages[full_offset / SMALL_PAGE_SIZE];
		break;
	default:
		return TEE_ERROR_GENERIC;
	}
	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_reg_shm_get_pa);
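
/*
 * Illustrative example: with page_offset 0x80 and offst 0x1000 the full
 * offset is 0x1080, so granule 0 yields pages[1] + 0x80 (the exact byte
 * address) while granule SMALL_PAGE_SIZE yields pages[1] (the page the
 * byte lives in). Other granules are rejected.
 */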

static size_t mobj_reg_shm_get_phys_offs(struct mobj *mobj,
					 size_t granule __maybe_unused)
{
	assert(granule >= mobj->phys_granule);
	return to_mobj_reg_shm(mobj)->page_offset;
}

static void *mobj_reg_shm_get_va(struct mobj *mobj, size_t offst, size_t len)
{
	struct mobj_reg_shm *mrs = to_mobj_reg_shm(mobj);

	if (!mrs->mm || !mobj_check_offset_and_len(mobj, offst, len))
		return NULL;

	return (void *)(vaddr_t)(tee_mm_get_smem(mrs->mm) + offst +
				 mrs->page_offset);
}

static void reg_shm_unmap_helper(struct mobj_reg_shm *r)
{
	assert(r->mm);
	assert(r->mm->pool->shift == SMALL_PAGE_SHIFT);
	core_mmu_unmap_pages(tee_mm_get_smem(r->mm), r->mm->size);
	tee_mm_free(r->mm);
	r->mm = NULL;
}

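/*
 * Unlinks the mobj from reg_shm_list and frees it. Any remaining mapping
 * is torn down first under reg_shm_map_lock. Both callers below hold
 * reg_shm_slist_lock across the call, which serializes the list update.
 */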
static void reg_shm_free_helper(struct mobj_reg_shm *mobj_reg_shm)
{
	uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);

	if (mobj_reg_shm->mm)
		reg_shm_unmap_helper(mobj_reg_shm);

	cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);

	SLIST_REMOVE(&reg_shm_list, mobj_reg_shm, mobj_reg_shm, next);
	free(mobj_reg_shm);
}

static void mobj_reg_shm_free(struct mobj *mobj)
{
	struct mobj_reg_shm *r = to_mobj_reg_shm(mobj);
	uint32_t exceptions = 0;

	if (r->guarded && !r->releasing) {
		/*
		 * Guarded registered shared memory can't be released
		 * by cookie, only by mobj_put(). However, unguarded
		 * registered shared memory can also be freed by mobj_put()
		 * unless mobj_reg_shm_release_by_cookie() is waiting for
		 * the mobj to be released.
		 */
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		reg_shm_free_helper(r);
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
	} else {
		/*
		 * We've reached the point where an unguarded reg shm can
		 * be released by cookie. Notify eventual waiters.
		 */
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		r->release_frees = true;
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

		mutex_lock(&shm_mu);
		if (shm_release_waiters)
			condvar_broadcast(&shm_cv);
		mutex_unlock(&shm_mu);
	}
}

static TEE_Result mobj_reg_shm_get_mem_type(struct mobj *mobj __unused,
					    uint32_t *mt)
{
	if (!mt)
		return TEE_ERROR_GENERIC;

	*mt = TEE_MATTR_MEM_TYPE_CACHED;

	return TEE_SUCCESS;
}

static TEE_Result mobj_reg_shm_inc_map(struct mobj *mobj)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_reg_shm *r = to_mobj_reg_shm(mobj);
	uint32_t exceptions = 0;
	size_t sz = 0;

	while (true) {
		if (refcount_inc(&r->mapcount))
			return TEE_SUCCESS;

		exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);

		if (!refcount_val(&r->mapcount))
			break; /* continue to reinitialize */
		/*
		 * If another thread beat us to initialize mapcount,
		 * restart to make sure we still increase it.
		 */
		cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);
	}

	/*
	 * If we have beaten another thread calling mobj_reg_shm_dec_map()
	 * to get the lock we need only to reinitialize mapcount to 1.
	 */
	if (!r->mm) {
		sz = ROUNDUP(mobj->size + r->page_offset, SMALL_PAGE_SIZE);
		r->mm = tee_mm_alloc(&core_virt_shm_pool, sz);
		if (!r->mm) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto out;
		}

		res = core_mmu_map_pages(tee_mm_get_smem(r->mm), r->pages,
					 sz / SMALL_PAGE_SIZE,
					 MEM_AREA_NSEC_SHM);
		if (res) {
			tee_mm_free(r->mm);
			r->mm = NULL;
			goto out;
		}
	}

	refcount_set(&r->mapcount, 1);
out:
	cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);

	return res;
}

static TEE_Result mobj_reg_shm_dec_map(struct mobj *mobj)
{
	struct mobj_reg_shm *r = to_mobj_reg_shm(mobj);
	uint32_t exceptions = 0;

	if (!refcount_dec(&r->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);

	/*
	 * Check that another thread hasn't been able to:
	 * - increase the mapcount
	 * - or, increase the mapcount, decrease it again, and set r->mm to
	 *   NULL
	 * before we acquired the spinlock
	 */
	if (!refcount_val(&r->mapcount) && r->mm)
		reg_shm_unmap_helper(r);

	cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);

	return TEE_SUCCESS;
}
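
/*
 * In short: the first successful mobj_reg_shm_inc_map() allocates a VA
 * range from core_virt_shm_pool and maps the registered pages there,
 * further calls only bump mapcount, and the final mobj_reg_shm_dec_map()
 * tears the mapping down again. The refcount fast path is lock free;
 * reg_shm_map_lock only serializes the 0 <-> 1 transitions.
 */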

static bool mobj_reg_shm_matches(struct mobj *mobj, enum buf_is_attr attr);

static uint64_t mobj_reg_shm_get_cookie(struct mobj *mobj)
{
	return to_mobj_reg_shm(mobj)->cookie;
}

/*
 * When CFG_PREALLOC_RPC_CACHE is disabled, this variable is weak just
 * to ease breaking its dependency chain when added to the unpaged area.
 * When CFG_PREALLOC_RPC_CACHE is enabled, releasing RPC preallocated
 * shm mandates these resources to be unpaged.
 */
const struct mobj_ops mobj_reg_shm_ops
__weak __relrodata_unpaged("mobj_reg_shm_ops") = {
	.get_pa = mobj_reg_shm_get_pa,
	.get_phys_offs = mobj_reg_shm_get_phys_offs,
	.get_va = mobj_reg_shm_get_va,
	.get_mem_type = mobj_reg_shm_get_mem_type,
	.matches = mobj_reg_shm_matches,
	.free = mobj_reg_shm_free,
	.get_cookie = mobj_reg_shm_get_cookie,
	.inc_map = mobj_reg_shm_inc_map,
	.dec_map = mobj_reg_shm_dec_map,
};

#ifdef CFG_PREALLOC_RPC_CACHE
/* Releasing RPC preallocated shm mandates a few resources to be unpaged */
DECLARE_KEEP_PAGER(mobj_reg_shm_get_cookie);
DECLARE_KEEP_PAGER(mobj_reg_shm_matches);
DECLARE_KEEP_PAGER(mobj_reg_shm_free);
#endif

static bool mobj_reg_shm_matches(struct mobj *mobj __maybe_unused,
				 enum buf_is_attr attr)
{
	assert(mobj->ops == &mobj_reg_shm_ops);

	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

static struct mobj_reg_shm *to_mobj_reg_shm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_reg_shm_ops);
	return container_of(mobj, struct mobj_reg_shm, mobj);
}

static TEE_Result check_reg_shm_conflict(struct mobj_reg_shm *r, paddr_t pa,
					 paddr_size_t size)
{
	size_t n = 0;

	for (n = 0; n < r->mobj.size / SMALL_PAGE_SIZE; n++)
		if (core_is_buffer_intersect(pa, size, r->pages[n],
					     SMALL_PAGE_SIZE))
			return TEE_ERROR_BAD_PARAMETERS;

	return TEE_SUCCESS;
}

static TEE_Result check_protmem_conflict(struct mobj_reg_shm *r)
{
	struct mobj_protmem *m = NULL;
	TEE_Result res = TEE_SUCCESS;

	SLIST_FOREACH(m, &protmem_list, next) {
		res = check_reg_shm_conflict(r, m->pa, m->mobj.size);
		if (res)
			break;
	}

	return res;
}

struct mobj *mobj_reg_shm_alloc(paddr_t *pages, size_t num_pages,
				paddr_t page_offset, uint64_t cookie)
{
	struct mobj_reg_shm *mobj_reg_shm = NULL;
	TEE_Result res = TEE_SUCCESS;
	size_t i = 0;
	uint32_t exceptions = 0;
	size_t s = 0;

	if (!num_pages || page_offset >= SMALL_PAGE_SIZE)
		return NULL;

	s = mobj_reg_shm_size(num_pages);
	if (!s)
		return NULL;
	mobj_reg_shm = calloc(1, s);
	if (!mobj_reg_shm)
		return NULL;

	mobj_reg_shm->mobj.ops = &mobj_reg_shm_ops;
	mobj_reg_shm->mobj.size = num_pages * SMALL_PAGE_SIZE - page_offset;
	mobj_reg_shm->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&mobj_reg_shm->mobj.refc, 1);
	mobj_reg_shm->cookie = cookie;
	mobj_reg_shm->guarded = true;
	mobj_reg_shm->page_offset = page_offset;
	memcpy(mobj_reg_shm->pages, pages, sizeof(*pages) * num_pages);

	/* Ensure loaded references match format and security constraints */
	for (i = 0; i < num_pages; i++) {
		if (mobj_reg_shm->pages[i] & SMALL_PAGE_MASK)
			goto err;

		/* Only Non-secure memory can be mapped there */
		if (!core_pbuf_is(CORE_MEM_NON_SEC, mobj_reg_shm->pages[i],
				  SMALL_PAGE_SIZE))
			goto err;
	}

	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	res = check_protmem_conflict(mobj_reg_shm);
	if (!res)
		SLIST_INSERT_HEAD(&reg_shm_list, mobj_reg_shm, next);
	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

	if (res)
		goto err;

	return &mobj_reg_shm->mobj;
err:
	free(mobj_reg_shm);
	return NULL;
}
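
/*
 * Typical use, as a sketch (pa0 and pa1 stand in for page-aligned,
 * non-secure physical addresses):
 *
 *	paddr_t pages[] = { pa0, pa1 };
 *	struct mobj *m = mobj_reg_shm_alloc(pages, ARRAY_SIZE(pages),
 *					    page_offset, cookie);
 *
 *	if (m) {
 *		...map and access the memory, for instance with
 *		mobj_inc_map() and mobj_get_va()...
 *		mobj_put(m);	(drops the initial reference)
 *	}
 *
 * The mobj starts out guarded; see mobj_reg_shm_unguard() for making it
 * releasable by cookie instead.
 */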

void mobj_reg_shm_unguard(struct mobj *mobj)
{
	uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);

	to_mobj_reg_shm(mobj)->guarded = false;
	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
}

static struct mobj_reg_shm *reg_shm_find_unlocked(uint64_t cookie)
{
	struct mobj_reg_shm *mobj_reg_shm = NULL;

	SLIST_FOREACH(mobj_reg_shm, &reg_shm_list, next)
		if (mobj_reg_shm->cookie == cookie)
			return mobj_reg_shm;

	return NULL;
}

static struct mobj_protmem *protmem_find_unlocked(uint64_t cookie)
{
	struct mobj_protmem *m = NULL;

	SLIST_FOREACH(m, &protmem_list, next)
		if (m->cookie == cookie)
			return m;

	return NULL;
}

struct mobj *mobj_reg_shm_get_by_cookie(uint64_t cookie)
{
	struct mobj_reg_shm *rs = NULL;
	struct mobj_protmem *rm = NULL;
	uint32_t exceptions = 0;
	struct mobj *m = NULL;

	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	rs = reg_shm_find_unlocked(cookie);
	if (rs) {
		m = mobj_get(&rs->mobj);
		goto out;
	}
	rm = protmem_find_unlocked(cookie);
	if (rm)
		m = mobj_get(&rm->mobj);

out:
	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

	return m;
}

TEE_Result mobj_reg_shm_release_by_cookie(uint64_t cookie)
{
	uint32_t exceptions = 0;
	struct mobj_reg_shm *r = NULL;

	/*
	 * Try to find r and see if it can be released by this function.
	 * If so, call mobj_put(). Otherwise this function was called
	 * either with a wrong cookie or perhaps a second time; regardless,
	 * return TEE_ERROR_BAD_PARAMETERS.
	 */
	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	r = reg_shm_find_unlocked(cookie);
	if (!r || r->guarded || r->releasing)
		r = NULL;
	else
		r->releasing = true;

	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

	if (!r)
		return TEE_ERROR_BAD_PARAMETERS;

	mobj_put(&r->mobj);

	/*
	 * We've established that this function can release the cookie.
	 * Now we wait until mobj_reg_shm_free() is called by the last
	 * mobj_put() needed to free this mobj. Note that the call to
	 * mobj_put() above could very well be that call.
	 *
	 * Once mobj_reg_shm_free() is called it will set r->release_frees
	 * to true and we can free the mobj here.
	 */
	mutex_lock(&shm_mu);
	shm_release_waiters++;
	assert(shm_release_waiters);

	while (true) {
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		if (r->release_frees) {
			reg_shm_free_helper(r);
			r = NULL;
		}
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

		if (!r)
			break;
		condvar_wait(&shm_cv, &shm_mu);
	}

	assert(shm_release_waiters);
	shm_release_waiters--;
	mutex_unlock(&shm_mu);

	return TEE_SUCCESS;
}
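
/*
 * The release handshake above in short: the releasing thread marks the
 * entry with r->releasing and drops its reference; whichever mobj_put()
 * turns out to be the last one makes mobj_reg_shm_free() set
 * r->release_frees and signal shm_cv; the releasing thread then does the
 * actual unlink and free under reg_shm_slist_lock.
 */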

struct mobj *mobj_mapped_shm_alloc(paddr_t *pages, size_t num_pages,
				   paddr_t page_offset, uint64_t cookie)
{
	struct mobj *mobj = mobj_reg_shm_alloc(pages, num_pages,
					       page_offset, cookie);

	if (!mobj)
		return NULL;

	if (mobj_inc_map(mobj)) {
		mobj_put(mobj);
		return NULL;
	}

	return mobj;
}
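
/*
 * mobj_mapped_shm_alloc() is a convenience wrapper: it registers the
 * pages and eagerly maps them in one go, and on mapping failure it drops
 * the just-taken initial reference so the caller only ever sees a fully
 * usable mobj or NULL.
 */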

static TEE_Result mobj_mapped_shm_init(void)
{
	vaddr_t pool_start = 0;
	vaddr_t pool_end = 0;

	core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
	if (!pool_start || !pool_end)
		panic("Can't find region for shmem pool");

	if (!tee_mm_init(&core_virt_shm_pool, pool_start, pool_end - pool_start,
			 SMALL_PAGE_SHIFT, TEE_MM_POOL_NO_FLAGS))
		panic("Could not create shmem pool");

	DMSG("Shared memory address range: %" PRIxVA ", %" PRIxVA,
	     pool_start, pool_end);
	return TEE_SUCCESS;
}

preinit(mobj_mapped_shm_init);

#ifdef CFG_CORE_DYN_PROTMEM
static struct mobj_protmem *to_mobj_protmem(struct mobj *mobj);

static TEE_Result check_reg_shm_list_conflict(paddr_t pa, paddr_size_t size)
{
	struct mobj_reg_shm *r = NULL;
	TEE_Result res = TEE_SUCCESS;

	SLIST_FOREACH(r, &reg_shm_list, next) {
		res = check_reg_shm_conflict(r, pa, size);
		if (res)
			break;
	}

	return res;
}

static TEE_Result protect_mem(struct mobj_protmem *m)
{
	if ((m->pa | m->mobj.size) & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	DMSG("use_case %d pa %#"PRIxPA", size %#zx",
	     m->use_case, m->pa, m->mobj.size);

	return plat_set_protmem_range(m->use_case, m->pa, m->mobj.size);
}

static TEE_Result restore_mem(struct mobj_protmem *m)
{
	DMSG("use_case %d pa %#"PRIxPA", size %#zx",
	     m->use_case, m->pa, m->mobj.size);

	return plat_set_protmem_range(MOBJ_USE_CASE_NS_SHM, m->pa,
				      m->mobj.size);
}

static TEE_Result mobj_protmem_get_pa(struct mobj *mobj, size_t offs,
				      size_t granule, paddr_t *pa)
{
	struct mobj_protmem *m = to_mobj_protmem(mobj);
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offs >= mobj->size)
		return TEE_ERROR_GENERIC;

	p = m->pa + offs;
	if (granule) {
		if (granule != SMALL_PAGE_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}
	*pa = p;

	return TEE_SUCCESS;
}

static TEE_Result mobj_protmem_get_mem_type(struct mobj *mobj __unused,
					    uint32_t *mt)
{
	if (!mt)
		return TEE_ERROR_GENERIC;

	*mt = TEE_MATTR_MEM_TYPE_CACHED;

	return TEE_SUCCESS;
}

static bool mobj_protmem_matches(struct mobj *mobj __unused,
				 enum buf_is_attr attr)
{
	return attr == CORE_MEM_SEC || attr == CORE_MEM_SDP_MEM;
}

static void protmem_free_helper(struct mobj_protmem *mobj_protmem)
{
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);
	SLIST_REMOVE(&protmem_list, mobj_protmem, mobj_protmem, next);
	cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);

	restore_mem(mobj_protmem);
	free(mobj_protmem);
}

static void mobj_protmem_free(struct mobj *mobj)
{
	struct mobj_protmem *r = to_mobj_protmem(mobj);
	uint32_t exceptions = 0;

	if (!r->releasing) {
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		protmem_free_helper(r);
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
	} else {
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		r->release_frees = true;
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

		mutex_lock(&shm_mu);
		if (shm_release_waiters)
			condvar_broadcast(&shm_cv);
		mutex_unlock(&shm_mu);
	}
}

static uint64_t mobj_protmem_get_cookie(struct mobj *mobj)
{
	return to_mobj_protmem(mobj)->cookie;
}

static TEE_Result mobj_protmem_inc_map(struct mobj *mobj __maybe_unused)
{
	assert(to_mobj_protmem(mobj));
	return TEE_ERROR_BAD_PARAMETERS;
}

static TEE_Result mobj_protmem_dec_map(struct mobj *mobj __maybe_unused)
{
	assert(to_mobj_protmem(mobj));
	return TEE_ERROR_BAD_PARAMETERS;
}

const struct mobj_ops mobj_protmem_ops
__relrodata_unpaged("mobj_protmem_ops") = {
	.get_pa = mobj_protmem_get_pa,
	.get_mem_type = mobj_protmem_get_mem_type,
	.matches = mobj_protmem_matches,
	.free = mobj_protmem_free,
	.get_cookie = mobj_protmem_get_cookie,
	.inc_map = mobj_protmem_inc_map,
	.dec_map = mobj_protmem_dec_map,
};

static struct mobj_protmem *to_mobj_protmem(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_protmem_ops);
	return container_of(mobj, struct mobj_protmem, mobj);
}

struct mobj *mobj_protmem_alloc(paddr_t pa, paddr_size_t size, uint64_t cookie,
				enum mobj_use_case use_case)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_protmem *m = NULL;
	uint32_t exceptions = 0;

	if (use_case == MOBJ_USE_CASE_NS_SHM ||
	    !core_pbuf_is(CORE_MEM_NON_SEC, pa, size))
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_protmem_ops;
	m->use_case = use_case;
	m->mobj.size = size;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&m->mobj.refc, 1);
	m->cookie = cookie;
	m->pa = pa;

	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	res = check_reg_shm_list_conflict(pa, size);
	if (res)
		goto out;

	res = protect_mem(m);
	if (res)
		goto out;
	SLIST_INSERT_HEAD(&protmem_list, m, next);
out:
	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

	if (res) {
		free(m);
		return NULL;
	}

	return &m->mobj;
}
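
/*
 * A sketch of the constraints mobj_protmem_alloc() enforces: pa and size
 * must describe page-aligned, non-secure memory (protect_mem() rejects
 * unaligned ranges), use_case must not be MOBJ_USE_CASE_NS_SHM, and the
 * range must not intersect any currently registered shared memory. Only
 * then is the plat_set_protmem_range() hook asked to protect the range.
 */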

TEE_Result mobj_protmem_release_by_cookie(uint64_t cookie)
{
	uint32_t exceptions = 0;
	struct mobj_protmem *rm = NULL;

	/*
	 * Try to find rm and see if it can be released by this function.
	 * If so, call mobj_put(). Otherwise this function was called
	 * either with a wrong cookie or perhaps a second time; regardless,
	 * return TEE_ERROR_BAD_PARAMETERS.
	 */
	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
	rm = protmem_find_unlocked(cookie);
	if (!rm || rm->releasing)
		rm = NULL;
	else
		rm->releasing = true;

	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

	if (!rm)
		return TEE_ERROR_BAD_PARAMETERS;

	mobj_put(&rm->mobj);

	/*
	 * We've established that this function can release the cookie.
	 * Now we wait until mobj_protmem_free() is called by the last
	 * mobj_put() needed to free this mobj. Note that the call to
	 * mobj_put() above could very well be that call.
	 *
	 * Once mobj_protmem_free() is called it will set rm->release_frees
	 * to true and we can free the mobj here.
	 */
	mutex_lock(&shm_mu);
	shm_release_waiters++;
	assert(shm_release_waiters);

	while (true) {
		exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
		if (rm->release_frees) {
			protmem_free_helper(rm);
			rm = NULL;
		}
		cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

		if (!rm)
			break;
		condvar_wait(&shm_cv, &shm_mu);
	}

	assert(shm_release_waiters);
	shm_release_waiters--;
	mutex_unlock(&shm_mu);

	return TEE_SUCCESS;
}

static struct mobj_protmem *protmem_find_by_pa_unlocked(paddr_t pa,
							paddr_size_t sz)
{
	struct mobj_protmem *m = NULL;

	if (!sz)
		sz = 1;

	SLIST_FOREACH(m, &protmem_list, next)
		if (core_is_buffer_inside(pa, sz, m->pa, m->mobj.size))
			return m;

	return NULL;
}

struct mobj *mobj_protmem_get_by_pa(paddr_t pa, paddr_size_t size)
{
	struct mobj_protmem *rm = NULL;
	struct mobj *mobj = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);

	rm = protmem_find_by_pa_unlocked(pa, size);
	if (rm && !rm->releasing)
		mobj = mobj_get(&rm->mobj);

	cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);

	return mobj;
}
#endif /*CFG_CORE_DYN_PROTMEM*/