xref: /optee_os/core/mm/mobj.c (revision 39e8c2000b86b694635852e2b299bc998c849b63)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2021, Linaro Limited
 */

#include <assert.h>
#include <config.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/linker.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <optee_msg.h>
#include <stdlib.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

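/*
 * Global mobjs set up by mobj_init() below: mobj_sec_ddr covers the
 * secure TA RAM pool and mobj_tee_ram_rx/mobj_tee_ram_rw cover the TEE
 * core executable and writable RAM respectively.
 */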
struct mobj *mobj_sec_ddr;
struct mobj *mobj_tee_ram_rx;
struct mobj *mobj_tee_ram_rw;

/*
 * mobj_phys implementation
 */

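/*
 * A mobj_phys describes a physically contiguous buffer known at a fixed
 * physical address: battr classifies the buffer (TEE RAM, TA RAM,
 * non-secure SHM, SDP memory), cattr holds its cache attributes and
 * va/pa give the base addresses. va may be 0 for SDP memory, which is
 * not necessarily mapped by the core.
 */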
struct mobj_phys {
	struct mobj mobj;
	enum buf_is_attr battr;
	uint32_t cattr; /* Defined by TEE_MATTR_MEM_TYPE_* in tee_mmu_types.h */
	vaddr_t va;
	paddr_t pa;
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj);

static void *mobj_phys_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!moph->va || !mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return (void *)(moph->va + offset);
}

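/*
 * The various get_pa() callbacks below are registered with
 * DECLARE_KEEP_PAGER() so that they stay in the unpaged area and remain
 * usable while the pager is active.
 */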
static TEE_Result mobj_phys_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	paddr_t p;

	if (!pa)
		return TEE_ERROR_GENERIC;

	p = moph->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_phys_get_pa);

static TEE_Result mobj_phys_get_cattr(struct mobj *mobj, uint32_t *cattr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!cattr)
		return TEE_ERROR_GENERIC;

	*cattr = moph->cattr;
	return TEE_SUCCESS;
}

static bool mobj_phys_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	enum buf_is_attr a;

	a = moph->battr;

	switch (attr) {
	case CORE_MEM_SEC:
		return a == CORE_MEM_SEC || a == CORE_MEM_TEE_RAM ||
		       a == CORE_MEM_TA_RAM || a == CORE_MEM_SDP_MEM;
	case CORE_MEM_NON_SEC:
		return a == CORE_MEM_NSEC_SHM;
	case CORE_MEM_TEE_RAM:
	case CORE_MEM_TA_RAM:
	case CORE_MEM_NSEC_SHM:
	case CORE_MEM_SDP_MEM:
		return attr == a;
	default:
		return false;
	}
}

static void mobj_phys_free(struct mobj *mobj)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	free(moph);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_phys_ops
__weak __relrodata_unpaged("mobj_phys_ops") = {
	.get_va = mobj_phys_get_va,
	.get_pa = mobj_phys_get_pa,
	.get_phys_offs = NULL, /* only offset 0 */
	.get_cattr = mobj_phys_get_cattr,
	.matches = mobj_phys_matches,
	.free = mobj_phys_free,
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_phys_ops);
	return container_of(mobj, struct mobj_phys, mobj);
}

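/*
 * Common initialization: when pa is non-zero the caller supplies the
 * physical base and the virtual address is looked up in the static
 * mapping of area_type; when pa is zero the whole (exclusive) mapping of
 * area_type is used and both pa and va are taken from it.
 */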
static struct mobj *mobj_phys_init(paddr_t pa, size_t size, uint32_t cattr,
				   enum buf_is_attr battr,
				   enum teecore_memtypes area_type)
{
	void *va = NULL;
	struct mobj_phys *moph = NULL;
	struct tee_mmap_region *map = NULL;

	if ((pa & CORE_MMU_USER_PARAM_MASK) ||
	    (size & CORE_MMU_USER_PARAM_MASK)) {
		DMSG("Expect %#x alignment", CORE_MMU_USER_PARAM_SIZE);
		return NULL;
	}

	if (pa) {
		va = phys_to_virt(pa, area_type, size);
	} else {
		map = core_mmu_find_mapping_exclusive(area_type, size);
		if (!map)
			return NULL;

		pa = map->pa;
		va = (void *)map->va;
	}

	/* Only SDP memory may not have a virtual address */
	if (!va && battr != CORE_MEM_SDP_MEM)
		return NULL;

	moph = calloc(1, sizeof(*moph));
	if (!moph)
		return NULL;

	moph->battr = battr;
	moph->cattr = cattr;
	moph->mobj.size = size;
	moph->mobj.ops = &mobj_phys_ops;
	refcount_set(&moph->mobj.refc, 1);
	moph->pa = pa;
	moph->va = (vaddr_t)va;

	return &moph->mobj;
}

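/*
 * Illustrative use (hypothetical addresses): register a cached,
 * physically contiguous SDP buffer. Both pa and size must be aligned to
 * CORE_MMU_USER_PARAM_SIZE or the call returns NULL.
 *
 *	struct mobj *m = mobj_phys_alloc(sdp_pa, sdp_size,
 *					 TEE_MATTR_MEM_TYPE_CACHED,
 *					 CORE_MEM_SDP_MEM);
 */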
struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t cattr,
			     enum buf_is_attr battr)
{
	enum teecore_memtypes area_type;

	switch (battr) {
	case CORE_MEM_TEE_RAM:
		area_type = MEM_AREA_TEE_RAM_RW_DATA;
		break;
	case CORE_MEM_TA_RAM:
		area_type = MEM_AREA_TA_RAM;
		break;
	case CORE_MEM_NSEC_SHM:
		area_type = MEM_AREA_NSEC_SHM;
		break;
	case CORE_MEM_SDP_MEM:
		area_type = MEM_AREA_SDP_MEM;
		break;
	default:
		DMSG("can't allocate with specified attribute");
		return NULL;
	}

	return mobj_phys_init(pa, size, cattr, battr, area_type);
}

/*
 * mobj_virt implementation
 */

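/*
 * mobj_virt maps offsets one to one to virtual addresses: get_va() simply
 * returns the offset cast to a pointer, and the single instance mobj_virt
 * covers the whole address space (size is SIZE_MAX).
 */
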
static void mobj_virt_assert_type(struct mobj *mobj);

static void *mobj_virt_get_va(struct mobj *mobj, size_t offset,
			      size_t len __maybe_unused)
{
	mobj_virt_assert_type(mobj);
	assert(mobj_check_offset_and_len(mobj, offset, len));

	return (void *)(vaddr_t)offset;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_virt_ops
__weak __relrodata_unpaged("mobj_virt_ops") = {
	.get_va = mobj_virt_get_va,
};

static void mobj_virt_assert_type(struct mobj *mobj __maybe_unused)
{
	assert(mobj->ops == &mobj_virt_ops);
}

struct mobj mobj_virt = { .ops = &mobj_virt_ops, .size = SIZE_MAX };

/*
 * mobj_mm implementation
 */

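/*
 * A mobj_mm is a sub-range of a parent mobj, carved out with a tee_mm
 * pool allocator. All operations are forwarded to the parent with the
 * offset translated by the position of the tee_mm entry in the pool.
 */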
struct mobj_mm {
	tee_mm_entry_t *mm;
	struct mobj *parent_mobj;
	struct mobj mobj;
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj);

static size_t mobj_mm_offs(struct mobj *mobj, size_t offs)
{
	tee_mm_entry_t *mm = to_mobj_mm(mobj)->mm;

	return (mm->offset << mm->pool->shift) + offs;
}

static void *mobj_mm_get_va(struct mobj *mobj, size_t offs, size_t len)
{
	return mobj_get_va(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs), len);
}

static TEE_Result mobj_mm_get_pa(struct mobj *mobj, size_t offs,
				 size_t granule, paddr_t *pa)
{
	return mobj_get_pa(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs), granule, pa);
}
DECLARE_KEEP_PAGER(mobj_mm_get_pa);

static size_t mobj_mm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	return mobj_get_phys_offs(to_mobj_mm(mobj)->parent_mobj, granule);
}

static TEE_Result mobj_mm_get_cattr(struct mobj *mobj, uint32_t *cattr)
{
	return mobj_get_cattr(to_mobj_mm(mobj)->parent_mobj, cattr);
}

static bool mobj_mm_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	return mobj_matches(to_mobj_mm(mobj)->parent_mobj, attr);
}

static void mobj_mm_free(struct mobj *mobj)
{
	struct mobj_mm *m = to_mobj_mm(mobj);

	tee_mm_free(m->mm);
	free(m);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_mm_ops __weak __relrodata_unpaged("mobj_mm_ops") = {
	.get_va = mobj_mm_get_va,
	.get_pa = mobj_mm_get_pa,
	.get_phys_offs = mobj_mm_get_phys_offs,
	.get_cattr = mobj_mm_get_cattr,
	.matches = mobj_mm_matches,
	.free = mobj_mm_free,
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_mm_ops);
	return container_of(mobj, struct mobj_mm, mobj);
}

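/*
 * Illustrative use (hypothetical pairing of parent and pool): carve a
 * page-sized sub-buffer out of the secure DDR mobj, managed by the
 * matching tee_mm pool:
 *
 *	struct mobj *m = mobj_mm_alloc(mobj_sec_ddr, SMALL_PAGE_SIZE,
 *				       &tee_mm_sec_ddr);
 *
 * The returned mobj forwards all operations to the parent at the
 * allocated offset and releases the tee_mm entry when freed.
 */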
struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
			   tee_mm_pool_t *pool)
{
	struct mobj_mm *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;

	m->mm = tee_mm_alloc(pool, size);
	if (!m->mm) {
		free(m);
		return NULL;
	}

	m->parent_mobj = mobj_parent;
	m->mobj.size = size;
	m->mobj.ops = &mobj_mm_ops;
	refcount_set(&m->mobj.refc, 1);

	return &m->mobj;
}

/*
 * mobj_shm implementation. A mobj_shm represents a buffer in the
 * predefined non-secure shared memory region:
 * - it is physically contiguous.
 * - it is identified in the static physical layout as MEM_AREA_NSEC_SHM.
 * - the mobjs it creates match both the specific CORE_MEM_NSEC_SHM
 *   attribute and the generic non-secure CORE_MEM_NON_SEC attribute.
 */

struct mobj_shm {
	struct mobj mobj;
	paddr_t pa;
	uint64_t cookie;
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj);

static void *mobj_shm_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	if (!mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return phys_to_virt(m->pa + offset, MEM_AREA_NSEC_SHM,
			    mobj->size - offset);
}

static TEE_Result mobj_shm_get_pa(struct mobj *mobj, size_t offs,
				  size_t granule, paddr_t *pa)
{
	struct mobj_shm *m = to_mobj_shm(mobj);
	paddr_t p;

	if (!pa || offs >= mobj->size)
		return TEE_ERROR_GENERIC;

	p = m->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_shm_get_pa);

static size_t mobj_shm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	assert(IS_POWER_OF_TWO(granule));
	return to_mobj_shm(mobj)->pa & (granule - 1);
}

static bool mobj_shm_matches(struct mobj *mobj __unused, enum buf_is_attr attr)
{
	return attr == CORE_MEM_NSEC_SHM || attr == CORE_MEM_NON_SEC;
}

static void mobj_shm_free(struct mobj *mobj)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	free(m);
}

static uint64_t mobj_shm_get_cookie(struct mobj *mobj)
{
	return to_mobj_shm(mobj)->cookie;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_shm_ops
__weak __relrodata_unpaged("mobj_shm_ops") = {
	.get_va = mobj_shm_get_va,
	.get_pa = mobj_shm_get_pa,
	.get_phys_offs = mobj_shm_get_phys_offs,
	.matches = mobj_shm_matches,
	.free = mobj_shm_free,
	.get_cookie = mobj_shm_get_cookie,
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_shm_ops);
	return container_of(mobj, struct mobj_shm, mobj);
}

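/*
 * Illustrative use (hypothetical values): wrap a buffer that the normal
 * world identified by a cookie. The range must lie entirely within
 * CORE_MEM_NSEC_SHM or the call returns NULL.
 *
 *	struct mobj *m = mobj_shm_alloc(nsec_pa, nsec_size, cookie);
 */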
struct mobj *mobj_shm_alloc(paddr_t pa, size_t size, uint64_t cookie)
{
	struct mobj_shm *m;

	if (!core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size))
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_shm_ops;
	refcount_set(&m->mobj.refc, 1);
	m->pa = pa;
	m->cookie = cookie;

	return &m->mobj;
}

#ifdef CFG_PAGED_USER_TA
/*
 * mobj_seccpy_shm implementation
 */

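/*
 * A mobj_seccpy_shm is pager-backed read/write memory private to one
 * user TA context (utc): it is mapped at va in that context, backed by a
 * read/write paged fobj, and get_va() refuses access from any other
 * context.
 */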
struct mobj_seccpy_shm {
	struct user_ta_ctx *utc;
	vaddr_t va;
	struct mobj mobj;
	struct fobj *fobj;
};

static bool __maybe_unused mobj_is_seccpy_shm(struct mobj *mobj);

static struct mobj_seccpy_shm *to_mobj_seccpy_shm(struct mobj *mobj)
{
	assert(mobj_is_seccpy_shm(mobj));
	return container_of(mobj, struct mobj_seccpy_shm, mobj);
}

static void *mobj_seccpy_shm_get_va(struct mobj *mobj, size_t offs, size_t len)
{
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

	if (&m->utc->ta_ctx.ts_ctx != thread_get_tsd()->ctx)
		return NULL;

	if (!mobj_check_offset_and_len(mobj, offs, len))
		return NULL;
	return (void *)(m->va + offs);
}

static bool mobj_seccpy_shm_matches(struct mobj *mobj __maybe_unused,
				    enum buf_is_attr attr)
{
	assert(mobj_is_seccpy_shm(mobj));

	return attr == CORE_MEM_SEC || attr == CORE_MEM_TEE_RAM;
}

static void mobj_seccpy_shm_free(struct mobj *mobj)
{
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

	tee_pager_rem_um_region(&m->utc->uctx, m->va, mobj->size);
	vm_rem_rwmem(&m->utc->uctx, mobj, m->va);
	fobj_put(m->fobj);
	free(m);
}

static struct fobj *mobj_seccpy_shm_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_seccpy_shm(mobj)->fobj);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_seccpy_shm_ops
__weak __relrodata_unpaged("mobj_seccpy_shm_ops") = {
	.get_va = mobj_seccpy_shm_get_va,
	.matches = mobj_seccpy_shm_matches,
	.free = mobj_seccpy_shm_free,
	.get_fobj = mobj_seccpy_shm_get_fobj,
};

static bool mobj_is_seccpy_shm(struct mobj *mobj)
{
	return mobj && mobj->ops == &mobj_seccpy_shm_ops;
}

struct mobj *mobj_seccpy_shm_alloc(size_t size)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct mobj_seccpy_shm *m;
	struct user_ta_ctx *utc;
	vaddr_t va = 0;

	if (!is_user_ta_ctx(tsd->ctx))
		return NULL;
	utc = to_user_ta_ctx(tsd->ctx);

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_seccpy_shm_ops;
	refcount_set(&m->mobj.refc, 1);

	if (vm_add_rwmem(&utc->uctx, &m->mobj, &va) != TEE_SUCCESS)
		goto bad;

	m->fobj = fobj_rw_paged_alloc(ROUNDUP(size, SMALL_PAGE_SIZE) /
				      SMALL_PAGE_SIZE);
	if (!m->fobj)
		goto bad;

	if (tee_pager_add_um_region(&utc->uctx, va, m->fobj,
				    TEE_MATTR_PRW | TEE_MATTR_URW))
		goto bad;

	m->va = va;
	m->utc = utc;
	return &m->mobj;
bad:
	if (va)
		vm_rem_rwmem(&utc->uctx, &m->mobj, va);
	fobj_put(m->fobj);
	free(m);
	return NULL;
}

#endif /*CFG_PAGED_USER_TA*/

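/*
 * A mobj_with_fobj wraps a fobj (and optionally the file it was derived
 * from). Its size is taken from the number of pages in the fobj and
 * physical addresses, when available, are resolved page by page through
 * the fobj.
 */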
struct mobj_with_fobj {
	struct fobj *fobj;
	struct file *file;
	struct mobj mobj;
};

const struct mobj_ops mobj_with_fobj_ops;

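/*
 * Illustrative use (hypothetical fobj and file): wrap an existing fobj in
 * a mobj. The mobj takes its own references on both the fobj and the
 * file, so the caller keeps (and eventually puts) its own:
 *
 *	struct mobj *m = mobj_with_fobj_alloc(fobj, file);
 */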
struct mobj *mobj_with_fobj_alloc(struct fobj *fobj, struct file *file)
{
	struct mobj_with_fobj *m = NULL;

	if (!fobj)
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_with_fobj_ops;
	refcount_set(&m->mobj.refc, 1);
	m->mobj.size = fobj->num_pages * SMALL_PAGE_SIZE;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	m->fobj = fobj_get(fobj);
	m->file = file_get(file);

	return &m->mobj;
}

static struct mobj_with_fobj *to_mobj_with_fobj(struct mobj *mobj)
{
	assert(mobj && mobj->ops == &mobj_with_fobj_ops);

	return container_of(mobj, struct mobj_with_fobj, mobj);
}

static bool mobj_with_fobj_matches(struct mobj *mobj __maybe_unused,
				   enum buf_is_attr attr)
{
	assert(to_mobj_with_fobj(mobj));

	/*
	 * All fobjs are supposed to be mapped secure, so classify them as
	 * CORE_MEM_SEC. Stay out of CORE_MEM_TEE_RAM etc.; if that
	 * information is needed it can probably be carried in some other
	 * way than putting the burden directly on the fobj.
	 */
	return attr == CORE_MEM_SEC;
}

static void mobj_with_fobj_free(struct mobj *mobj)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	fobj_put(m->fobj);
	file_put(m->file);
	free(m);
}

static struct fobj *mobj_with_fobj_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_with_fobj(mobj)->fobj);
}

static TEE_Result mobj_with_fobj_get_cattr(struct mobj *mobj __unused,
					   uint32_t *cattr)
{
	if (!cattr)
		return TEE_ERROR_GENERIC;

	/* All fobjs are mapped as normal cached memory */
	*cattr = TEE_MATTR_MEM_TYPE_CACHED;

	return TEE_SUCCESS;
}

static TEE_Result mobj_with_fobj_get_pa(struct mobj *mobj, size_t offs,
					size_t granule, paddr_t *pa)
{
	struct mobj_with_fobj *f = to_mobj_with_fobj(mobj);
	paddr_t p = 0;

	if (!f->fobj->ops->get_pa) {
		assert(mobj_is_paged(mobj));
		return TEE_ERROR_NOT_SUPPORTED;
	}

	p = f->fobj->ops->get_pa(f->fobj, offs / SMALL_PAGE_SIZE) +
	    offs % SMALL_PAGE_SIZE;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_with_fobj_get_pa);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_with_fobj_ops
__weak __relrodata_unpaged("mobj_with_fobj_ops") = {
	.matches = mobj_with_fobj_matches,
	.free = mobj_with_fobj_free,
	.get_fobj = mobj_with_fobj_get_fobj,
	.get_cattr = mobj_with_fobj_get_cattr,
	.get_pa = mobj_with_fobj_get_pa,
};

#ifdef CFG_PAGED_USER_TA
bool mobj_is_paged(struct mobj *mobj)
{
	if (mobj->ops == &mobj_seccpy_shm_ops)
		return true;

	if (mobj->ops == &mobj_with_fobj_ops &&
	    !to_mobj_with_fobj(mobj)->fobj->ops->get_pa)
		return true;

	return false;
}
#endif /*CFG_PAGED_USER_TA*/

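/*
 * mobj_init() registers the global mobjs declared at the top of this
 * file: TA RAM as mobj_sec_ddr and the TEE core RAM as mobj_tee_ram_rx
 * and mobj_tee_ram_rw (a single combined mobj when CFG_CORE_RWDATA_NOEXEC
 * is disabled). It runs as a late driver initcall via driver_init_late()
 * below.
 */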
static TEE_Result mobj_init(void)
{
	mobj_sec_ddr = mobj_phys_alloc(tee_mm_sec_ddr.lo,
				       tee_mm_sec_ddr.size,
				       TEE_MATTR_MEM_TYPE_CACHED,
				       CORE_MEM_TA_RAM);
	if (!mobj_sec_ddr)
		panic("Failed to register secure ta ram");

	if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
		mobj_tee_ram_rx = mobj_phys_init(0,
						 VCORE_UNPG_RX_SZ,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RX);
		if (!mobj_tee_ram_rx)
			panic("Failed to register tee ram rx");

		mobj_tee_ram_rw = mobj_phys_init(0,
						 VCORE_UNPG_RW_SZ,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram rw");
	} else {
		mobj_tee_ram_rw = mobj_phys_init(TEE_RAM_START,
						 VCORE_UNPG_RW_PA +
						 VCORE_UNPG_RW_SZ -
						 TEE_RAM_START,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram");

		mobj_tee_ram_rx = mobj_tee_ram_rw;
	}

	return TEE_SUCCESS;
}

driver_init_late(mobj_init);