xref: /optee_os/core/mm/mobj.c (revision 997ff82731597ddcf8d6ad0fb3301adca8c0c6a8)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2021, Linaro Limited
 */

#include <assert.h>
#include <config.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/linker.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <optee_msg.h>
#include <stdlib.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

struct mobj *mobj_sec_ddr;
struct mobj *mobj_tee_ram_rx;
struct mobj *mobj_tee_ram_rw;

/*
 * mobj_phys implementation
 */

struct mobj_phys {
	struct mobj mobj;
	enum buf_is_attr battr;
	uint32_t cattr; /* Defined by TEE_MATTR_CACHE_* in tee_mmu_types.h */
	vaddr_t va;
	paddr_t pa;
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj);

static void *mobj_phys_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!moph->va || !mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return (void *)(moph->va + offset);
}

static TEE_Result mobj_phys_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	paddr_t p;

	if (!pa)
		return TEE_ERROR_GENERIC;

	p = moph->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_phys_get_pa);
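
/*
 * Worked example (illustrative, with hypothetical values): for a mobj_phys
 * with moph->pa == 0x40000000, a call like
 *
 *	mobj_get_pa(mobj, 0x1234, SMALL_PAGE_SIZE, &pa);
 *
 * first computes p = 0x40001234 and then masks with ~(SMALL_PAGE_SIZE - 1),
 * so *pa ends up as the page base 0x40001000. With granule == 0 the
 * unrounded 0x40001234 is returned instead.
 */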

static TEE_Result mobj_phys_get_cattr(struct mobj *mobj, uint32_t *cattr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!cattr)
		return TEE_ERROR_GENERIC;

	*cattr = moph->cattr;
	return TEE_SUCCESS;
}

static bool mobj_phys_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	enum buf_is_attr a;

	a = moph->battr;

	switch (attr) {
	case CORE_MEM_SEC:
		return a == CORE_MEM_SEC || a == CORE_MEM_TEE_RAM ||
		       a == CORE_MEM_TA_RAM || a == CORE_MEM_SDP_MEM;
	case CORE_MEM_NON_SEC:
		return a == CORE_MEM_NSEC_SHM;
	case CORE_MEM_TEE_RAM:
	case CORE_MEM_TA_RAM:
	case CORE_MEM_NSEC_SHM:
	case CORE_MEM_SDP_MEM:
		return attr == a;
	default:
		return false;
	}
}

static void mobj_phys_free(struct mobj *mobj)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	free(moph);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_phys_ops __weak __rodata_unpaged("mobj_phys_ops") = {
	.get_va = mobj_phys_get_va,
	.get_pa = mobj_phys_get_pa,
	.get_phys_offs = NULL, /* only offset 0 */
	.get_cattr = mobj_phys_get_cattr,
	.matches = mobj_phys_matches,
	.free = mobj_phys_free,
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_phys_ops);
	return container_of(mobj, struct mobj_phys, mobj);
}

static struct mobj *mobj_phys_init(paddr_t pa, size_t size, uint32_t cattr,
				   enum buf_is_attr battr,
				   enum teecore_memtypes area_type)
{
	void *va = NULL;
	struct mobj_phys *moph = NULL;
	struct tee_mmap_region *map = NULL;

	if ((pa & CORE_MMU_USER_PARAM_MASK) ||
	    (size & CORE_MMU_USER_PARAM_MASK)) {
		DMSG("Expect %#x alignment", CORE_MMU_USER_PARAM_SIZE);
		return NULL;
	}

	if (pa) {
		va = phys_to_virt(pa, area_type, size);
	} else {
		map = core_mmu_find_mapping_exclusive(area_type, size);
		if (!map)
			return NULL;

		pa = map->pa;
		va = (void *)map->va;
	}

	/* Only SDP memory may not have a virtual address */
	if (!va && battr != CORE_MEM_SDP_MEM)
		return NULL;

	moph = calloc(1, sizeof(*moph));
	if (!moph)
		return NULL;

	moph->battr = battr;
	moph->cattr = cattr;
	moph->mobj.size = size;
	moph->mobj.ops = &mobj_phys_ops;
	refcount_set(&moph->mobj.refc, 1);
	moph->pa = pa;
	moph->va = (vaddr_t)va;

	return &moph->mobj;
}

struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t cattr,
			     enum buf_is_attr battr)
{
	enum teecore_memtypes area_type;

	switch (battr) {
	case CORE_MEM_TEE_RAM:
		area_type = MEM_AREA_TEE_RAM_RW_DATA;
		break;
	case CORE_MEM_TA_RAM:
		area_type = MEM_AREA_TA_RAM;
		break;
	case CORE_MEM_NSEC_SHM:
		area_type = MEM_AREA_NSEC_SHM;
		break;
	case CORE_MEM_SDP_MEM:
		area_type = MEM_AREA_SDP_MEM;
		break;
	default:
		DMSG("can't allocate with specified attribute");
		return NULL;
	}

	return mobj_phys_init(pa, size, cattr, battr, area_type);
}
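
/*
 * Illustrative usage (hypothetical, not called from this file): wrapping an
 * already mapped, physically contiguous non-secure buffer in a mobj. Both
 * pa and size must be CORE_MMU_USER_PARAM_SIZE aligned or mobj_phys_init()
 * rejects the request.
 *
 *	struct mobj *m = mobj_phys_alloc(0x42000000, 0x100000,
 *					 TEE_MATTR_CACHE_CACHED,
 *					 CORE_MEM_NSEC_SHM);
 *	void *va = NULL;
 *
 *	if (m)
 *		va = mobj_get_va(m, 0, SMALL_PAGE_SIZE);
 */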

/*
 * mobj_virt implementation
 */

static void mobj_virt_assert_type(struct mobj *mobj);

static void *mobj_virt_get_va(struct mobj *mobj, size_t offset,
			      size_t len __maybe_unused)
{
	mobj_virt_assert_type(mobj);
	assert(mobj_check_offset_and_len(mobj, offset, len));

	return (void *)(vaddr_t)offset;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_virt_ops __weak __rodata_unpaged("mobj_virt_ops") = {
	.get_va = mobj_virt_get_va,
};

static void mobj_virt_assert_type(struct mobj *mobj __maybe_unused)
{
	assert(mobj->ops == &mobj_virt_ops);
}

struct mobj mobj_virt = { .ops = &mobj_virt_ops, .size = SIZE_MAX };
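
/*
 * Note (illustrative): mobj_virt is a singleton covering the whole virtual
 * address space (size == SIZE_MAX), so the "offset" passed to it is really
 * the virtual address itself:
 *
 *	void *p = mobj_get_va(&mobj_virt, va, len);	// p == (void *)va
 */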

/*
 * mobj_mm implementation
 */

struct mobj_mm {
	tee_mm_entry_t *mm;
	struct mobj *parent_mobj;
	struct mobj mobj;
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj);

static size_t mobj_mm_offs(struct mobj *mobj, size_t offs)
{
	tee_mm_entry_t *mm = to_mobj_mm(mobj)->mm;

	return (mm->offset << mm->pool->shift) + offs;
}

static void *mobj_mm_get_va(struct mobj *mobj, size_t offs, size_t len)
{
	return mobj_get_va(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs), len);
}


static TEE_Result mobj_mm_get_pa(struct mobj *mobj, size_t offs,
				    size_t granule, paddr_t *pa)
{
	return mobj_get_pa(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs), granule, pa);
}
DECLARE_KEEP_PAGER(mobj_mm_get_pa);

static size_t mobj_mm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	return mobj_get_phys_offs(to_mobj_mm(mobj)->parent_mobj, granule);
}

static TEE_Result mobj_mm_get_cattr(struct mobj *mobj, uint32_t *cattr)
{
	return mobj_get_cattr(to_mobj_mm(mobj)->parent_mobj, cattr);
}

static bool mobj_mm_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	return mobj_matches(to_mobj_mm(mobj)->parent_mobj, attr);
}

static void mobj_mm_free(struct mobj *mobj)
{
	struct mobj_mm *m = to_mobj_mm(mobj);

	tee_mm_free(m->mm);
	free(m);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_mm_ops __weak __rodata_unpaged("mobj_mm_ops") = {
	.get_va = mobj_mm_get_va,
	.get_pa = mobj_mm_get_pa,
	.get_phys_offs = mobj_mm_get_phys_offs,
	.get_cattr = mobj_mm_get_cattr,
	.matches = mobj_mm_matches,
	.free = mobj_mm_free,
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_mm_ops);
	return container_of(mobj, struct mobj_mm, mobj);
}

struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
			      tee_mm_pool_t *pool)
{
	struct mobj_mm *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;

	m->mm = tee_mm_alloc(pool, size);
	if (!m->mm) {
		free(m);
		return NULL;
	}

	m->parent_mobj = mobj_parent;
	m->mobj.size = size;
	m->mobj.ops = &mobj_mm_ops;
	refcount_set(&m->mobj.refc, 1);

	return &m->mobj;
}
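
/*
 * Illustrative usage (hypothetical): carving a sub-range out of a parent
 * mobj through a tee_mm pool, e.g. the secure DDR pool used for
 * mobj_sec_ddr in mobj_init() below. Offsets into the child mobj are
 * translated by mobj_mm_offs() into parent offsets before the parent's
 * ops run.
 *
 *	struct mobj *m = mobj_mm_alloc(mobj_sec_ddr, 4 * SMALL_PAGE_SIZE,
 *				       &tee_mm_sec_ddr);
 *	paddr_t pa = 0;
 *
 *	if (m)
 *		mobj_get_pa(m, 0, 0, &pa);	// pa of the carved range
 */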


/*
 * mobj_shm implementation. A mobj_shm represents a buffer in the predefined
 * SHM region:
 * - it is physically contiguous.
 * - it is identified in the static physical layout as MEM_AREA_NSEC_SHM.
 * - it creates mobjs that match the specific CORE_MEM_NSEC_SHM and the
 *   generic non-secure CORE_MEM_NON_SEC attributes.
 */

struct mobj_shm {
	struct mobj mobj;
	paddr_t pa;
	uint64_t cookie;
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj);

static void *mobj_shm_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	if (!mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return phys_to_virt(m->pa + offset, MEM_AREA_NSEC_SHM,
			    mobj->size - offset);
}

static TEE_Result mobj_shm_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_shm *m = to_mobj_shm(mobj);
	paddr_t p;

	if (!pa || offs >= mobj->size)
		return TEE_ERROR_GENERIC;

	p = m->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_shm_get_pa);

static size_t mobj_shm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	assert(IS_POWER_OF_TWO(granule));
	return to_mobj_shm(mobj)->pa & (granule - 1);
}

static bool mobj_shm_matches(struct mobj *mobj __unused, enum buf_is_attr attr)
{
	return attr == CORE_MEM_NSEC_SHM || attr == CORE_MEM_NON_SEC;
}

static void mobj_shm_free(struct mobj *mobj)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	free(m);
}

static uint64_t mobj_shm_get_cookie(struct mobj *mobj)
{
	return to_mobj_shm(mobj)->cookie;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_shm_ops __weak __rodata_unpaged("mobj_shm_ops") = {
	.get_va = mobj_shm_get_va,
	.get_pa = mobj_shm_get_pa,
	.get_phys_offs = mobj_shm_get_phys_offs,
	.matches = mobj_shm_matches,
	.free = mobj_shm_free,
	.get_cookie = mobj_shm_get_cookie,
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_shm_ops);
	return container_of(mobj, struct mobj_shm, mobj);
}

struct mobj *mobj_shm_alloc(paddr_t pa, size_t size, uint64_t cookie)
{
	struct mobj_shm *m;

	if (!core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size))
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_shm_ops;
	refcount_set(&m->mobj.refc, 1);
	m->pa = pa;
	m->cookie = cookie;

	return &m->mobj;
}
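
/*
 * Illustrative usage (hypothetical): registering a chunk of non-secure
 * shared memory identified by a cookie from the normal world. The range
 * must pass the core_pbuf_is(CORE_MEM_NSEC_SHM, ...) check or the
 * allocation fails.
 *
 *	struct mobj *m = mobj_shm_alloc(nsec_pa, SMALL_PAGE_SIZE, cookie);
 *
 *	if (m)
 *		assert(mobj_get_cookie(m) == cookie);
 */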

#ifdef CFG_PAGED_USER_TA
/*
 * mobj_seccpy_shm implementation
 */

struct mobj_seccpy_shm {
	struct user_ta_ctx *utc;
	vaddr_t va;
	struct mobj mobj;
	struct fobj *fobj;
};

static bool __maybe_unused mobj_is_seccpy_shm(struct mobj *mobj);

static struct mobj_seccpy_shm *to_mobj_seccpy_shm(struct mobj *mobj)
{
	assert(mobj_is_seccpy_shm(mobj));
	return container_of(mobj, struct mobj_seccpy_shm, mobj);
}

static void *mobj_seccpy_shm_get_va(struct mobj *mobj, size_t offs, size_t len)
{
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

	if (&m->utc->ta_ctx.ts_ctx != thread_get_tsd()->ctx)
		return NULL;

	if (!mobj_check_offset_and_len(mobj, offs, len))
		return NULL;
	return (void *)(m->va + offs);
}

static bool mobj_seccpy_shm_matches(struct mobj *mobj __maybe_unused,
				 enum buf_is_attr attr)
{
	assert(mobj_is_seccpy_shm(mobj));

	return attr == CORE_MEM_SEC || attr == CORE_MEM_TEE_RAM;
}

static void mobj_seccpy_shm_free(struct mobj *mobj)
{
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

	tee_pager_rem_um_region(&m->utc->uctx, m->va, mobj->size);
	vm_rem_rwmem(&m->utc->uctx, mobj, m->va);
	fobj_put(m->fobj);
	free(m);
}

static struct fobj *mobj_seccpy_shm_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_seccpy_shm(mobj)->fobj);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_seccpy_shm_ops
__weak __rodata_unpaged("mobj_seccpy_shm_ops") = {
	.get_va = mobj_seccpy_shm_get_va,
	.matches = mobj_seccpy_shm_matches,
	.free = mobj_seccpy_shm_free,
	.get_fobj = mobj_seccpy_shm_get_fobj,
};

static bool mobj_is_seccpy_shm(struct mobj *mobj)
{
	return mobj && mobj->ops == &mobj_seccpy_shm_ops;
}

struct mobj *mobj_seccpy_shm_alloc(size_t size)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct mobj_seccpy_shm *m;
	struct user_ta_ctx *utc;
	vaddr_t va = 0;

	if (!is_user_ta_ctx(tsd->ctx))
		return NULL;
	utc = to_user_ta_ctx(tsd->ctx);

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_seccpy_shm_ops;
	refcount_set(&m->mobj.refc, 1);

	if (vm_add_rwmem(&utc->uctx, &m->mobj, &va) != TEE_SUCCESS)
		goto bad;

	m->fobj = fobj_rw_paged_alloc(ROUNDUP(size, SMALL_PAGE_SIZE) /
				      SMALL_PAGE_SIZE);
	if (tee_pager_add_um_region(&utc->uctx, va, m->fobj,
				    TEE_MATTR_PRW | TEE_MATTR_URW))
		goto bad;

	m->va = va;
	m->utc = to_user_ta_ctx(tsd->ctx);
	return &m->mobj;
bad:
	if (va)
		vm_rem_rwmem(&utc->uctx, &m->mobj, va);
	fobj_put(m->fobj);
	free(m);
	return NULL;
}
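
/*
 * Note (illustrative): mobj_seccpy_shm_alloc() is expected to be called
 * from a thread currently running a user TA. The buffer is backed by
 * rw-paged fobj pages mapped with TEE_MATTR_PRW | TEE_MATTR_URW, which is
 * why mobj_seccpy_shm_get_va() only returns a pointer while that same TA
 * context is active.
 */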


#endif /*CFG_PAGED_USER_TA*/

struct mobj_with_fobj {
	struct fobj *fobj;
	struct file *file;
	struct mobj mobj;
};

const struct mobj_ops mobj_with_fobj_ops;

struct mobj *mobj_with_fobj_alloc(struct fobj *fobj, struct file *file)
{
	struct mobj_with_fobj *m = NULL;

	if (!fobj)
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_with_fobj_ops;
	refcount_set(&m->mobj.refc, 1);
	m->mobj.size = fobj->num_pages * SMALL_PAGE_SIZE;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	m->fobj = fobj_get(fobj);
	m->file = file_get(file);

	return &m->mobj;
}
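
/*
 * Illustrative usage (hypothetical, assumes a pager-backed fobj allocator
 * such as fobj_rw_paged_alloc() is available in this configuration): the
 * mobj size is derived from the fobj page count and the mobj keeps its own
 * references, dropped again in mobj_with_fobj_free().
 *
 *	struct fobj *f = fobj_rw_paged_alloc(num_pages);
 *	struct mobj *m = NULL;
 *
 *	if (f) {
 *		m = mobj_with_fobj_alloc(f, NULL);
 *		fobj_put(f);	// the mobj holds its own reference
 *	}
 */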

static struct mobj_with_fobj *to_mobj_with_fobj(struct mobj *mobj)
{
	assert(mobj && mobj->ops == &mobj_with_fobj_ops);

	return container_of(mobj, struct mobj_with_fobj, mobj);
}

static bool mobj_with_fobj_matches(struct mobj *mobj __maybe_unused,
				 enum buf_is_attr attr)
{
	assert(to_mobj_with_fobj(mobj));

	/*
	 * All fobjs are supposed to be mapped secure, so classify them as
	 * CORE_MEM_SEC. Stay out of CORE_MEM_TEE_RAM etc.; if that
	 * information is needed it can probably be carried in another way
	 * than putting the burden directly on fobj.
	 */
	return attr == CORE_MEM_SEC;
}

static void mobj_with_fobj_free(struct mobj *mobj)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	fobj_put(m->fobj);
	file_put(m->file);
	free(m);
}

static struct fobj *mobj_with_fobj_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_with_fobj(mobj)->fobj);
}

static TEE_Result mobj_with_fobj_get_cattr(struct mobj *mobj __unused,
					   uint32_t *cattr)
{
	if (!cattr)
		return TEE_ERROR_GENERIC;

	/* All fobjs are mapped as normal cached memory */
	*cattr = TEE_MATTR_CACHE_CACHED;

	return TEE_SUCCESS;
}

static TEE_Result mobj_with_fobj_get_pa(struct mobj *mobj, size_t offs,
					size_t granule, paddr_t *pa)
{
	struct mobj_with_fobj *f = to_mobj_with_fobj(mobj);
	paddr_t p = 0;

	if (!f->fobj->ops->get_pa) {
		assert(mobj_is_paged(mobj));
		return TEE_ERROR_NOT_SUPPORTED;
	}

	p = f->fobj->ops->get_pa(f->fobj, offs / SMALL_PAGE_SIZE) +
	    offs % SMALL_PAGE_SIZE;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_with_fobj_get_pa);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_with_fobj_ops
__weak __rodata_unpaged("mobj_with_fobj_ops") = {
	.matches = mobj_with_fobj_matches,
	.free = mobj_with_fobj_free,
	.get_fobj = mobj_with_fobj_get_fobj,
	.get_cattr = mobj_with_fobj_get_cattr,
	.get_pa = mobj_with_fobj_get_pa,
};

#ifdef CFG_PAGED_USER_TA
bool mobj_is_paged(struct mobj *mobj)
{
	if (mobj->ops == &mobj_seccpy_shm_ops)
		return true;

	if (mobj->ops == &mobj_with_fobj_ops &&
	    !to_mobj_with_fobj(mobj)->fobj->ops->get_pa)
		return true;

	return false;
}
#endif /*CFG_PAGED_USER_TA*/

static TEE_Result mobj_init(void)
{
	mobj_sec_ddr = mobj_phys_alloc(tee_mm_sec_ddr.lo,
				       tee_mm_sec_ddr.size,
				       TEE_MATTR_CACHE_CACHED, CORE_MEM_TA_RAM);
	if (!mobj_sec_ddr)
		panic("Failed to register secure ta ram");

	if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
		mobj_tee_ram_rx = mobj_phys_init(0,
						 VCORE_UNPG_RX_SZ,
						 TEE_MATTR_CACHE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RX);
		if (!mobj_tee_ram_rx)
			panic("Failed to register tee ram rx");

		mobj_tee_ram_rw = mobj_phys_init(0,
						 VCORE_UNPG_RW_SZ,
						 TEE_MATTR_CACHE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram rw");
	} else {
		mobj_tee_ram_rw = mobj_phys_init(TEE_RAM_START,
						 VCORE_UNPG_RW_PA +
						 VCORE_UNPG_RW_SZ -
						 TEE_RAM_START,
						 TEE_MATTR_CACHE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram");

		mobj_tee_ram_rx = mobj_tee_ram_rw;
	}

	return TEE_SUCCESS;
}

driver_init_late(mobj_init);
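
/*
 * Note (illustrative): driver_init_late() registers mobj_init() at the late
 * init call level, so mobj_sec_ddr, mobj_tee_ram_rx and mobj_tee_ram_rw are
 * only valid once that level has run during boot.
 */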