/* xref: /optee_os/core/mm/mobj.c (revision c282ebd61200b0cb0830399c1c33514dbd129dfd) */
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2021, Linaro Limited
 */

#include <assert.h>
#include <config.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/linker.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <optee_msg.h>
#include <sm/optee_smc.h>
#include <stdlib.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

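/*
 * Default mobjs set up by mobj_init() below: mobj_sec_ddr covers the secure
 * DDR used as TA RAM, while mobj_tee_ram_rx/rw cover the core's read-execute
 * and read-write TEE RAM ranges (a single combined TEE RAM mobj when
 * CFG_CORE_RWDATA_NOEXEC is disabled).
 */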
struct mobj *mobj_sec_ddr;
struct mobj *mobj_tee_ram_rx;
struct mobj *mobj_tee_ram_rw;

/*
 * mobj_phys implementation
 */
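/*
 * A mobj_phys covers a physically contiguous region with a fixed physical
 * address and, except for unmapped SDP memory, a core virtual address
 * established at creation time.
 */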

struct mobj_phys {
	struct mobj mobj;
	enum buf_is_attr battr;
	uint32_t cattr; /* Defined by TEE_MATTR_CACHE_* in tee_mmu_types.h */
	vaddr_t va;
	paddr_t pa;
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj);

static void *mobj_phys_get_va(struct mobj *mobj, size_t offset)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!moph->va || offset >= mobj->size)
		return NULL;

	return (void *)(moph->va + offset);
}

static TEE_Result mobj_phys_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	paddr_t p;

	if (!pa)
		return TEE_ERROR_GENERIC;

	p = moph->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_phys_get_pa);
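/*
 * Example for mobj_phys_get_pa() above (illustrative values): with
 * moph->pa == 0x40001000, mobj_get_pa(mobj, 0x10, SMALL_PAGE_SIZE, &pa)
 * stores the page base 0x40001000 in pa, while a zero granule stores the
 * exact address 0x40001010.
 */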

static TEE_Result mobj_phys_get_cattr(struct mobj *mobj, uint32_t *cattr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!cattr)
		return TEE_ERROR_GENERIC;

	*cattr = moph->cattr;
	return TEE_SUCCESS;
}

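/*
 * CORE_MEM_SEC acts as a superset of the secure attributes below and
 * CORE_MEM_NON_SEC of non-secure SHM; the specific attributes must match
 * exactly.
 */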
static bool mobj_phys_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	enum buf_is_attr a;

	a = moph->battr;

	switch (attr) {
	case CORE_MEM_SEC:
		return a == CORE_MEM_SEC || a == CORE_MEM_TEE_RAM ||
		       a == CORE_MEM_TA_RAM || a == CORE_MEM_SDP_MEM;
	case CORE_MEM_NON_SEC:
		return a == CORE_MEM_NSEC_SHM;
	case CORE_MEM_TEE_RAM:
	case CORE_MEM_TA_RAM:
	case CORE_MEM_NSEC_SHM:
	case CORE_MEM_SDP_MEM:
		return attr == a;
	default:
		return false;
	}
}

static void mobj_phys_free(struct mobj *mobj)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	free(moph);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_phys_ops __weak __rodata_unpaged("mobj_phys_ops") = {
	.get_va = mobj_phys_get_va,
	.get_pa = mobj_phys_get_pa,
	.get_phys_offs = NULL, /* only offset 0 */
	.get_cattr = mobj_phys_get_cattr,
	.matches = mobj_phys_matches,
	.free = mobj_phys_free,
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_phys_ops);
	return container_of(mobj, struct mobj_phys, mobj);
}

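/*
 * Creates the mobj for a physically contiguous region. If @pa is zero the
 * region is taken from an exclusive static mapping of @area_type found via
 * core_mmu_find_mapping_exclusive(), otherwise the supplied address is
 * translated with phys_to_virt(). Both @pa and @size must be aligned on
 * CORE_MMU_USER_PARAM_SIZE.
 */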
static struct mobj *mobj_phys_init(paddr_t pa, size_t size, uint32_t cattr,
				   enum buf_is_attr battr,
				   enum teecore_memtypes area_type)
{
	void *va = NULL;
	struct mobj_phys *moph = NULL;
	struct tee_mmap_region *map = NULL;

	if ((pa & CORE_MMU_USER_PARAM_MASK) ||
	    (size & CORE_MMU_USER_PARAM_MASK)) {
		DMSG("Expect %#x alignment", CORE_MMU_USER_PARAM_SIZE);
		return NULL;
	}

	if (pa) {
		va = phys_to_virt(pa, area_type, size);
	} else {
		map = core_mmu_find_mapping_exclusive(area_type, size);
		if (!map)
			return NULL;

		pa = map->pa;
		va = (void *)map->va;
	}

	/* Only SDP memory may not have a virtual address */
	if (!va && battr != CORE_MEM_SDP_MEM)
		return NULL;

	moph = calloc(1, sizeof(*moph));
	if (!moph)
		return NULL;

	moph->battr = battr;
	moph->cattr = cattr;
	moph->mobj.size = size;
	moph->mobj.ops = &mobj_phys_ops;
	refcount_set(&moph->mobj.refc, 1);
	moph->pa = pa;
	moph->va = (vaddr_t)va;

	return &moph->mobj;
}

struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t cattr,
			     enum buf_is_attr battr)
{
	enum teecore_memtypes area_type;

	switch (battr) {
	case CORE_MEM_TEE_RAM:
		area_type = MEM_AREA_TEE_RAM_RW_DATA;
		break;
	case CORE_MEM_TA_RAM:
		area_type = MEM_AREA_TA_RAM;
		break;
	case CORE_MEM_NSEC_SHM:
		area_type = MEM_AREA_NSEC_SHM;
		break;
	case CORE_MEM_SDP_MEM:
		area_type = MEM_AREA_SDP_MEM;
		break;
	default:
		DMSG("can't allocate with specified attribute");
		return NULL;
	}

	return mobj_phys_init(pa, size, cattr, battr, area_type);
}
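/*
 * Usage sketch (hypothetical addresses): wrap a 1 MiB non-secure SHM
 * carveout in a mobj, similar to what mobj_init() below does for TA RAM:
 *
 *   struct mobj *m = mobj_phys_alloc(0x88000000, 0x100000,
 *                                    OPTEE_SMC_SHM_CACHED,
 *                                    CORE_MEM_NSEC_SHM);
 *   if (!m)
 *           panic();
 */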

/*
 * mobj_virt implementation
 */
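/*
 * mobj_virt is a single pseudo mobj spanning the whole address space
 * (size == SIZE_MAX): the offset passed to mobj_get_va() is returned
 * directly as the virtual address, and no other operation is provided.
 */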

static void mobj_virt_assert_type(struct mobj *mobj);

static void *mobj_virt_get_va(struct mobj *mobj, size_t offset)
{
	mobj_virt_assert_type(mobj);

	return (void *)(vaddr_t)offset;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_virt_ops __weak __rodata_unpaged("mobj_virt_ops") = {
	.get_va = mobj_virt_get_va,
};

static void mobj_virt_assert_type(struct mobj *mobj __maybe_unused)
{
	assert(mobj->ops == &mobj_virt_ops);
}

struct mobj mobj_virt = { .ops = &mobj_virt_ops, .size = SIZE_MAX };

/*
 * mobj_mm implementation
 */
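/*
 * A mobj_mm is a sub-range of a parent mobj, carved out with a tee_mm pool
 * allocation; every operation is forwarded to the parent with the offset
 * translated by the position of the tee_mm entry.
 */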

struct mobj_mm {
	tee_mm_entry_t *mm;
	struct mobj *parent_mobj;
	struct mobj mobj;
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj);

static size_t mobj_mm_offs(struct mobj *mobj, size_t offs)
{
	tee_mm_entry_t *mm = to_mobj_mm(mobj)->mm;

	return (mm->offset << mm->pool->shift) + offs;
}

static void *mobj_mm_get_va(struct mobj *mobj, size_t offs)
{
	return mobj_get_va(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs));
}

static TEE_Result mobj_mm_get_pa(struct mobj *mobj, size_t offs,
				    size_t granule, paddr_t *pa)
{
	return mobj_get_pa(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs), granule, pa);
}
DECLARE_KEEP_PAGER(mobj_mm_get_pa);

static size_t mobj_mm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	return mobj_get_phys_offs(to_mobj_mm(mobj)->parent_mobj, granule);
}

static TEE_Result mobj_mm_get_cattr(struct mobj *mobj, uint32_t *cattr)
{
	return mobj_get_cattr(to_mobj_mm(mobj)->parent_mobj, cattr);
}

static bool mobj_mm_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	return mobj_matches(to_mobj_mm(mobj)->parent_mobj, attr);
}

static void mobj_mm_free(struct mobj *mobj)
{
	struct mobj_mm *m = to_mobj_mm(mobj);

	tee_mm_free(m->mm);
	free(m);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_mm_ops __weak __rodata_unpaged("mobj_mm_ops") = {
	.get_va = mobj_mm_get_va,
	.get_pa = mobj_mm_get_pa,
	.get_phys_offs = mobj_mm_get_phys_offs,
	.get_cattr = mobj_mm_get_cattr,
	.matches = mobj_mm_matches,
	.free = mobj_mm_free,
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_mm_ops);
	return container_of(mobj, struct mobj_mm, mobj);
}

struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
			      tee_mm_pool_t *pool)
{
	struct mobj_mm *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;

	m->mm = tee_mm_alloc(pool, size);
	if (!m->mm) {
		free(m);
		return NULL;
	}

	m->parent_mobj = mobj_parent;
	m->mobj.size = size;
	m->mobj.ops = &mobj_mm_ops;
	refcount_set(&m->mobj.refc, 1);

	return &m->mobj;
}
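/*
 * Usage sketch (assuming the secure DDR pool also referenced by mobj_init()
 * below): carve one page out of the TA RAM parent mobj and release it again
 * with mobj_put(), which ends up in mobj_mm_free() and returns the tee_mm
 * entry to the pool:
 *
 *   struct mobj *m = mobj_mm_alloc(mobj_sec_ddr, SMALL_PAGE_SIZE,
 *                                  &tee_mm_sec_ddr);
 *   if (m)
 *           mobj_put(m);
 */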

/*
 * mobj_shm implementation. A mobj_shm represents a buffer in the predefined
 * non-secure shared memory region:
 * - it is physically contiguous.
 * - it is identified in the static physical layout as MEM_AREA_NSEC_SHM.
 * - the resulting mobjs match the specific CORE_MEM_NSEC_SHM attribute as
 *   well as the generic non-secure CORE_MEM_NON_SEC.
 */

struct mobj_shm {
	struct mobj mobj;
	paddr_t pa;
	uint64_t cookie;
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj);

static void *mobj_shm_get_va(struct mobj *mobj, size_t offset)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	if (offset >= mobj->size)
		return NULL;

	return phys_to_virt(m->pa + offset, MEM_AREA_NSEC_SHM,
			    mobj->size - offset);
}

static TEE_Result mobj_shm_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_shm *m = to_mobj_shm(mobj);
	paddr_t p;

	if (!pa || offs >= mobj->size)
		return TEE_ERROR_GENERIC;

	p = m->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_shm_get_pa);

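/* Returns how far the buffer start is into its granule-sized page */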
static size_t mobj_shm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	assert(IS_POWER_OF_TWO(granule));
	return to_mobj_shm(mobj)->pa & (granule - 1);
}

static bool mobj_shm_matches(struct mobj *mobj __unused, enum buf_is_attr attr)
{
	return attr == CORE_MEM_NSEC_SHM || attr == CORE_MEM_NON_SEC;
}

static void mobj_shm_free(struct mobj *mobj)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	free(m);
}

static uint64_t mobj_shm_get_cookie(struct mobj *mobj)
{
	return to_mobj_shm(mobj)->cookie;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_shm_ops __weak __rodata_unpaged("mobj_shm_ops") = {
	.get_va = mobj_shm_get_va,
	.get_pa = mobj_shm_get_pa,
	.get_phys_offs = mobj_shm_get_phys_offs,
	.matches = mobj_shm_matches,
	.free = mobj_shm_free,
	.get_cookie = mobj_shm_get_cookie,
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_shm_ops);
	return container_of(mobj, struct mobj_shm, mobj);
}

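/*
 * Wraps an already mapped range of non-secure SHM; @pa and @size must lie
 * entirely within CORE_MEM_NSEC_SHM. The @cookie identifies the buffer
 * towards the normal world and is reported back through mobj_get_cookie().
 */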
struct mobj *mobj_shm_alloc(paddr_t pa, size_t size, uint64_t cookie)
{
	struct mobj_shm *m;

	if (!core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size))
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_shm_ops;
	refcount_set(&m->mobj.refc, 1);
	m->pa = pa;
	m->cookie = cookie;

	return &m->mobj;
}

#ifdef CFG_PAGED_USER_TA
/*
 * mobj_seccpy_shm implementation
 */
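/*
 * A mobj_seccpy_shm is a pager-backed read/write buffer mapped into the
 * owning user TA's address space; mobj_get_va() only succeeds while that
 * TA's context is the one currently active on the thread.
 */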

struct mobj_seccpy_shm {
	struct user_ta_ctx *utc;
	vaddr_t va;
	struct mobj mobj;
	struct fobj *fobj;
};

static bool __maybe_unused mobj_is_seccpy_shm(struct mobj *mobj);

static struct mobj_seccpy_shm *to_mobj_seccpy_shm(struct mobj *mobj)
{
	assert(mobj_is_seccpy_shm(mobj));
	return container_of(mobj, struct mobj_seccpy_shm, mobj);
}

static void *mobj_seccpy_shm_get_va(struct mobj *mobj, size_t offs)
{
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

	if (&m->utc->ta_ctx.ts_ctx != thread_get_tsd()->ctx)
		return NULL;

	if (offs >= mobj->size)
		return NULL;
	return (void *)(m->va + offs);
}

static bool mobj_seccpy_shm_matches(struct mobj *mobj __maybe_unused,
				 enum buf_is_attr attr)
{
	assert(mobj_is_seccpy_shm(mobj));

	return attr == CORE_MEM_SEC || attr == CORE_MEM_TEE_RAM;
}

static void mobj_seccpy_shm_free(struct mobj *mobj)
{
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

	tee_pager_rem_um_region(&m->utc->uctx, m->va, mobj->size);
	vm_rem_rwmem(&m->utc->uctx, mobj, m->va);
	fobj_put(m->fobj);
	free(m);
}

static struct fobj *mobj_seccpy_shm_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_seccpy_shm(mobj)->fobj);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_seccpy_shm_ops
__weak __rodata_unpaged("mobj_seccpy_shm_ops") = {
	.get_va = mobj_seccpy_shm_get_va,
	.matches = mobj_seccpy_shm_matches,
	.free = mobj_seccpy_shm_free,
	.get_fobj = mobj_seccpy_shm_get_fobj,
};

static bool mobj_is_seccpy_shm(struct mobj *mobj)
{
	return mobj && mobj->ops == &mobj_seccpy_shm_ops;
}

struct mobj *mobj_seccpy_shm_alloc(size_t size)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct mobj_seccpy_shm *m;
	struct user_ta_ctx *utc;
	vaddr_t va = 0;

	if (!is_user_ta_ctx(tsd->ctx))
		return NULL;
	utc = to_user_ta_ctx(tsd->ctx);

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_seccpy_shm_ops;
	refcount_set(&m->mobj.refc, 1);

	if (vm_add_rwmem(&utc->uctx, &m->mobj, &va) != TEE_SUCCESS)
		goto bad;

	m->fobj = fobj_rw_paged_alloc(ROUNDUP(size, SMALL_PAGE_SIZE) /
				      SMALL_PAGE_SIZE);
	if (!m->fobj)
		goto bad;
	if (tee_pager_add_um_region(&utc->uctx, va, m->fobj,
				    TEE_MATTR_PRW | TEE_MATTR_URW))
		goto bad;

	m->va = va;
	m->utc = utc;
	return &m->mobj;
bad:
	if (va)
		vm_rem_rwmem(&utc->uctx, &m->mobj, va);
	fobj_put(m->fobj);
	free(m);
	return NULL;
}

#endif /*CFG_PAGED_USER_TA*/

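/*
 * mobj_with_fobj implementation. A mobj_with_fobj is backed by a fobj and
 * optionally the struct file it belongs to; its size follows from the
 * number of pages in the fobj. When the fobj cannot report physical
 * addresses the mobj is treated as paged, see mobj_is_paged() below.
 */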
struct mobj_with_fobj {
	struct fobj *fobj;
	struct file *file;
	struct mobj mobj;
};

const struct mobj_ops mobj_with_fobj_ops;

struct mobj *mobj_with_fobj_alloc(struct fobj *fobj, struct file *file)
{
	struct mobj_with_fobj *m = NULL;

	if (!fobj)
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_with_fobj_ops;
	refcount_set(&m->mobj.refc, 1);
	m->mobj.size = fobj->num_pages * SMALL_PAGE_SIZE;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	m->fobj = fobj_get(fobj);
	m->file = file_get(file);

	return &m->mobj;
}

static struct mobj_with_fobj *to_mobj_with_fobj(struct mobj *mobj)
{
	assert(mobj && mobj->ops == &mobj_with_fobj_ops);

	return container_of(mobj, struct mobj_with_fobj, mobj);
}

static bool mobj_with_fobj_matches(struct mobj *mobj __maybe_unused,
				 enum buf_is_attr attr)
{
	assert(to_mobj_with_fobj(mobj));

	/*
	 * All fobjs are supposed to be mapped secure, so classify them as
	 * CORE_MEM_SEC. Stay out of CORE_MEM_TEE_RAM etc; if that
	 * information is needed it can probably be carried in another way
	 * than putting the burden directly on fobj.
	 */
	return attr == CORE_MEM_SEC;
}

static void mobj_with_fobj_free(struct mobj *mobj)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	fobj_put(m->fobj);
	file_put(m->file);
	free(m);
}

static struct fobj *mobj_with_fobj_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_with_fobj(mobj)->fobj);
}

static TEE_Result mobj_with_fobj_get_cattr(struct mobj *mobj __unused,
					   uint32_t *cattr)
{
	if (!cattr)
		return TEE_ERROR_GENERIC;

	/* All fobjs are mapped as normal cached memory */
	*cattr = TEE_MATTR_CACHE_CACHED;

	return TEE_SUCCESS;
}

static TEE_Result mobj_with_fobj_get_pa(struct mobj *mobj, size_t offs,
					size_t granule, paddr_t *pa)
{
	struct mobj_with_fobj *f = to_mobj_with_fobj(mobj);
	paddr_t p = 0;

	if (!f->fobj->ops->get_pa) {
		assert(mobj_is_paged(mobj));
		return TEE_ERROR_NOT_SUPPORTED;
	}

	p = f->fobj->ops->get_pa(f->fobj, offs / SMALL_PAGE_SIZE) +
	    offs % SMALL_PAGE_SIZE;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_with_fobj_get_pa);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_with_fobj_ops
__weak __rodata_unpaged("mobj_with_fobj_ops") = {
	.matches = mobj_with_fobj_matches,
	.free = mobj_with_fobj_free,
	.get_fobj = mobj_with_fobj_get_fobj,
	.get_cattr = mobj_with_fobj_get_cattr,
	.get_pa = mobj_with_fobj_get_pa,
};

#ifdef CFG_PAGED_USER_TA
bool mobj_is_paged(struct mobj *mobj)
{
	if (mobj->ops == &mobj_seccpy_shm_ops)
		return true;

	if (mobj->ops == &mobj_with_fobj_ops &&
	    !to_mobj_with_fobj(mobj)->fobj->ops->get_pa)
		return true;

	return false;
}
#endif /*CFG_PAGED_USER_TA*/

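/*
 * Registers the default mobjs at boot: mobj_sec_ddr over the TA RAM pool
 * and mobj_tee_ram_rx/rw over the core's own RX and RW ranges (a single
 * combined TEE RAM mobj when CFG_CORE_RWDATA_NOEXEC is disabled). Runs as
 * a late driver initcall, see driver_init_late() below.
 */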
static TEE_Result mobj_init(void)
{
	mobj_sec_ddr = mobj_phys_alloc(tee_mm_sec_ddr.lo,
				       tee_mm_sec_ddr.hi - tee_mm_sec_ddr.lo,
				       OPTEE_SMC_SHM_CACHED, CORE_MEM_TA_RAM);
	if (!mobj_sec_ddr)
		panic("Failed to register secure ta ram");

	if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
		mobj_tee_ram_rx = mobj_phys_init(0,
						 VCORE_UNPG_RX_SZ,
						 TEE_MATTR_CACHE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RX);
		if (!mobj_tee_ram_rx)
			panic("Failed to register tee ram rx");

		mobj_tee_ram_rw = mobj_phys_init(0,
						 VCORE_UNPG_RW_SZ,
						 TEE_MATTR_CACHE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram rw");
	} else {
		mobj_tee_ram_rw = mobj_phys_init(TEE_RAM_START,
						 VCORE_UNPG_RW_PA +
						 VCORE_UNPG_RW_SZ -
						 TEE_RAM_START,
						 TEE_MATTR_CACHE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram");

		mobj_tee_ram_rx = mobj_tee_ram_rw;
	}

	return TEE_SUCCESS;
}

driver_init_late(mobj_init);