// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2021, Linaro Limited
 */

#include <assert.h>
#include <config.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/linker.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <optee_msg.h>
#include <sm/optee_smc.h>
#include <stdlib.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

struct mobj *mobj_sec_ddr;
struct mobj *mobj_tee_ram_rx;
struct mobj *mobj_tee_ram_rw;

/*
 * mobj_phys implementation
 */

struct mobj_phys {
	struct mobj mobj;
	enum buf_is_attr battr;
	uint32_t cattr; /* Defined by TEE_MATTR_CACHE_* in tee_mmu_types.h */
	vaddr_t va;
	paddr_t pa;
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj);

static void *mobj_phys_get_va(struct mobj *mobj, size_t offset)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!moph->va || offset >= mobj->size)
		return NULL;

	return (void *)(moph->va + offset);
}

static TEE_Result mobj_phys_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	paddr_t p;

	if (!pa)
		return TEE_ERROR_GENERIC;

	p = moph->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_phys_get_pa);
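
/*
 * Worked example of the granule handling above (illustrative, the addresses
 * are hypothetical): with moph->pa == 0x40100000, offs == 0x1234 and
 * granule == SMALL_PAGE_SIZE (0x1000), p starts out as 0x40101234 and the
 * mask ~(granule - 1) truncates it to the page boundary 0x40101000, which
 * is what ends up in *pa. With granule == 0 the exact address 0x40101234 is
 * returned instead.
 */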

static TEE_Result mobj_phys_get_cattr(struct mobj *mobj, uint32_t *cattr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!cattr)
		return TEE_ERROR_GENERIC;

	*cattr = moph->cattr;
	return TEE_SUCCESS;
}

static bool mobj_phys_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	enum buf_is_attr a;

	a = moph->battr;

	switch (attr) {
	case CORE_MEM_SEC:
		return a == CORE_MEM_SEC || a == CORE_MEM_TEE_RAM ||
		       a == CORE_MEM_TA_RAM || a == CORE_MEM_SDP_MEM;
	case CORE_MEM_NON_SEC:
		return a == CORE_MEM_NSEC_SHM;
	case CORE_MEM_TEE_RAM:
	case CORE_MEM_TA_RAM:
	case CORE_MEM_NSEC_SHM:
	case CORE_MEM_SDP_MEM:
		return attr == a;
	default:
		return false;
	}
}

static void mobj_phys_free(struct mobj *mobj)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	free(moph);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_phys_ops __weak __rodata_unpaged("mobj_phys_ops") = {
	.get_va = mobj_phys_get_va,
	.get_pa = mobj_phys_get_pa,
	.get_phys_offs = NULL, /* only offset 0 */
	.get_cattr = mobj_phys_get_cattr,
	.matches = mobj_phys_matches,
	.free = mobj_phys_free,
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_phys_ops);
	return container_of(mobj, struct mobj_phys, mobj);
}

static struct mobj *mobj_phys_init(paddr_t pa, size_t size, uint32_t cattr,
				   enum buf_is_attr battr,
				   enum teecore_memtypes area_type)
{
	void *va = NULL;
	struct mobj_phys *moph = NULL;

	if ((pa & CORE_MMU_USER_PARAM_MASK) ||
	    (size & CORE_MMU_USER_PARAM_MASK)) {
		DMSG("Expect %#x alignment", CORE_MMU_USER_PARAM_SIZE);
		return NULL;
	}

	/* Only SDP memory may not have a virtual address */
	va = phys_to_virt(pa, area_type);
	if (!va && battr != CORE_MEM_SDP_MEM)
		return NULL;

	moph = calloc(1, sizeof(*moph));
	if (!moph)
		return NULL;

	moph->battr = battr;
	moph->cattr = cattr;
	moph->mobj.size = size;
	moph->mobj.ops = &mobj_phys_ops;
	refcount_set(&moph->mobj.refc, 1);
	moph->pa = pa;
	moph->va = (vaddr_t)va;

	return &moph->mobj;
}

struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t cattr,
			     enum buf_is_attr battr)
{
	enum teecore_memtypes area_type;

	switch (battr) {
	case CORE_MEM_TEE_RAM:
		area_type = MEM_AREA_TEE_RAM_RW_DATA;
		break;
	case CORE_MEM_TA_RAM:
		area_type = MEM_AREA_TA_RAM;
		break;
	case CORE_MEM_NSEC_SHM:
		area_type = MEM_AREA_NSEC_SHM;
		break;
	case CORE_MEM_SDP_MEM:
		area_type = MEM_AREA_SDP_MEM;
		break;
	default:
		DMSG("can't allocate with specified attribute");
		return NULL;
	}

	return mobj_phys_init(pa, size, cattr, battr, area_type);
}
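
/*
 * Usage sketch (illustrative only, not part of the build; the base address
 * and size are hypothetical): wrap a physically contiguous secure region in
 * a mobj and query it back through the generic mobj API.
 *
 *	struct mobj *mobj = NULL;
 *	paddr_t pa = 0;
 *
 *	mobj = mobj_phys_alloc(0x42000000, 0x100000, TEE_MATTR_CACHE_CACHED,
 *			       CORE_MEM_TA_RAM);
 *	if (!mobj)
 *		panic();
 *	if (mobj_get_pa(mobj, 0x1000, 0, &pa))
 *		panic();
 *	// pa == 0x42001000; mobj_get_va(mobj, 0x1000) returns the matching
 *	// core virtual address, or NULL if the region has no mapping (only
 *	// allowed for CORE_MEM_SDP_MEM).
 */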

/*
 * mobj_virt implementation
 */

static void mobj_virt_assert_type(struct mobj *mobj);

static void *mobj_virt_get_va(struct mobj *mobj, size_t offset)
{
	mobj_virt_assert_type(mobj);

	return (void *)(vaddr_t)offset;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_virt_ops __weak __rodata_unpaged("mobj_virt_ops") = {
	.get_va = mobj_virt_get_va,
};

static void mobj_virt_assert_type(struct mobj *mobj __maybe_unused)
{
	assert(mobj->ops == &mobj_virt_ops);
}

struct mobj mobj_virt = { .ops = &mobj_virt_ops, .size = SIZE_MAX };
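
/*
 * Note on usage (illustrative): mobj_virt covers the whole virtual address
 * space, so the "offset" passed to mobj_get_va() is returned unchanged as
 * the virtual address, e.g.
 *
 *	vaddr_t va = 0x10000000;	// hypothetical address
 *
 *	assert(mobj_get_va(&mobj_virt, va) == (void *)va);
 *
 * Since no get_pa() callback is provided, mobj_virt cannot be translated to
 * a physical address.
 */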

/*
 * mobj_mm implementation
 */

struct mobj_mm {
	tee_mm_entry_t *mm;
	struct mobj *parent_mobj;
	struct mobj mobj;
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj);

static size_t mobj_mm_offs(struct mobj *mobj, size_t offs)
{
	tee_mm_entry_t *mm = to_mobj_mm(mobj)->mm;

	return (mm->offset << mm->pool->shift) + offs;
}

static void *mobj_mm_get_va(struct mobj *mobj, size_t offs)
{
	return mobj_get_va(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs));
}


static TEE_Result mobj_mm_get_pa(struct mobj *mobj, size_t offs,
				    size_t granule, paddr_t *pa)
{
	return mobj_get_pa(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs), granule, pa);
}
DECLARE_KEEP_PAGER(mobj_mm_get_pa);

static size_t mobj_mm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	return mobj_get_phys_offs(to_mobj_mm(mobj)->parent_mobj, granule);
}

static TEE_Result mobj_mm_get_cattr(struct mobj *mobj, uint32_t *cattr)
{
	return mobj_get_cattr(to_mobj_mm(mobj)->parent_mobj, cattr);
}

static bool mobj_mm_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	return mobj_matches(to_mobj_mm(mobj)->parent_mobj, attr);
}

static void mobj_mm_free(struct mobj *mobj)
{
	struct mobj_mm *m = to_mobj_mm(mobj);

	tee_mm_free(m->mm);
	free(m);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_mm_ops __weak __rodata_unpaged("mobj_mm_ops") = {
	.get_va = mobj_mm_get_va,
	.get_pa = mobj_mm_get_pa,
	.get_phys_offs = mobj_mm_get_phys_offs,
	.get_cattr = mobj_mm_get_cattr,
	.matches = mobj_mm_matches,
	.free = mobj_mm_free,
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_mm_ops);
	return container_of(mobj, struct mobj_mm, mobj);
}

struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
			      tee_mm_pool_t *pool)
{
	struct mobj_mm *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;

	m->mm = tee_mm_alloc(pool, size);
	if (!m->mm) {
		free(m);
		return NULL;
	}

	m->parent_mobj = mobj_parent;
	m->mobj.size = size;
	m->mobj.ops = &mobj_mm_ops;
	refcount_set(&m->mobj.refc, 1);

	return &m->mobj;
}
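
/*
 * Usage sketch (illustrative): carve a sub-range out of a parent mobj
 * through a tee_mm pool, similar to how TA RAM is handed out from
 * mobj_sec_ddr via the tee_mm_sec_ddr pool.
 *
 *	struct mobj *m = NULL;
 *
 *	m = mobj_mm_alloc(mobj_sec_ddr, 4 * SMALL_PAGE_SIZE, &tee_mm_sec_ddr);
 *	if (!m)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *	// get_va()/get_pa()/matches() are forwarded to the parent mobj with
 *	// the pool offset added; mobj_put(m) releases the tee_mm entry again.
 */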


/*
 * mobj_shm implementation. A mobj_shm represents a buffer in the predefined
 * non-secure shared memory region:
 * - it is physically contiguous,
 * - it is identified in the static physical layout as MEM_AREA_NSEC_SHM,
 * - its mobjs match both the specific CORE_MEM_NSEC_SHM attribute and the
 *   generic non-secure CORE_MEM_NON_SEC attribute.
 */

struct mobj_shm {
	struct mobj mobj;
	paddr_t pa;
	uint64_t cookie;
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj);

static void *mobj_shm_get_va(struct mobj *mobj, size_t offset)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	if (offset >= mobj->size)
		return NULL;

	return phys_to_virt(m->pa + offset, MEM_AREA_NSEC_SHM);
}

static TEE_Result mobj_shm_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_shm *m = to_mobj_shm(mobj);
	paddr_t p;

	if (!pa || offs >= mobj->size)
		return TEE_ERROR_GENERIC;

	p = m->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_shm_get_pa);

static size_t mobj_shm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	assert(IS_POWER_OF_TWO(granule));
	return to_mobj_shm(mobj)->pa & (granule - 1);
}

static bool mobj_shm_matches(struct mobj *mobj __unused, enum buf_is_attr attr)
{
	return attr == CORE_MEM_NSEC_SHM || attr == CORE_MEM_NON_SEC;
}

static void mobj_shm_free(struct mobj *mobj)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	free(m);
}

static uint64_t mobj_shm_get_cookie(struct mobj *mobj)
{
	return to_mobj_shm(mobj)->cookie;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_shm_ops __weak __rodata_unpaged("mobj_shm_ops") = {
	.get_va = mobj_shm_get_va,
	.get_pa = mobj_shm_get_pa,
	.get_phys_offs = mobj_shm_get_phys_offs,
	.matches = mobj_shm_matches,
	.free = mobj_shm_free,
	.get_cookie = mobj_shm_get_cookie,
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_shm_ops);
	return container_of(mobj, struct mobj_shm, mobj);
}

struct mobj *mobj_shm_alloc(paddr_t pa, size_t size, uint64_t cookie)
{
	struct mobj_shm *m;

	if (!core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size))
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_shm_ops;
	refcount_set(&m->mobj.refc, 1);
	m->pa = pa;
	m->cookie = cookie;

	return &m->mobj;
}
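
/*
 * Usage sketch (illustrative; the physical address and cookie are
 * hypothetical values that would normally come from an OPTEE_MSG argument):
 * wrap a buffer that the normal world has placed in the static non-secure
 * SHM carveout.
 *
 *	struct mobj *m = NULL;
 *
 *	m = mobj_shm_alloc(0x8e000000, 2 * SMALL_PAGE_SIZE, 0x1234);
 *	if (!m)
 *		return TEE_ERROR_BAD_PARAMETERS;	// outside NSEC_SHM
 *	// mobj_get_cookie(m) now returns 0x1234, so the buffer can later be
 *	// matched against the cookie supplied by the normal world.
 */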

#ifdef CFG_PAGED_USER_TA
/*
 * mobj_seccpy_shm implementation
 */

struct mobj_seccpy_shm {
	struct user_ta_ctx *utc;
	vaddr_t va;
	struct mobj mobj;
	struct fobj *fobj;
};

static bool __maybe_unused mobj_is_seccpy_shm(struct mobj *mobj);

static struct mobj_seccpy_shm *to_mobj_seccpy_shm(struct mobj *mobj)
{
	assert(mobj_is_seccpy_shm(mobj));
	return container_of(mobj, struct mobj_seccpy_shm, mobj);
}

static void *mobj_seccpy_shm_get_va(struct mobj *mobj, size_t offs)
{
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

	if (&m->utc->ta_ctx.ts_ctx != thread_get_tsd()->ctx)
		return NULL;

	if (offs >= mobj->size)
		return NULL;
	return (void *)(m->va + offs);
}

static bool mobj_seccpy_shm_matches(struct mobj *mobj __maybe_unused,
				 enum buf_is_attr attr)
{
	assert(mobj_is_seccpy_shm(mobj));

	return attr == CORE_MEM_SEC || attr == CORE_MEM_TEE_RAM;
}

static void mobj_seccpy_shm_free(struct mobj *mobj)
{
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

	tee_pager_rem_um_region(&m->utc->uctx, m->va, mobj->size);
	vm_rem_rwmem(&m->utc->uctx, mobj, m->va);
	fobj_put(m->fobj);
	free(m);
}

static struct fobj *mobj_seccpy_shm_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_seccpy_shm(mobj)->fobj);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_seccpy_shm_ops
__weak __rodata_unpaged("mobj_seccpy_shm_ops") = {
	.get_va = mobj_seccpy_shm_get_va,
	.matches = mobj_seccpy_shm_matches,
	.free = mobj_seccpy_shm_free,
	.get_fobj = mobj_seccpy_shm_get_fobj,
};

static bool mobj_is_seccpy_shm(struct mobj *mobj)
{
	return mobj && mobj->ops == &mobj_seccpy_shm_ops;
}

struct mobj *mobj_seccpy_shm_alloc(size_t size)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct mobj_seccpy_shm *m;
	struct user_ta_ctx *utc;
	vaddr_t va = 0;

	if (!is_user_ta_ctx(tsd->ctx))
		return NULL;
	utc = to_user_ta_ctx(tsd->ctx);

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_seccpy_shm_ops;
	refcount_set(&m->mobj.refc, 1);

	if (vm_add_rwmem(&utc->uctx, &m->mobj, &va) != TEE_SUCCESS)
		goto bad;

	m->fobj = fobj_rw_paged_alloc(ROUNDUP(size, SMALL_PAGE_SIZE) /
				      SMALL_PAGE_SIZE);
	if (!m->fobj)
		goto bad;
	if (tee_pager_add_um_region(&utc->uctx, va, m->fobj,
				    TEE_MATTR_PRW | TEE_MATTR_URW))
		goto bad;

	m->va = va;
	m->utc = to_user_ta_ctx(tsd->ctx);
	return &m->mobj;
bad:
	if (va)
		vm_rem_rwmem(&utc->uctx, &m->mobj, va);
	fobj_put(m->fobj);
	free(m);
	return NULL;
}


#endif /*CFG_PAGED_USER_TA*/

struct mobj_with_fobj {
	struct fobj *fobj;
	struct file *file;
	struct mobj mobj;
};

const struct mobj_ops mobj_with_fobj_ops;

struct mobj *mobj_with_fobj_alloc(struct fobj *fobj, struct file *file)
{
	struct mobj_with_fobj *m = NULL;

	if (!fobj)
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_with_fobj_ops;
	refcount_set(&m->mobj.refc, 1);
	m->mobj.size = fobj->num_pages * SMALL_PAGE_SIZE;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	m->fobj = fobj_get(fobj);
	m->file = file_get(file);

	return &m->mobj;
}
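
/*
 * Usage sketch (illustrative): back a mobj with a paged fobj, as done for
 * paged user TA memory. The page count is a hypothetical example value and
 * the file argument may be NULL when there is no backing file.
 *
 *	struct fobj *f = fobj_rw_paged_alloc(4);
 *	struct mobj *m = NULL;
 *
 *	if (!f)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *	m = mobj_with_fobj_alloc(f, NULL);
 *	fobj_put(f);	// the mobj holds its own reference via fobj_get()
 *	if (!m)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *	// m->size == 4 * SMALL_PAGE_SIZE and mobj_get_fobj(m) returns a new
 *	// reference to the underlying fobj.
 */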

static struct mobj_with_fobj *to_mobj_with_fobj(struct mobj *mobj)
{
	assert(mobj && mobj->ops == &mobj_with_fobj_ops);

	return container_of(mobj, struct mobj_with_fobj, mobj);
}

static bool mobj_with_fobj_matches(struct mobj *mobj __maybe_unused,
				 enum buf_is_attr attr)
{
	assert(to_mobj_with_fobj(mobj));

	/*
	 * All fobjs are supposed to be mapped secure, so classify them as
	 * CORE_MEM_SEC. Stay out of CORE_MEM_TEE_RAM etc; if that
	 * information is needed it can probably be carried in another way
	 * than putting the burden directly on fobj.
	 */
	return attr == CORE_MEM_SEC;
}

static void mobj_with_fobj_free(struct mobj *mobj)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	fobj_put(m->fobj);
	file_put(m->file);
	free(m);
}

static struct fobj *mobj_with_fobj_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_with_fobj(mobj)->fobj);
}

static TEE_Result mobj_with_fobj_get_cattr(struct mobj *mobj __unused,
					   uint32_t *cattr)
{
	if (!cattr)
		return TEE_ERROR_GENERIC;

	/* All fobjs are mapped as normal cached memory */
	*cattr = TEE_MATTR_CACHE_CACHED;

	return TEE_SUCCESS;
}

static TEE_Result mobj_with_fobj_get_pa(struct mobj *mobj, size_t offs,
					size_t granule, paddr_t *pa)
{
	struct mobj_with_fobj *f = to_mobj_with_fobj(mobj);
	paddr_t p = 0;

	if (!f->fobj->ops->get_pa) {
		assert(mobj_is_paged(mobj));
		return TEE_ERROR_NOT_SUPPORTED;
	}

	p = f->fobj->ops->get_pa(f->fobj, offs / SMALL_PAGE_SIZE) +
	    offs % SMALL_PAGE_SIZE;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_with_fobj_get_pa);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_with_fobj_ops
__weak __rodata_unpaged("mobj_with_fobj_ops") = {
	.matches = mobj_with_fobj_matches,
	.free = mobj_with_fobj_free,
	.get_fobj = mobj_with_fobj_get_fobj,
	.get_cattr = mobj_with_fobj_get_cattr,
	.get_pa = mobj_with_fobj_get_pa,
};

#ifdef CFG_PAGED_USER_TA
bool mobj_is_paged(struct mobj *mobj)
{
	if (mobj->ops == &mobj_seccpy_shm_ops)
		return true;

	if (mobj->ops == &mobj_with_fobj_ops &&
	    !to_mobj_with_fobj(mobj)->fobj->ops->get_pa)
		return true;

	return false;
}
#endif /*CFG_PAGED_USER_TA*/

static TEE_Result mobj_init(void)
{
	mobj_sec_ddr = mobj_phys_alloc(tee_mm_sec_ddr.lo,
				       tee_mm_sec_ddr.hi - tee_mm_sec_ddr.lo,
				       OPTEE_SMC_SHM_CACHED, CORE_MEM_TA_RAM);
	if (!mobj_sec_ddr)
		panic("Failed to register secure ta ram");

	if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
		mobj_tee_ram_rx = mobj_phys_init(TEE_RAM_START,
						 VCORE_UNPG_RX_SZ,
						 TEE_MATTR_CACHE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RX);
		if (!mobj_tee_ram_rx)
			panic("Failed to register tee ram rx");

		mobj_tee_ram_rw = mobj_phys_init(TEE_RAM_START +
						 VCORE_UNPG_RX_SZ,
						 VCORE_UNPG_RW_SZ,
						 TEE_MATTR_CACHE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram rw");
	} else {
		mobj_tee_ram_rw = mobj_phys_init(TEE_RAM_START,
						 VCORE_UNPG_RW_PA +
						 VCORE_UNPG_RW_SZ -
						 TEE_RAM_START,
						 TEE_MATTR_CACHE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram");

		mobj_tee_ram_rx = mobj_tee_ram_rw;
	}

	return TEE_SUCCESS;
}

driver_init_late(mobj_init);