xref: /optee_os/core/mm/mobj.c (revision 00361c1879106efbbe11e789e03b9efe0c43b0ca)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2021, Linaro Limited
 */

#include <assert.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/linker.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <optee_msg.h>
#include <sm/optee_smc.h>
#include <stdlib.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

struct mobj *mobj_sec_ddr;
struct mobj *mobj_tee_ram;

/*
 * mobj_phys implementation
 */

struct mobj_phys {
	struct mobj mobj;
	enum buf_is_attr battr;
	uint32_t cattr; /* Defined by TEE_MATTR_CACHE_* in tee_mmu_types.h */
	vaddr_t va;
	paddr_t pa;
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj);

static void *mobj_phys_get_va(struct mobj *mobj, size_t offset)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!moph->va || offset >= mobj->size)
		return NULL;

	return (void *)(moph->va + offset);
}

static TEE_Result mobj_phys_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	paddr_t p;

	if (!pa)
		return TEE_ERROR_GENERIC;

	p = moph->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_phys_get_pa);

static TEE_Result mobj_phys_get_cattr(struct mobj *mobj, uint32_t *cattr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!cattr)
		return TEE_ERROR_GENERIC;

	*cattr = moph->cattr;
	return TEE_SUCCESS;
}

static bool mobj_phys_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	enum buf_is_attr a;

	a = moph->battr;

	switch (attr) {
	case CORE_MEM_SEC:
		return a == CORE_MEM_SEC || a == CORE_MEM_TEE_RAM ||
		       a == CORE_MEM_TA_RAM || a == CORE_MEM_SDP_MEM;
	case CORE_MEM_NON_SEC:
		return a == CORE_MEM_NSEC_SHM;
	case CORE_MEM_TEE_RAM:
	case CORE_MEM_TA_RAM:
	case CORE_MEM_NSEC_SHM:
	case CORE_MEM_SDP_MEM:
		return attr == a;
	default:
		return false;
	}
}

static void mobj_phys_free(struct mobj *mobj)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	free(moph);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_phys_ops __weak __rodata_unpaged("mobj_phys_ops") = {
	.get_va = mobj_phys_get_va,
	.get_pa = mobj_phys_get_pa,
	.get_phys_offs = NULL, /* only offset 0 */
	.get_cattr = mobj_phys_get_cattr,
	.matches = mobj_phys_matches,
	.free = mobj_phys_free,
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_phys_ops);
	return container_of(mobj, struct mobj_phys, mobj);
}

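/*
 * Allocate a mobj covering the physically contiguous range [pa, pa + size).
 * Both pa and size must be CORE_MMU_USER_PARAM_SIZE aligned and battr must
 * be one of CORE_MEM_TEE_RAM, CORE_MEM_TA_RAM, CORE_MEM_NSEC_SHM or
 * CORE_MEM_SDP_MEM. Except for SDP memory the range must already be
 * mapped, that is, phys_to_virt() must resolve it. Returns NULL on failure.
 */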
struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t cattr,
			     enum buf_is_attr battr)
{
	struct mobj_phys *moph;
	enum teecore_memtypes area_type;
	void *va;

	if ((pa & CORE_MMU_USER_PARAM_MASK) ||
	    (size & CORE_MMU_USER_PARAM_MASK)) {
		DMSG("Expect %#x alignment", CORE_MMU_USER_PARAM_SIZE);
		return NULL;
	}

	switch (battr) {
	case CORE_MEM_TEE_RAM:
		area_type = MEM_AREA_TEE_RAM_RW_DATA;
		break;
	case CORE_MEM_TA_RAM:
		area_type = MEM_AREA_TA_RAM;
		break;
	case CORE_MEM_NSEC_SHM:
		area_type = MEM_AREA_NSEC_SHM;
		break;
	case CORE_MEM_SDP_MEM:
		area_type = MEM_AREA_SDP_MEM;
		break;
	default:
		DMSG("can't allocate with specified attribute");
		return NULL;
	}

	/* Only SDP memory may not have a virtual address */
	va = phys_to_virt(pa, area_type);
	if (!va && battr != CORE_MEM_SDP_MEM)
		return NULL;

	moph = calloc(1, sizeof(*moph));
	if (!moph)
		return NULL;

	moph->battr = battr;
	moph->cattr = cattr;
	moph->mobj.size = size;
	moph->mobj.ops = &mobj_phys_ops;
	refcount_set(&moph->mobj.refc, 1);
	moph->pa = pa;
	moph->va = (vaddr_t)va;

	return &moph->mobj;
}

/*
 * mobj_virt implementation
 */

static void mobj_virt_assert_type(struct mobj *mobj);

static void *mobj_virt_get_va(struct mobj *mobj, size_t offset)
{
	mobj_virt_assert_type(mobj);

	return (void *)(vaddr_t)offset;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_virt_ops __weak __rodata_unpaged("mobj_virt_ops") = {
	.get_va = mobj_virt_get_va,
};

static void mobj_virt_assert_type(struct mobj *mobj __maybe_unused)
{
	assert(mobj->ops == &mobj_virt_ops);
}

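/*
 * mobj_virt covers the entire virtual address space: get_va() simply casts
 * the offset to a pointer, so the offset is interpreted as a virtual
 * address and there is no backing physical range to translate.
 */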
struct mobj mobj_virt = { .ops = &mobj_virt_ops, .size = SIZE_MAX };

/*
 * mobj_mm implementation
 */

struct mobj_mm {
	tee_mm_entry_t *mm;
	struct mobj *parent_mobj;
	struct mobj mobj;
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj);

static size_t mobj_mm_offs(struct mobj *mobj, size_t offs)
{
	tee_mm_entry_t *mm = to_mobj_mm(mobj)->mm;

	return (mm->offset << mm->pool->shift) + offs;
}

static void *mobj_mm_get_va(struct mobj *mobj, size_t offs)
{
	return mobj_get_va(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs));
}

static TEE_Result mobj_mm_get_pa(struct mobj *mobj, size_t offs,
				    size_t granule, paddr_t *pa)
{
	return mobj_get_pa(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs), granule, pa);
}
DECLARE_KEEP_PAGER(mobj_mm_get_pa);

static size_t mobj_mm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	return mobj_get_phys_offs(to_mobj_mm(mobj)->parent_mobj, granule);
}

static TEE_Result mobj_mm_get_cattr(struct mobj *mobj, uint32_t *cattr)
{
	return mobj_get_cattr(to_mobj_mm(mobj)->parent_mobj, cattr);
}

static bool mobj_mm_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	return mobj_matches(to_mobj_mm(mobj)->parent_mobj, attr);
}

static void mobj_mm_free(struct mobj *mobj)
{
	struct mobj_mm *m = to_mobj_mm(mobj);

	tee_mm_free(m->mm);
	free(m);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_mm_ops __weak __rodata_unpaged("mobj_mm_ops") = {
	.get_va = mobj_mm_get_va,
	.get_pa = mobj_mm_get_pa,
	.get_phys_offs = mobj_mm_get_phys_offs,
	.get_cattr = mobj_mm_get_cattr,
	.matches = mobj_mm_matches,
	.free = mobj_mm_free,
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_mm_ops);
	return container_of(mobj, struct mobj_mm, mobj);
}

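/*
 * Allocate a mobj covering a size byte sub-range of mobj_parent. The range
 * is reserved from the supplied tee_mm pool and all operations are
 * forwarded to the parent mobj at the corresponding offset. Returns NULL
 * on failure.
 */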
struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
			      tee_mm_pool_t *pool)
{
	struct mobj_mm *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;

	m->mm = tee_mm_alloc(pool, size);
	if (!m->mm) {
		free(m);
		return NULL;
	}

	m->parent_mobj = mobj_parent;
	m->mobj.size = size;
	m->mobj.ops = &mobj_mm_ops;
	refcount_set(&m->mobj.refc, 1);

	return &m->mobj;
}

/*
 * mobj_shm implementation. A mobj_shm represents a buffer in the predefined
 * non-secure shared memory region:
 * - it is physically contiguous,
 * - it is identified in the static physical layout as MEM_AREA_NSEC_SHM,
 * - it matches the specific CORE_MEM_NSEC_SHM attribute as well as the
 *   generic non-secure CORE_MEM_NON_SEC attribute.
 */

struct mobj_shm {
	struct mobj mobj;
	paddr_t pa;
	uint64_t cookie;
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj);

static void *mobj_shm_get_va(struct mobj *mobj, size_t offset)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	if (offset >= mobj->size)
		return NULL;

	return phys_to_virt(m->pa + offset, MEM_AREA_NSEC_SHM);
}

static TEE_Result mobj_shm_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_shm *m = to_mobj_shm(mobj);
	paddr_t p;

	if (!pa || offs >= mobj->size)
		return TEE_ERROR_GENERIC;

	p = m->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_shm_get_pa);

static size_t mobj_shm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	assert(IS_POWER_OF_TWO(granule));
	return to_mobj_shm(mobj)->pa & (granule - 1);
}

static bool mobj_shm_matches(struct mobj *mobj __unused, enum buf_is_attr attr)
{
	return attr == CORE_MEM_NSEC_SHM || attr == CORE_MEM_NON_SEC;
}

static void mobj_shm_free(struct mobj *mobj)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	free(m);
}

static uint64_t mobj_shm_get_cookie(struct mobj *mobj)
{
	return to_mobj_shm(mobj)->cookie;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_shm_ops __weak __rodata_unpaged("mobj_shm_ops") = {
	.get_va = mobj_shm_get_va,
	.get_pa = mobj_shm_get_pa,
	.get_phys_offs = mobj_shm_get_phys_offs,
	.matches = mobj_shm_matches,
	.free = mobj_shm_free,
	.get_cookie = mobj_shm_get_cookie,
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_shm_ops);
	return container_of(mobj, struct mobj_shm, mobj);
}

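/*
 * Allocate a mobj covering the range [pa, pa + size) in non-secure shared
 * memory. The range must lie entirely within the NSEC_SHM area and the
 * cookie is recorded so the buffer can later be identified towards the
 * normal world. Returns NULL on failure.
 */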
struct mobj *mobj_shm_alloc(paddr_t pa, size_t size, uint64_t cookie)
{
	struct mobj_shm *m;

	if (!core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size))
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_shm_ops;
	refcount_set(&m->mobj.refc, 1);
	m->pa = pa;
	m->cookie = cookie;

	return &m->mobj;
}
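
/*
 * Example use (illustrative sketch, not part of this file): wrap a buffer
 * in the non-secure shared memory carveout and access it from the core.
 * The pa, size and cookie values are assumed to come from elsewhere;
 * mobj_get_va() returns NULL for out-of-range offsets and mobj_put()
 * drops the initial reference taken at allocation.
 *
 *	struct mobj *m = mobj_shm_alloc(pa, 2 * SMALL_PAGE_SIZE, cookie);
 *
 *	if (m) {
 *		void *va = mobj_get_va(m, 0);
 *
 *		if (va)
 *			memset(va, 0, m->size);
 *		mobj_put(m);
 *	}
 */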

#ifdef CFG_PAGED_USER_TA
/*
 * mobj_seccpy_shm implementation
 */

struct mobj_seccpy_shm {
	struct user_ta_ctx *utc;
	vaddr_t va;
	struct mobj mobj;
	struct fobj *fobj;
};

static bool __maybe_unused mobj_is_seccpy_shm(struct mobj *mobj);

static struct mobj_seccpy_shm *to_mobj_seccpy_shm(struct mobj *mobj)
{
	assert(mobj_is_seccpy_shm(mobj));
	return container_of(mobj, struct mobj_seccpy_shm, mobj);
}

static void *mobj_seccpy_shm_get_va(struct mobj *mobj, size_t offs)
{
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

	if (&m->utc->ta_ctx.ts_ctx != thread_get_tsd()->ctx)
		return NULL;

	if (offs >= mobj->size)
		return NULL;
	return (void *)(m->va + offs);
}

static bool mobj_seccpy_shm_matches(struct mobj *mobj __maybe_unused,
				 enum buf_is_attr attr)
{
	assert(mobj_is_seccpy_shm(mobj));

	return attr == CORE_MEM_SEC || attr == CORE_MEM_TEE_RAM;
}

static void mobj_seccpy_shm_free(struct mobj *mobj)
{
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

	tee_pager_rem_um_region(&m->utc->uctx, m->va, mobj->size);
	vm_rem_rwmem(&m->utc->uctx, mobj, m->va);
	fobj_put(m->fobj);
	free(m);
}

static struct fobj *mobj_seccpy_shm_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_seccpy_shm(mobj)->fobj);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_seccpy_shm_ops
__weak __rodata_unpaged("mobj_seccpy_shm_ops") = {
	.get_va = mobj_seccpy_shm_get_va,
	.matches = mobj_seccpy_shm_matches,
	.free = mobj_seccpy_shm_free,
	.get_fobj = mobj_seccpy_shm_get_fobj,
};

static bool mobj_is_seccpy_shm(struct mobj *mobj)
{
	return mobj && mobj->ops == &mobj_seccpy_shm_ops;
}

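/*
 * Allocate a pageable read/write buffer of size bytes mapped into the
 * address space of the currently running user TA. Must be called in the
 * context of a user TA and get_va() only succeeds from that same TA
 * context. Returns NULL on failure.
 */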
struct mobj *mobj_seccpy_shm_alloc(size_t size)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct mobj_seccpy_shm *m;
	struct user_ta_ctx *utc;
	vaddr_t va = 0;

	if (!is_user_ta_ctx(tsd->ctx))
		return NULL;
	utc = to_user_ta_ctx(tsd->ctx);

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_seccpy_shm_ops;
	refcount_set(&m->mobj.refc, 1);

	if (vm_add_rwmem(&utc->uctx, &m->mobj, &va) != TEE_SUCCESS)
		goto bad;

	m->fobj = fobj_rw_paged_alloc(ROUNDUP(size, SMALL_PAGE_SIZE) /
				      SMALL_PAGE_SIZE);
	if (!m->fobj)
		goto bad;
	if (tee_pager_add_um_region(&utc->uctx, va, m->fobj,
				    TEE_MATTR_PRW | TEE_MATTR_URW))
		goto bad;

	m->va = va;
	m->utc = to_user_ta_ctx(tsd->ctx);
	return &m->mobj;
bad:
	if (va)
		vm_rem_rwmem(&utc->uctx, &m->mobj, va);
	fobj_put(m->fobj);
	free(m);
	return NULL;
}

#endif /*CFG_PAGED_USER_TA*/

struct mobj_with_fobj {
	struct fobj *fobj;
	struct file *file;
	struct mobj mobj;
};

const struct mobj_ops mobj_with_fobj_ops;

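/*
 * Wrap a fobj, and optionally the file it belongs to, in a mobj. The mobj
 * takes its own references on the fobj and the file and releases them when
 * it is freed. Returns NULL if fobj is NULL or on allocation failure.
 */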
struct mobj *mobj_with_fobj_alloc(struct fobj *fobj, struct file *file)
{
	struct mobj_with_fobj *m = NULL;

	if (!fobj)
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_with_fobj_ops;
	refcount_set(&m->mobj.refc, 1);
	m->mobj.size = fobj->num_pages * SMALL_PAGE_SIZE;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	m->fobj = fobj_get(fobj);
	m->file = file_get(file);

	return &m->mobj;
}

static struct mobj_with_fobj *to_mobj_with_fobj(struct mobj *mobj)
{
	assert(mobj && mobj->ops == &mobj_with_fobj_ops);

	return container_of(mobj, struct mobj_with_fobj, mobj);
}

static bool mobj_with_fobj_matches(struct mobj *mobj __maybe_unused,
				 enum buf_is_attr attr)
{
	assert(to_mobj_with_fobj(mobj));

	/*
	 * All fobjs are supposed to be mapped secure, so classify them as
	 * CORE_MEM_SEC. Stay out of CORE_MEM_TEE_RAM etc; if that
	 * information is needed it can probably be carried in another way
	 * than to put the burden directly on fobj.
	 */
	return attr == CORE_MEM_SEC;
}

static void mobj_with_fobj_free(struct mobj *mobj)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	fobj_put(m->fobj);
	file_put(m->file);
	free(m);
}

static struct fobj *mobj_with_fobj_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_with_fobj(mobj)->fobj);
}

static TEE_Result mobj_with_fobj_get_cattr(struct mobj *mobj __unused,
					   uint32_t *cattr)
{
	if (!cattr)
		return TEE_ERROR_GENERIC;

	/* All fobjs are mapped as normal cached memory */
	*cattr = TEE_MATTR_CACHE_CACHED;

	return TEE_SUCCESS;
}

static TEE_Result mobj_with_fobj_get_pa(struct mobj *mobj, size_t offs,
					size_t granule, paddr_t *pa)
{
	struct mobj_with_fobj *f = to_mobj_with_fobj(mobj);
	paddr_t p = 0;

	if (!f->fobj->ops->get_pa) {
		assert(mobj_is_paged(mobj));
		return TEE_ERROR_NOT_SUPPORTED;
	}

	p = f->fobj->ops->get_pa(f->fobj, offs / SMALL_PAGE_SIZE) +
	    offs % SMALL_PAGE_SIZE;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_with_fobj_get_pa);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_with_fobj_ops
__weak __rodata_unpaged("mobj_with_fobj_ops") = {
	.matches = mobj_with_fobj_matches,
	.free = mobj_with_fobj_free,
	.get_fobj = mobj_with_fobj_get_fobj,
	.get_cattr = mobj_with_fobj_get_cattr,
	.get_pa = mobj_with_fobj_get_pa,
};

#ifdef CFG_PAGED_USER_TA
bool mobj_is_paged(struct mobj *mobj)
{
	if (mobj->ops == &mobj_seccpy_shm_ops)
		return true;

	if (mobj->ops == &mobj_with_fobj_ops &&
	    !to_mobj_with_fobj(mobj)->fobj->ops->get_pa)
		return true;

	return false;
}
#endif /*CFG_PAGED_USER_TA*/

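/*
 * Register the two statically defined physical memory objects: TA RAM as
 * mobj_sec_ddr and TEE RAM, up to the end of the unpaged read/write data,
 * as mobj_tee_ram. Panics if either allocation fails.
 */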
static TEE_Result mobj_init(void)
{
	mobj_sec_ddr = mobj_phys_alloc(tee_mm_sec_ddr.lo,
				       tee_mm_sec_ddr.hi - tee_mm_sec_ddr.lo,
				       OPTEE_SMC_SHM_CACHED, CORE_MEM_TA_RAM);
	if (!mobj_sec_ddr)
		panic("Failed to register secure ta ram");

	mobj_tee_ram = mobj_phys_alloc(TEE_RAM_START,
				       VCORE_UNPG_RW_PA + VCORE_UNPG_RW_SZ -
						TEE_RAM_START,
				       TEE_MATTR_CACHE_CACHED,
				       CORE_MEM_TEE_RAM);
	if (!mobj_tee_ram)
		panic("Failed to register tee ram");

	return TEE_SUCCESS;
}

driver_init_late(mobj_init);