xref: /optee_os/core/mm/mobj.c (revision 8afe7a7c52204da38fc082c55f908fb819079f30)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2021, Linaro Limited
 */

#include <assert.h>
#include <config.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/linker.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <optee_msg.h>
#include <stdlib.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

struct mobj *mobj_sec_ddr;
struct mobj *mobj_tee_ram_rx;
struct mobj *mobj_tee_ram_rw;

/*
 * mobj_phys implementation
 */

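/*
 * A mobj covering a physically contiguous memory region with a fixed
 * physical address and, optionally, a fixed virtual address in the core
 * mapping.
 */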
struct mobj_phys {
	struct mobj mobj;
	enum buf_is_attr battr;
	/* Defined by TEE_MATTR_MEM_TYPE_* in tee_mmu_types.h */
	uint32_t mem_type;
	vaddr_t va;
	paddr_t pa;
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj);

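/* Returns NULL if the mobj lacks a core mapping or the range is invalid */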
static void *mobj_phys_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!moph->va || !mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return (void *)(moph->va + offset);
}

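/*
 * Translates an offset within the mobj to a physical address, optionally
 * masked down to a small-page or page-directory granule.
 */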
static TEE_Result mobj_phys_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	paddr_t p;

	if (!pa)
		return TEE_ERROR_GENERIC;

	p = moph->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_phys_get_pa);

static TEE_Result mobj_phys_get_mem_type(struct mobj *mobj, uint32_t *mem_type)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = moph->mem_type;
	return TEE_SUCCESS;
}

static bool mobj_phys_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	enum buf_is_attr a;

	a = moph->battr;

	switch (attr) {
	case CORE_MEM_SEC:
		return a == CORE_MEM_SEC || a == CORE_MEM_TEE_RAM ||
		       a == CORE_MEM_TA_RAM || a == CORE_MEM_SDP_MEM;
	case CORE_MEM_NON_SEC:
		return a == CORE_MEM_NSEC_SHM;
	case CORE_MEM_TEE_RAM:
	case CORE_MEM_TA_RAM:
	case CORE_MEM_NSEC_SHM:
	case CORE_MEM_SDP_MEM:
		return attr == a;
	default:
		return false;
	}
}

static void mobj_phys_free(struct mobj *mobj)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	free(moph);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_phys_ops
__weak __relrodata_unpaged("mobj_phys_ops") = {
	.get_va = mobj_phys_get_va,
	.get_pa = mobj_phys_get_pa,
	.get_phys_offs = NULL, /* only offset 0 */
	.get_mem_type = mobj_phys_get_mem_type,
	.matches = mobj_phys_matches,
	.free = mobj_phys_free,
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_phys_ops);
	return container_of(mobj, struct mobj_phys, mobj);
}

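/*
 * If @pa is non-zero the caller supplies the physical range; otherwise an
 * exclusive static mapping of @area_type is looked up and its physical and
 * virtual addresses are used.
 */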
static struct mobj *mobj_phys_init(paddr_t pa, size_t size, uint32_t mem_type,
				   enum buf_is_attr battr,
				   enum teecore_memtypes area_type)
{
	void *va = NULL;
	struct mobj_phys *moph = NULL;
	struct tee_mmap_region *map = NULL;

	if ((pa & CORE_MMU_USER_PARAM_MASK) ||
	    (size & CORE_MMU_USER_PARAM_MASK)) {
		DMSG("Expect %#x alignment", CORE_MMU_USER_PARAM_SIZE);
		return NULL;
	}

	if (pa) {
		va = phys_to_virt(pa, area_type, size);
	} else {
		map = core_mmu_find_mapping_exclusive(area_type, size);
		if (!map)
			return NULL;

		pa = map->pa;
		va = (void *)map->va;
	}

	/* Only SDP memory may not have a virtual address */
	if (!va && battr != CORE_MEM_SDP_MEM)
		return NULL;

	moph = calloc(1, sizeof(*moph));
	if (!moph)
		return NULL;

	moph->battr = battr;
	moph->mem_type = mem_type;
	moph->mobj.size = size;
	moph->mobj.ops = &mobj_phys_ops;
	refcount_set(&moph->mobj.refc, 1);
	moph->pa = pa;
	moph->va = (vaddr_t)va;

	return &moph->mobj;
}

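/*
 * Wraps a physically contiguous buffer whose attribute maps onto one of the
 * known static memory areas (TEE RAM, TA RAM, non-secure SHM or SDP memory).
 */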
struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t mem_type,
			     enum buf_is_attr battr)
{
	enum teecore_memtypes area_type;

	switch (battr) {
	case CORE_MEM_TEE_RAM:
		area_type = MEM_AREA_TEE_RAM_RW_DATA;
		break;
	case CORE_MEM_TA_RAM:
		area_type = MEM_AREA_TA_RAM;
		break;
	case CORE_MEM_NSEC_SHM:
		area_type = MEM_AREA_NSEC_SHM;
		break;
	case CORE_MEM_SDP_MEM:
		area_type = MEM_AREA_SDP_MEM;
		break;
	default:
		DMSG("can't allocate with specified attribute");
		return NULL;
	}

	return mobj_phys_init(pa, size, mem_type, battr, area_type);
}

/*
 * mobj_virt implementation
 */

static void mobj_virt_assert_type(struct mobj *mobj);

static void *mobj_virt_get_va(struct mobj *mobj, size_t offset,
			      size_t len __maybe_unused)
{
	mobj_virt_assert_type(mobj);
	assert(mobj_check_offset_and_len(mobj, offset, len));

	return (void *)(vaddr_t)offset;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_virt_ops
__weak __relrodata_unpaged("mobj_virt_ops") = {
	.get_va = mobj_virt_get_va,
};

static void mobj_virt_assert_type(struct mobj *mobj __maybe_unused)
{
	assert(mobj->ops == &mobj_virt_ops);
}

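/*
 * mobj_virt covers the entire virtual address space: get_va() simply returns
 * the offset itself, so any mapped address can be referred to as an offset
 * into this mobj.
 */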
struct mobj mobj_virt = { .ops = &mobj_virt_ops, .size = SIZE_MAX };

/*
 * mobj_mm implementation
 */

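/*
 * A mobj describing a sub-range of a parent mobj, backed by an allocation
 * from a tee_mm pool. All operations are forwarded to the parent at the
 * translated offset.
 */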
struct mobj_mm {
	tee_mm_entry_t *mm;
	struct mobj *parent_mobj;
	struct mobj mobj;
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj);

static size_t mobj_mm_offs(struct mobj *mobj, size_t offs)
{
	tee_mm_entry_t *mm = to_mobj_mm(mobj)->mm;

	return (mm->offset << mm->pool->shift) + offs;
}

static void *mobj_mm_get_va(struct mobj *mobj, size_t offs, size_t len)
{
	return mobj_get_va(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs), len);
}


static TEE_Result mobj_mm_get_pa(struct mobj *mobj, size_t offs,
				    size_t granule, paddr_t *pa)
{
	return mobj_get_pa(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs), granule, pa);
}
DECLARE_KEEP_PAGER(mobj_mm_get_pa);

static size_t mobj_mm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	return mobj_get_phys_offs(to_mobj_mm(mobj)->parent_mobj, granule);
}

static TEE_Result mobj_mm_get_mem_type(struct mobj *mobj, uint32_t *mem_type)
{
	return mobj_get_mem_type(to_mobj_mm(mobj)->parent_mobj, mem_type);
}

static bool mobj_mm_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	return mobj_matches(to_mobj_mm(mobj)->parent_mobj, attr);
}

static void mobj_mm_free(struct mobj *mobj)
{
	struct mobj_mm *m = to_mobj_mm(mobj);

	tee_mm_free(m->mm);
	free(m);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_mm_ops __weak __relrodata_unpaged("mobj_mm_ops") = {
	.get_va = mobj_mm_get_va,
	.get_pa = mobj_mm_get_pa,
	.get_phys_offs = mobj_mm_get_phys_offs,
	.get_mem_type = mobj_mm_get_mem_type,
	.matches = mobj_mm_matches,
	.free = mobj_mm_free,
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_mm_ops);
	return container_of(mobj, struct mobj_mm, mobj);
}

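/*
 * Carves a sub-range of @size bytes out of @mobj_parent; the offset within
 * the parent is taken from an allocation in @pool. As an illustrative
 * (hypothetical) example, something like
 *	mobj_mm_alloc(mobj_sec_ddr, num_pages * SMALL_PAGE_SIZE,
 *		      &tee_mm_sec_ddr);
 * would hand out a slice of secure TA RAM.
 */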
struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
			      tee_mm_pool_t *pool)
{
	struct mobj_mm *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;

	m->mm = tee_mm_alloc(pool, size);
	if (!m->mm) {
		free(m);
		return NULL;
	}

	m->parent_mobj = mobj_parent;
	m->mobj.size = size;
	m->mobj.ops = &mobj_mm_ops;
	refcount_set(&m->mobj.refc, 1);

	return &m->mobj;
}


/*
 * mobj_shm implementation. A mobj_shm represents a buffer in the predefined
 * shared memory region:
 * - it is physically contiguous.
 * - it is identified in the static physical layout as MEM_AREA_NSEC_SHM.
 * - its mobjs match the specific CORE_MEM_NSEC_SHM attribute as well as the
 *   generic non-secure CORE_MEM_NON_SEC attribute.
 */

struct mobj_shm {
	struct mobj mobj;
	paddr_t pa;
	uint64_t cookie;
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj);

static void *mobj_shm_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	if (!mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return phys_to_virt(m->pa + offset, MEM_AREA_NSEC_SHM,
			    mobj->size - offset);
}

static TEE_Result mobj_shm_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_shm *m = to_mobj_shm(mobj);
	paddr_t p;

	if (!pa || offs >= mobj->size)
		return TEE_ERROR_GENERIC;

	p = m->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_shm_get_pa);

static size_t mobj_shm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	assert(IS_POWER_OF_TWO(granule));
	return to_mobj_shm(mobj)->pa & (granule - 1);
}

static bool mobj_shm_matches(struct mobj *mobj __unused, enum buf_is_attr attr)
{
	return attr == CORE_MEM_NSEC_SHM || attr == CORE_MEM_NON_SEC;
}

static TEE_Result mobj_shm_get_mem_type(struct mobj *mobj __unused,
					uint32_t *mem_type)
{
	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = TEE_MATTR_MEM_TYPE_CACHED;

	return TEE_SUCCESS;
}

static void mobj_shm_free(struct mobj *mobj)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	free(m);
}

static uint64_t mobj_shm_get_cookie(struct mobj *mobj)
{
	return to_mobj_shm(mobj)->cookie;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_shm_ops
__weak __relrodata_unpaged("mobj_shm_ops") = {
	.get_va = mobj_shm_get_va,
	.get_pa = mobj_shm_get_pa,
	.get_phys_offs = mobj_shm_get_phys_offs,
	.get_mem_type = mobj_shm_get_mem_type,
	.matches = mobj_shm_matches,
	.free = mobj_shm_free,
	.get_cookie = mobj_shm_get_cookie,
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_shm_ops);
	return container_of(mobj, struct mobj_shm, mobj);
}

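/*
 * Wraps a buffer that must lie entirely within the non-secure shared memory
 * area; @cookie is the identifier the normal world uses for this buffer.
 */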
struct mobj *mobj_shm_alloc(paddr_t pa, size_t size, uint64_t cookie)
{
	struct mobj_shm *m;

	if (!core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size))
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_shm_ops;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&m->mobj.refc, 1);
	m->pa = pa;
	m->cookie = cookie;

	return &m->mobj;
}

#ifdef CFG_PAGED_USER_TA
/*
 * mobj_seccpy_shm implementation
 */

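/*
 * A pager-backed secure buffer used for secure copies of shared memory,
 * private to the user TA context that allocated it.
 */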
struct mobj_seccpy_shm {
	struct user_ta_ctx *utc;
	vaddr_t va;
	struct mobj mobj;
	struct fobj *fobj;
};

static bool __maybe_unused mobj_is_seccpy_shm(struct mobj *mobj);

static struct mobj_seccpy_shm *to_mobj_seccpy_shm(struct mobj *mobj)
{
	assert(mobj_is_seccpy_shm(mobj));
	return container_of(mobj, struct mobj_seccpy_shm, mobj);
}

static void *mobj_seccpy_shm_get_va(struct mobj *mobj, size_t offs, size_t len)
{
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

	if (&m->utc->ta_ctx.ts_ctx != thread_get_tsd()->ctx)
		return NULL;

	if (!mobj_check_offset_and_len(mobj, offs, len))
		return NULL;
	return (void *)(m->va + offs);
}

static bool mobj_seccpy_shm_matches(struct mobj *mobj __maybe_unused,
				 enum buf_is_attr attr)
{
	assert(mobj_is_seccpy_shm(mobj));

	return attr == CORE_MEM_SEC || attr == CORE_MEM_TEE_RAM;
}

static void mobj_seccpy_shm_free(struct mobj *mobj)
{
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

	tee_pager_rem_um_region(&m->utc->uctx, m->va, mobj->size);
	vm_rem_rwmem(&m->utc->uctx, mobj, m->va);
	fobj_put(m->fobj);
	free(m);
}

static struct fobj *mobj_seccpy_shm_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_seccpy_shm(mobj)->fobj);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_seccpy_shm_ops
__weak __relrodata_unpaged("mobj_seccpy_shm_ops") = {
	.get_va = mobj_seccpy_shm_get_va,
	.matches = mobj_seccpy_shm_matches,
	.free = mobj_seccpy_shm_free,
	.get_fobj = mobj_seccpy_shm_get_fobj,
};

static bool mobj_is_seccpy_shm(struct mobj *mobj)
{
	return mobj && mobj->ops == &mobj_seccpy_shm_ops;
}

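/*
 * Allocates a secure, paged buffer of @size bytes and maps it read/write
 * into the address space of the current user TA context.
 */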
struct mobj *mobj_seccpy_shm_alloc(size_t size)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct mobj_seccpy_shm *m;
	struct user_ta_ctx *utc;
	vaddr_t va = 0;

	if (!is_user_ta_ctx(tsd->ctx))
		return NULL;
	utc = to_user_ta_ctx(tsd->ctx);

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_seccpy_shm_ops;
	refcount_set(&m->mobj.refc, 1);

	if (vm_add_rwmem(&utc->uctx, &m->mobj, &va) != TEE_SUCCESS)
		goto bad;

	m->fobj = fobj_rw_paged_alloc(ROUNDUP(size, SMALL_PAGE_SIZE) /
				      SMALL_PAGE_SIZE);
	if (tee_pager_add_um_region(&utc->uctx, va, m->fobj,
				    TEE_MATTR_PRW | TEE_MATTR_URW))
		goto bad;

	m->va = va;
	m->utc = to_user_ta_ctx(tsd->ctx);
	return &m->mobj;
bad:
	if (va)
		vm_rem_rwmem(&utc->uctx, &m->mobj, va);
	fobj_put(m->fobj);
	free(m);
	return NULL;
}


#endif /*CFG_PAGED_USER_TA*/

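/*
 * A mobj backed by a fobj that provides the pages; the optional file
 * reference is kept alive for as long as the mobj exists.
 */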
struct mobj_with_fobj {
	struct fobj *fobj;
	struct file *file;
	struct mobj mobj;
};

const struct mobj_ops mobj_with_fobj_ops;

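/*
 * Takes its own references on @fobj and @file; the size of the mobj is
 * derived from the page count of the fobj.
 */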
struct mobj *mobj_with_fobj_alloc(struct fobj *fobj, struct file *file)
{
	struct mobj_with_fobj *m = NULL;

	if (!fobj)
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_with_fobj_ops;
	refcount_set(&m->mobj.refc, 1);
	m->mobj.size = fobj->num_pages * SMALL_PAGE_SIZE;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	m->fobj = fobj_get(fobj);
	m->file = file_get(file);

	return &m->mobj;
}

static struct mobj_with_fobj *to_mobj_with_fobj(struct mobj *mobj)
{
	assert(mobj && mobj->ops == &mobj_with_fobj_ops);

	return container_of(mobj, struct mobj_with_fobj, mobj);
}

static bool mobj_with_fobj_matches(struct mobj *mobj __maybe_unused,
				 enum buf_is_attr attr)
{
	assert(to_mobj_with_fobj(mobj));

	/*
	 * All fobjs are supposed to be mapped secure so classify them as
	 * CORE_MEM_SEC. Stay out of CORE_MEM_TEE_RAM etc; if that
	 * information is needed it can probably be carried in another way
	 * than to put the burden directly on fobj.
	 */
	return attr == CORE_MEM_SEC;
}

static void mobj_with_fobj_free(struct mobj *mobj)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	fobj_put(m->fobj);
	file_put(m->file);
	free(m);
}

static struct fobj *mobj_with_fobj_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_with_fobj(mobj)->fobj);
}

static TEE_Result mobj_with_fobj_get_mem_type(struct mobj *mobj __unused,
					      uint32_t *mem_type)
{
	if (!mem_type)
		return TEE_ERROR_GENERIC;

	/* All fobjs are mapped as normal cached memory */
	*mem_type = TEE_MATTR_MEM_TYPE_CACHED;

	return TEE_SUCCESS;
}

static TEE_Result mobj_with_fobj_get_pa(struct mobj *mobj, size_t offs,
					size_t granule, paddr_t *pa)
{
	struct mobj_with_fobj *f = to_mobj_with_fobj(mobj);
	paddr_t p = 0;

	if (!f->fobj->ops->get_pa) {
		assert(mobj_is_paged(mobj));
		return TEE_ERROR_NOT_SUPPORTED;
	}

	p = f->fobj->ops->get_pa(f->fobj, offs / SMALL_PAGE_SIZE) +
	    offs % SMALL_PAGE_SIZE;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_with_fobj_get_pa);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_with_fobj_ops
__weak __relrodata_unpaged("mobj_with_fobj_ops") = {
	.matches = mobj_with_fobj_matches,
	.free = mobj_with_fobj_free,
	.get_fobj = mobj_with_fobj_get_fobj,
	.get_mem_type = mobj_with_fobj_get_mem_type,
	.get_pa = mobj_with_fobj_get_pa,
};

#ifdef CFG_PAGED_USER_TA
bool mobj_is_paged(struct mobj *mobj)
{
	if (mobj->ops == &mobj_seccpy_shm_ops)
		return true;

	if (mobj->ops == &mobj_with_fobj_ops &&
	    !to_mobj_with_fobj(mobj)->fobj->ops->get_pa)
		return true;

	return false;
}
#endif /*CFG_PAGED_USER_TA*/

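/*
 * Registers the global mobjs covering secure TA RAM and the core's own
 * TEE RAM (split into RX and RW parts when CFG_CORE_RWDATA_NOEXEC is
 * enabled).
 */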
static TEE_Result mobj_init(void)
{
	mobj_sec_ddr = mobj_phys_alloc(tee_mm_sec_ddr.lo,
				       tee_mm_sec_ddr.size,
				       TEE_MATTR_MEM_TYPE_CACHED,
				       CORE_MEM_TA_RAM);
	if (!mobj_sec_ddr)
		panic("Failed to register secure ta ram");

	if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
		mobj_tee_ram_rx = mobj_phys_init(0,
						 VCORE_UNPG_RX_SZ,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RX);
		if (!mobj_tee_ram_rx)
			panic("Failed to register tee ram rx");

		mobj_tee_ram_rw = mobj_phys_init(0,
						 VCORE_UNPG_RW_SZ,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram rw");
	} else {
		mobj_tee_ram_rw = mobj_phys_init(TEE_RAM_START,
						 VCORE_UNPG_RW_PA +
						 VCORE_UNPG_RW_SZ -
						 TEE_RAM_START,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram");

		mobj_tee_ram_rx = mobj_tee_ram_rw;
	}

	return TEE_SUCCESS;
}

driver_init_late(mobj_init);