xref: /optee_os/core/mm/mobj.c (revision c04a96a45ffe0e665a4d86e542ec921fae932aa8)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2021, Linaro Limited
 */

#include <assert.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/linker.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <optee_msg.h>
#include <sm/optee_smc.h>
#include <stdlib.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

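/*
 * Globally visible mobjs covering the secure DDR carveout used as TA RAM
 * (mobj_sec_ddr) and the TEE core's own RAM (mobj_tee_ram). Both are
 * created once at boot by mobj_init() at the end of this file.
 */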
struct mobj *mobj_sec_ddr;
struct mobj *mobj_tee_ram;

/*
 * mobj_phys implementation
 */

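/*
 * A mobj_phys describes a physically contiguous buffer that is part of the
 * static memory map: @pa is the physical base address, @va the matching
 * virtual address (0 if unmapped, which is only allowed for SDP memory),
 * @battr the buffer attribute tested by mobj_phys_matches() and @cattr the
 * cache attributes reported by mobj_phys_get_cattr().
 */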
struct mobj_phys {
	struct mobj mobj;
	enum buf_is_attr battr;
	uint32_t cattr; /* Defined by TEE_MATTR_CACHE_* in tee_mmu_types.h */
	vaddr_t va;
	paddr_t pa;
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj);

static void *mobj_phys_get_va(struct mobj *mobj, size_t offset)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!moph->va || offset >= mobj->size)
		return NULL;

	return (void *)(moph->va + offset);
}

static TEE_Result mobj_phys_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	paddr_t p;

	if (!pa)
		return TEE_ERROR_GENERIC;

	p = moph->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_phys_get_pa);

static TEE_Result mobj_phys_get_cattr(struct mobj *mobj, uint32_t *cattr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!cattr)
		return TEE_ERROR_GENERIC;

	*cattr = moph->cattr;
	return TEE_SUCCESS;
}

static bool mobj_phys_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	enum buf_is_attr a;

	a = moph->battr;

	switch (attr) {
	case CORE_MEM_SEC:
		return a == CORE_MEM_SEC || a == CORE_MEM_TEE_RAM ||
		       a == CORE_MEM_TA_RAM || a == CORE_MEM_SDP_MEM;
	case CORE_MEM_NON_SEC:
		return a == CORE_MEM_NSEC_SHM;
	case CORE_MEM_TEE_RAM:
	case CORE_MEM_TA_RAM:
	case CORE_MEM_NSEC_SHM:
	case CORE_MEM_SDP_MEM:
		return attr == a;
	default:
		return false;
	}
}

static void mobj_phys_free(struct mobj *mobj)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	free(moph);
}

static const struct mobj_ops mobj_phys_ops __rodata_unpaged = {
	.get_va = mobj_phys_get_va,
	.get_pa = mobj_phys_get_pa,
	.get_phys_offs = NULL, /* only offset 0 */
	.get_cattr = mobj_phys_get_cattr,
	.matches = mobj_phys_matches,
	.free = mobj_phys_free,
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_phys_ops);
	return container_of(mobj, struct mobj_phys, mobj);
}

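/*
 * mobj_phys_alloc() - create a mobj covering an already mapped, physically
 * contiguous buffer. @pa and @size must be aligned to
 * CORE_MMU_USER_PARAM_SIZE and @battr selects the static memory area the
 * buffer is expected to live in. Returns NULL on bad arguments, on
 * unmapped non-SDP memory, or when out of memory.
 *
 * Illustrative use only (the physical address below is made up):
 *	struct mobj *m = mobj_phys_alloc(0x42000000, SMALL_PAGE_SIZE,
 *					 TEE_MATTR_CACHE_CACHED,
 *					 CORE_MEM_NSEC_SHM);
 *	if (!m)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 */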
struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t cattr,
			     enum buf_is_attr battr)
{
	struct mobj_phys *moph;
	enum teecore_memtypes area_type;
	void *va;

	if ((pa & CORE_MMU_USER_PARAM_MASK) ||
	    (size & CORE_MMU_USER_PARAM_MASK)) {
		DMSG("Expect %#x alignment", CORE_MMU_USER_PARAM_SIZE);
		return NULL;
	}

	switch (battr) {
	case CORE_MEM_TEE_RAM:
		area_type = MEM_AREA_TEE_RAM_RW_DATA;
		break;
	case CORE_MEM_TA_RAM:
		area_type = MEM_AREA_TA_RAM;
		break;
	case CORE_MEM_NSEC_SHM:
		area_type = MEM_AREA_NSEC_SHM;
		break;
	case CORE_MEM_SDP_MEM:
		area_type = MEM_AREA_SDP_MEM;
		break;
	default:
		DMSG("can't allocate with specified attribute");
		return NULL;
	}

	/* Only SDP memory may not have a virtual address */
	va = phys_to_virt(pa, area_type);
	if (!va && battr != CORE_MEM_SDP_MEM)
		return NULL;

	moph = calloc(1, sizeof(*moph));
	if (!moph)
		return NULL;

	moph->battr = battr;
	moph->cattr = cattr;
	moph->mobj.size = size;
	moph->mobj.ops = &mobj_phys_ops;
	refcount_set(&moph->mobj.refc, 1);
	moph->pa = pa;
	moph->va = (vaddr_t)va;

	return &moph->mobj;
}

/*
 * mobj_virt implementation
 */

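/*
 * mobj_virt is a singleton covering the whole virtual address space:
 * mobj_get_va() simply returns the offset cast to a pointer. It is used
 * for buffers that are only known by an already mapped virtual address.
 */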
static void mobj_virt_assert_type(struct mobj *mobj);

static void *mobj_virt_get_va(struct mobj *mobj, size_t offset)
{
	mobj_virt_assert_type(mobj);

	return (void *)(vaddr_t)offset;
}

static const struct mobj_ops mobj_virt_ops __rodata_unpaged = {
	.get_va = mobj_virt_get_va,
};

static void mobj_virt_assert_type(struct mobj *mobj __maybe_unused)
{
	assert(mobj->ops == &mobj_virt_ops);
}

struct mobj mobj_virt = { .ops = &mobj_virt_ops, .size = SIZE_MAX };

/*
 * mobj_mm implementation
 */

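/*
 * A mobj_mm is a sub-range of a parent mobj, carved out with a tee_mm pool
 * allocation. All operations are forwarded to @parent_mobj with the offset
 * of @mm within the pool added.
 */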
struct mobj_mm {
	tee_mm_entry_t *mm;
	struct mobj *parent_mobj;
	struct mobj mobj;
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj);

static size_t mobj_mm_offs(struct mobj *mobj, size_t offs)
{
	tee_mm_entry_t *mm = to_mobj_mm(mobj)->mm;

	return (mm->offset << mm->pool->shift) + offs;
}

static void *mobj_mm_get_va(struct mobj *mobj, size_t offs)
{
	return mobj_get_va(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs));
}

static TEE_Result mobj_mm_get_pa(struct mobj *mobj, size_t offs,
				    size_t granule, paddr_t *pa)
{
	return mobj_get_pa(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs), granule, pa);
}
DECLARE_KEEP_PAGER(mobj_mm_get_pa);

static size_t mobj_mm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	return mobj_get_phys_offs(to_mobj_mm(mobj)->parent_mobj, granule);
}

static TEE_Result mobj_mm_get_cattr(struct mobj *mobj, uint32_t *cattr)
{
	return mobj_get_cattr(to_mobj_mm(mobj)->parent_mobj, cattr);
}

static bool mobj_mm_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	return mobj_matches(to_mobj_mm(mobj)->parent_mobj, attr);
}

static void mobj_mm_free(struct mobj *mobj)
{
	struct mobj_mm *m = to_mobj_mm(mobj);

	tee_mm_free(m->mm);
	free(m);
}

static const struct mobj_ops mobj_mm_ops __rodata_unpaged = {
	.get_va = mobj_mm_get_va,
	.get_pa = mobj_mm_get_pa,
	.get_phys_offs = mobj_mm_get_phys_offs,
	.get_cattr = mobj_mm_get_cattr,
	.matches = mobj_mm_matches,
	.free = mobj_mm_free,
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_mm_ops);
	return container_of(mobj, struct mobj_mm, mobj);
}

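/*
 * mobj_mm_alloc() - reserve @size bytes from @pool and return a mobj
 * representing that range inside @mobj_parent. No reference is taken on
 * the parent, so the parent must outlive the returned mobj.
 *
 * Minimal sketch of a possible use (parent/pool pairing is only an
 * illustrative assumption):
 *	struct mobj *m = mobj_mm_alloc(mobj_sec_ddr, 4 * SMALL_PAGE_SIZE,
 *				       &tee_mm_sec_ddr);
 *	if (!m)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 */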
struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
			      tee_mm_pool_t *pool)
{
	struct mobj_mm *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;

	m->mm = tee_mm_alloc(pool, size);
	if (!m->mm) {
		free(m);
		return NULL;
	}

	m->parent_mobj = mobj_parent;
	m->mobj.size = size;
	m->mobj.ops = &mobj_mm_ops;
	refcount_set(&m->mobj.refc, 1);

	return &m->mobj;
}

/*
 * mobj_shm implementation. A mobj_shm represents a buffer in the predefined
 * non-secure shared memory region:
 * - it is physically contiguous,
 * - it is part of the static physical layout as MEM_AREA_NSEC_SHM,
 * - it matches both the specific CORE_MEM_NSEC_SHM attribute and the
 *   generic non-secure CORE_MEM_NON_SEC attribute.
 */
309 
310 struct mobj_shm {
311 	struct mobj mobj;
312 	paddr_t pa;
313 	uint64_t cookie;
314 };
315 
316 static struct mobj_shm *to_mobj_shm(struct mobj *mobj);
317 
318 static void *mobj_shm_get_va(struct mobj *mobj, size_t offset)
319 {
320 	struct mobj_shm *m = to_mobj_shm(mobj);
321 
322 	if (offset >= mobj->size)
323 		return NULL;
324 
325 	return phys_to_virt(m->pa + offset, MEM_AREA_NSEC_SHM);
326 }
327 
328 static TEE_Result mobj_shm_get_pa(struct mobj *mobj, size_t offs,
329 				   size_t granule, paddr_t *pa)
330 {
331 	struct mobj_shm *m = to_mobj_shm(mobj);
332 	paddr_t p;
333 
334 	if (!pa || offs >= mobj->size)
335 		return TEE_ERROR_GENERIC;
336 
337 	p = m->pa + offs;
338 
339 	if (granule) {
340 		if (granule != SMALL_PAGE_SIZE &&
341 		    granule != CORE_MMU_PGDIR_SIZE)
342 			return TEE_ERROR_GENERIC;
343 		p &= ~(granule - 1);
344 	}
345 
346 	*pa = p;
347 	return TEE_SUCCESS;
348 }
349 DECLARE_KEEP_PAGER(mobj_shm_get_pa);
350 
351 static size_t mobj_shm_get_phys_offs(struct mobj *mobj, size_t granule)
352 {
353 	assert(IS_POWER_OF_TWO(granule));
354 	return to_mobj_shm(mobj)->pa & (granule - 1);
355 }
356 
357 static bool mobj_shm_matches(struct mobj *mobj __unused, enum buf_is_attr attr)
358 {
359 	return attr == CORE_MEM_NSEC_SHM || attr == CORE_MEM_NON_SEC;
360 }
361 
362 static void mobj_shm_free(struct mobj *mobj)
363 {
364 	struct mobj_shm *m = to_mobj_shm(mobj);
365 
366 	free(m);
367 }
368 
369 static uint64_t mobj_shm_get_cookie(struct mobj *mobj)
370 {
371 	return to_mobj_shm(mobj)->cookie;
372 }
373 
374 static const struct mobj_ops mobj_shm_ops __rodata_unpaged = {
375 	.get_va = mobj_shm_get_va,
376 	.get_pa = mobj_shm_get_pa,
377 	.get_phys_offs = mobj_shm_get_phys_offs,
378 	.matches = mobj_shm_matches,
379 	.free = mobj_shm_free,
380 	.get_cookie = mobj_shm_get_cookie,
381 };
382 
383 static struct mobj_shm *to_mobj_shm(struct mobj *mobj)
384 {
385 	assert(mobj->ops == &mobj_shm_ops);
386 	return container_of(mobj, struct mobj_shm, mobj);
387 }
388 
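/*
 * mobj_shm_alloc() - create a mobj for a physically contiguous range of
 * non-secure shared memory. The range must be entirely within
 * CORE_MEM_NSEC_SHM, otherwise NULL is returned. @cookie is the identifier
 * assigned by normal world and is reported back via mobj_get_cookie().
 */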
struct mobj *mobj_shm_alloc(paddr_t pa, size_t size, uint64_t cookie)
{
	struct mobj_shm *m;

	if (!core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size))
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_shm_ops;
	refcount_set(&m->mobj.refc, 1);
	m->pa = pa;
	m->cookie = cookie;

	return &m->mobj;
}

#ifdef CFG_PAGED_USER_TA
/*
 * mobj_seccpy_shm implementation
 */

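/*
 * A mobj_seccpy_shm is a pageable, secure buffer private to one user TA
 * context (@utc). It is mapped at @va in that TA's address space and
 * backed by the read/write-paged @fobj. It is used to hold a secure copy
 * of a shared memory buffer while the TA is paged.
 */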
struct mobj_seccpy_shm {
	struct user_ta_ctx *utc;
	vaddr_t va;
	struct mobj mobj;
	struct fobj *fobj;
};

static bool __maybe_unused mobj_is_seccpy_shm(struct mobj *mobj);

static struct mobj_seccpy_shm *to_mobj_seccpy_shm(struct mobj *mobj)
{
	assert(mobj_is_seccpy_shm(mobj));
	return container_of(mobj, struct mobj_seccpy_shm, mobj);
}

static void *mobj_seccpy_shm_get_va(struct mobj *mobj, size_t offs)
{
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

	if (&m->utc->ta_ctx.ts_ctx != thread_get_tsd()->ctx)
		return NULL;

	if (offs >= mobj->size)
		return NULL;
	return (void *)(m->va + offs);
}

static bool mobj_seccpy_shm_matches(struct mobj *mobj __maybe_unused,
				 enum buf_is_attr attr)
{
	assert(mobj_is_seccpy_shm(mobj));

	return attr == CORE_MEM_SEC || attr == CORE_MEM_TEE_RAM;
}

static void mobj_seccpy_shm_free(struct mobj *mobj)
{
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

	tee_pager_rem_um_region(&m->utc->uctx, m->va, mobj->size);
	vm_rem_rwmem(&m->utc->uctx, mobj, m->va);
	fobj_put(m->fobj);
	free(m);
}

static struct fobj *mobj_seccpy_shm_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_seccpy_shm(mobj)->fobj);
}

static const struct mobj_ops mobj_seccpy_shm_ops __rodata_unpaged = {
	.get_va = mobj_seccpy_shm_get_va,
	.matches = mobj_seccpy_shm_matches,
	.free = mobj_seccpy_shm_free,
	.get_fobj = mobj_seccpy_shm_get_fobj,
};

static bool mobj_is_seccpy_shm(struct mobj *mobj)
{
	return mobj && mobj->ops == &mobj_seccpy_shm_ops;
}

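/*
 * mobj_seccpy_shm_alloc() - allocate a pageable buffer of @size bytes in
 * the current user TA's address space. Must be called from a thread with a
 * user TA context, returns NULL otherwise or on failure.
 */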
struct mobj *mobj_seccpy_shm_alloc(size_t size)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct mobj_seccpy_shm *m;
	struct user_ta_ctx *utc;
	vaddr_t va = 0;

	if (!is_user_ta_ctx(tsd->ctx))
		return NULL;
	utc = to_user_ta_ctx(tsd->ctx);

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_seccpy_shm_ops;
	refcount_set(&m->mobj.refc, 1);

	if (vm_add_rwmem(&utc->uctx, &m->mobj, &va) != TEE_SUCCESS)
		goto bad;

	m->fobj = fobj_rw_paged_alloc(ROUNDUP(size, SMALL_PAGE_SIZE) /
				      SMALL_PAGE_SIZE);
	if (!m->fobj)
		goto bad;
	if (tee_pager_add_um_region(&utc->uctx, va, m->fobj,
				    TEE_MATTR_PRW | TEE_MATTR_URW))
		goto bad;

	m->va = va;
	m->utc = utc;
	return &m->mobj;
bad:
	if (va)
		vm_rem_rwmem(&utc->uctx, &m->mobj, va);
	fobj_put(m->fobj);
	free(m);
	return NULL;
}

#endif /*CFG_PAGED_USER_TA*/

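/*
 * A mobj_with_fobj wraps a page-granular backing object (@fobj) and,
 * optionally, the file (@file) it originates from. Physical addresses are
 * resolved per page through the fobj, so the memory does not have to be
 * physically contiguous.
 */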
struct mobj_with_fobj {
	struct fobj *fobj;
	struct file *file;
	struct mobj mobj;
};

static const struct mobj_ops mobj_with_fobj_ops;

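/*
 * mobj_with_fobj_alloc() - create a mobj covering all pages of @fobj. A
 * reference is taken on @fobj and @file and dropped again when the mobj is
 * freed.
 *
 * Minimal sketch, assuming a single read/write-paged page is wanted and
 * that a NULL @file is accepted by file_get()/file_put():
 *	struct fobj *f = fobj_rw_paged_alloc(1);
 *	struct mobj *m = NULL;
 *
 *	if (f) {
 *		m = mobj_with_fobj_alloc(f, NULL);
 *		fobj_put(f);
 *	}
 */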
struct mobj *mobj_with_fobj_alloc(struct fobj *fobj, struct file *file)
{
	struct mobj_with_fobj *m = NULL;

	if (!fobj)
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_with_fobj_ops;
	refcount_set(&m->mobj.refc, 1);
	m->mobj.size = fobj->num_pages * SMALL_PAGE_SIZE;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	m->fobj = fobj_get(fobj);
	m->file = file_get(file);

	return &m->mobj;
}

static struct mobj_with_fobj *to_mobj_with_fobj(struct mobj *mobj)
{
	assert(mobj && mobj->ops == &mobj_with_fobj_ops);

	return container_of(mobj, struct mobj_with_fobj, mobj);
}

static bool mobj_with_fobj_matches(struct mobj *mobj __maybe_unused,
				 enum buf_is_attr attr)
{
	assert(to_mobj_with_fobj(mobj));

	/*
	 * All fobjs are supposed to be mapped secure, so classify them as
	 * CORE_MEM_SEC. Don't report CORE_MEM_TEE_RAM etc here; if that
	 * information is needed it can probably be carried in some other
	 * way than putting the burden directly on the fobj.
	 */
	return attr == CORE_MEM_SEC;
}

static void mobj_with_fobj_free(struct mobj *mobj)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	fobj_put(m->fobj);
	file_put(m->file);
	free(m);
}

static struct fobj *mobj_with_fobj_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_with_fobj(mobj)->fobj);
}

static TEE_Result mobj_with_fobj_get_cattr(struct mobj *mobj __unused,
					   uint32_t *cattr)
{
	if (!cattr)
		return TEE_ERROR_GENERIC;

	/* All fobjs are mapped as normal cached memory */
	*cattr = TEE_MATTR_CACHE_CACHED;

	return TEE_SUCCESS;
}

static TEE_Result mobj_with_fobj_get_pa(struct mobj *mobj, size_t offs,
					size_t granule, paddr_t *pa)
{
	struct mobj_with_fobj *f = to_mobj_with_fobj(mobj);
	paddr_t p = 0;

	if (!f->fobj->ops->get_pa) {
		assert(mobj_is_paged(mobj));
		return TEE_ERROR_NOT_SUPPORTED;
	}

	p = f->fobj->ops->get_pa(f->fobj, offs / SMALL_PAGE_SIZE) +
	    offs % SMALL_PAGE_SIZE;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_with_fobj_get_pa);

static const struct mobj_ops mobj_with_fobj_ops __rodata_unpaged = {
	.matches = mobj_with_fobj_matches,
	.free = mobj_with_fobj_free,
	.get_fobj = mobj_with_fobj_get_fobj,
	.get_cattr = mobj_with_fobj_get_cattr,
	.get_pa = mobj_with_fobj_get_pa,
};

#ifdef CFG_PAGED_USER_TA
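/*
 * mobj_is_paged() - true if the memory behind @mobj is demand paged and
 * thus has no stable physical address: either a mobj_seccpy_shm or a
 * mobj_with_fobj whose fobj cannot report a physical address.
 */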
bool mobj_is_paged(struct mobj *mobj)
{
	if (mobj->ops == &mobj_seccpy_shm_ops)
		return true;

	if (mobj->ops == &mobj_with_fobj_ops &&
	    !to_mobj_with_fobj(mobj)->fobj->ops->get_pa)
		return true;

	return false;
}
#endif /*CFG_PAGED_USER_TA*/

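/*
 * Boot time initialization: register the mobj covering TA RAM
 * (mobj_sec_ddr) and the one covering the TEE core's own RAM
 * (mobj_tee_ram). Runs as a late driver initcall.
 */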
static TEE_Result mobj_init(void)
{
	mobj_sec_ddr = mobj_phys_alloc(tee_mm_sec_ddr.lo,
				       tee_mm_sec_ddr.hi - tee_mm_sec_ddr.lo,
				       OPTEE_SMC_SHM_CACHED, CORE_MEM_TA_RAM);
	if (!mobj_sec_ddr)
		panic("Failed to register secure ta ram");

	mobj_tee_ram = mobj_phys_alloc(TEE_RAM_START,
				       VCORE_UNPG_RW_PA + VCORE_UNPG_RW_SZ -
						TEE_RAM_START,
				       TEE_MATTR_CACHE_CACHED,
				       CORE_MEM_TEE_RAM);
	if (!mobj_tee_ram)
		panic("Failed to register tee ram");

	return TEE_SUCCESS;
}

driver_init_late(mobj_init);