// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2022, Linaro Limited
 */

#include <assert.h>
#include <config.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/linker.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <optee_msg.h>
#include <stdlib.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

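/*
 * Globally visible mobjs, initialized by mobj_init() below: mobj_sec_ddr
 * covers the secure DDR used as TA RAM, while mobj_tee_ram_rx and
 * mobj_tee_ram_rw cover the TEE core read-execute and read-write memory
 * areas.
 */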
struct mobj *mobj_sec_ddr;
struct mobj *mobj_tee_ram_rx;
struct mobj *mobj_tee_ram_rw;

/*
 * mobj_phys implementation
 */

struct mobj_phys {
	struct mobj mobj;
	enum buf_is_attr battr;
	/* Defined by TEE_MATTR_MEM_TYPE_* in tee_mmu_types.h */
	uint32_t mem_type;
	vaddr_t va;
	paddr_t pa;
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj);

static void *mobj_phys_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!moph->va || !mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return (void *)(moph->va + offset);
}

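/*
 * Returns the physical address at @offs, optionally rounded down to the
 * start of the containing granule. Only small-page and page-directory
 * sized granules are supported.
 */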
static TEE_Result mobj_phys_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	paddr_t p;

	if (!pa)
		return TEE_ERROR_GENERIC;

	p = moph->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_phys_get_pa);

static TEE_Result mobj_phys_get_mem_type(struct mobj *mobj, uint32_t *mem_type)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = moph->mem_type;
	return TEE_SUCCESS;
}

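/*
 * CORE_MEM_SEC acts as a wildcard matching any of the secure attributes,
 * CORE_MEM_NON_SEC only matches non-secure shared memory, and the
 * specific attributes must match exactly.
 */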
static bool mobj_phys_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	enum buf_is_attr a;

	a = moph->battr;

	switch (attr) {
	case CORE_MEM_SEC:
		return a == CORE_MEM_SEC || a == CORE_MEM_TEE_RAM ||
		       a == CORE_MEM_TA_RAM || a == CORE_MEM_SDP_MEM;
	case CORE_MEM_NON_SEC:
		return a == CORE_MEM_NSEC_SHM;
	case CORE_MEM_TEE_RAM:
	case CORE_MEM_TA_RAM:
	case CORE_MEM_NSEC_SHM:
	case CORE_MEM_SDP_MEM:
		return attr == a;
	default:
		return false;
	}
}

static void mobj_phys_free(struct mobj *mobj)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	free(moph);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_phys_ops
__weak __relrodata_unpaged("mobj_phys_ops") = {
	.get_va = mobj_phys_get_va,
	.get_pa = mobj_phys_get_pa,
	.get_phys_offs = NULL, /* only offset 0 */
	.get_mem_type = mobj_phys_get_mem_type,
	.matches = mobj_phys_matches,
	.free = mobj_phys_free,
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_phys_ops);
	return container_of(mobj, struct mobj_phys, mobj);
}

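/*
 * When @pa is zero the caller has no fixed physical address in mind and
 * an exclusive mapping of @area_type is looked up instead, supplying
 * both the physical and the virtual address.
 */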
static struct mobj *mobj_phys_init(paddr_t pa, size_t size, uint32_t mem_type,
				   enum buf_is_attr battr,
				   enum teecore_memtypes area_type)
{
	void *va = NULL;
	struct mobj_phys *moph = NULL;
	struct tee_mmap_region *map = NULL;

	if ((pa & CORE_MMU_USER_PARAM_MASK) ||
	    (size & CORE_MMU_USER_PARAM_MASK)) {
		DMSG("Expect %#x alignment", CORE_MMU_USER_PARAM_SIZE);
		return NULL;
	}

	if (pa) {
		va = phys_to_virt(pa, area_type, size);
	} else {
		map = core_mmu_find_mapping_exclusive(area_type, size);
		if (!map)
			return NULL;

		pa = map->pa;
		va = (void *)map->va;
	}

	/* Only SDP memory may not have a virtual address */
	if (!va && battr != CORE_MEM_SDP_MEM)
		return NULL;

	moph = calloc(1, sizeof(*moph));
	if (!moph)
		return NULL;

	moph->battr = battr;
	moph->mem_type = mem_type;
	moph->mobj.size = size;
	moph->mobj.ops = &mobj_phys_ops;
	refcount_set(&moph->mobj.refc, 1);
	moph->pa = pa;
	moph->va = (vaddr_t)va;

	return &moph->mobj;
}

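/*
 * A minimal usage sketch (sdp_pa and sdp_sz are hypothetical, suitably
 * aligned values): wrapping a secure data path buffer in a mobj:
 *
 *	struct mobj *m = mobj_phys_alloc(sdp_pa, sdp_sz,
 *					 TEE_MATTR_MEM_TYPE_CACHED,
 *					 CORE_MEM_SDP_MEM);
 *	if (!m)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 */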
struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t mem_type,
			     enum buf_is_attr battr)
{
	enum teecore_memtypes area_type;

	switch (battr) {
	case CORE_MEM_TEE_RAM:
		area_type = MEM_AREA_TEE_RAM_RW_DATA;
		break;
	case CORE_MEM_TA_RAM:
		area_type = MEM_AREA_TA_RAM;
		break;
	case CORE_MEM_NSEC_SHM:
		area_type = MEM_AREA_NSEC_SHM;
		break;
	case CORE_MEM_SDP_MEM:
		area_type = MEM_AREA_SDP_MEM;
		break;
	default:
		DMSG("can't allocate with specified attribute");
		return NULL;
	}

	return mobj_phys_init(pa, size, mem_type, battr, area_type);
}

/*
 * mobj_virt implementation
 */

static void mobj_virt_assert_type(struct mobj *mobj);

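/*
 * mobj_virt maps an offset directly to a virtual address, i.e. the mobj
 * covers the whole virtual address space. @len is only used by the
 * assert() and is therefore __maybe_unused in release builds.
 */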
static void *mobj_virt_get_va(struct mobj *mobj, size_t offset,
			      size_t len __maybe_unused)
{
	mobj_virt_assert_type(mobj);
	assert(mobj_check_offset_and_len(mobj, offset, len));

	return (void *)(vaddr_t)offset;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_virt_ops
__weak __relrodata_unpaged("mobj_virt_ops") = {
	.get_va = mobj_virt_get_va,
};

static void mobj_virt_assert_type(struct mobj *mobj __maybe_unused)
{
	assert(mobj->ops == &mobj_virt_ops);
}

struct mobj mobj_virt = { .ops = &mobj_virt_ops, .size = SIZE_MAX };

/*
 * mobj_shm implementation. A mobj_shm represents a buffer in the
 * predefined shared memory region:
 * - it is physically contiguous.
 * - it is identified in the static physical layout as MEM_AREA_NSEC_SHM.
 * - it matches both the specific CORE_MEM_NSEC_SHM attribute and the
 *   generic non-secure CORE_MEM_NON_SEC attribute.
 */

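/*
 * The cookie is the identifier the normal world uses to refer to this
 * shared memory buffer in messages to OP-TEE.
 */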
struct mobj_shm {
	struct mobj mobj;
	paddr_t pa;
	uint64_t cookie;
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj);

static void *mobj_shm_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	if (!mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return phys_to_virt(m->pa + offset, MEM_AREA_NSEC_SHM,
			    mobj->size - offset);
}

static TEE_Result mobj_shm_get_pa(struct mobj *mobj, size_t offs,
				  size_t granule, paddr_t *pa)
{
	struct mobj_shm *m = to_mobj_shm(mobj);
	paddr_t p;

	if (!pa || offs >= mobj->size)
		return TEE_ERROR_GENERIC;

	p = m->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_shm_get_pa);

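/*
 * The buffer may start at any offset within a granule; reporting that
 * offset lets the mapping code align the virtual address to the same
 * offset within the granule as the physical address.
 */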
static size_t mobj_shm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	assert(IS_POWER_OF_TWO(granule));
	return to_mobj_shm(mobj)->pa & (granule - 1);
}

static bool mobj_shm_matches(struct mobj *mobj __unused, enum buf_is_attr attr)
{
	return attr == CORE_MEM_NSEC_SHM || attr == CORE_MEM_NON_SEC;
}

static TEE_Result mobj_shm_get_mem_type(struct mobj *mobj __unused,
					uint32_t *mem_type)
{
	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = TEE_MATTR_MEM_TYPE_CACHED;

	return TEE_SUCCESS;
}

static void mobj_shm_free(struct mobj *mobj)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	free(m);
}

static uint64_t mobj_shm_get_cookie(struct mobj *mobj)
{
	return to_mobj_shm(mobj)->cookie;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_shm_ops
__weak __relrodata_unpaged("mobj_shm_ops") = {
	.get_va = mobj_shm_get_va,
	.get_pa = mobj_shm_get_pa,
	.get_phys_offs = mobj_shm_get_phys_offs,
	.get_mem_type = mobj_shm_get_mem_type,
	.matches = mobj_shm_matches,
	.free = mobj_shm_free,
	.get_cookie = mobj_shm_get_cookie,
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_shm_ops);
	return container_of(mobj, struct mobj_shm, mobj);
}

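/*
 * The candidate buffer is checked against the registered non-secure
 * shared memory region; allocation fails rather than wrapping memory
 * that is not known to be non-secure shared memory.
 */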
struct mobj *mobj_shm_alloc(paddr_t pa, size_t size, uint64_t cookie)
{
	struct mobj_shm *m;

	if (!core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size))
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_shm_ops;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&m->mobj.refc, 1);
	m->pa = pa;
	m->cookie = cookie;

	return &m->mobj;
}

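/*
 * mobj_with_fobj implementation. Wraps a fobj, and optionally the file
 * it belongs to, in a mobj, holding a reference on both for the
 * lifetime of the mobj.
 */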
struct mobj_with_fobj {
	struct fobj *fobj;
	struct file *file;
	struct mobj mobj;
	uint8_t mem_type;
};

const struct mobj_ops mobj_with_fobj_ops;

struct mobj *mobj_with_fobj_alloc(struct fobj *fobj, struct file *file,
				  uint32_t mem_type)
{
	struct mobj_with_fobj *m = NULL;

	assert(!(mem_type & ~TEE_MATTR_MEM_TYPE_MASK));

	if (!fobj)
		return NULL;
	if (mem_type > UINT8_MAX)
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_with_fobj_ops;
	refcount_set(&m->mobj.refc, 1);
	m->mobj.size = fobj->num_pages * SMALL_PAGE_SIZE;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	m->fobj = fobj_get(fobj);
	m->file = file_get(file);
	m->mem_type = mem_type;

	return &m->mobj;
}

static struct mobj_with_fobj *to_mobj_with_fobj(struct mobj *mobj)
{
	assert(mobj && mobj->ops == &mobj_with_fobj_ops);

	return container_of(mobj, struct mobj_with_fobj, mobj);
}

static bool mobj_with_fobj_matches(struct mobj *mobj __maybe_unused,
				   enum buf_is_attr attr)
{
	assert(to_mobj_with_fobj(mobj));

	/*
	 * All fobjs are supposed to be mapped secure, so classify them as
	 * CORE_MEM_SEC. Stay out of CORE_MEM_TEE_RAM etc; if that
	 * information is needed it can probably be carried some other way
	 * than putting the burden directly on the fobj.
	 */
	return attr == CORE_MEM_SEC;
}

static void mobj_with_fobj_free(struct mobj *mobj)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	fobj_put(m->fobj);
	file_put(m->file);
	free(m);
}

static struct fobj *mobj_with_fobj_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_with_fobj(mobj)->fobj);
}

static TEE_Result mobj_with_fobj_get_mem_type(struct mobj *mobj,
					      uint32_t *mem_type)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = m->mem_type;

	return TEE_SUCCESS;
}

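/*
 * A paged fobj has no stable backing physical address to report, so the
 * lookup is only supported when the fobj provides a get_pa() callback;
 * see mobj_is_paged() below.
 */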
static TEE_Result mobj_with_fobj_get_pa(struct mobj *mobj, size_t offs,
					size_t granule, paddr_t *pa)
{
	struct mobj_with_fobj *f = to_mobj_with_fobj(mobj);
	paddr_t p = 0;

	if (!f->fobj->ops->get_pa) {
		assert(mobj_is_paged(mobj));
		return TEE_ERROR_NOT_SUPPORTED;
	}

	p = f->fobj->ops->get_pa(f->fobj, offs / SMALL_PAGE_SIZE) +
	    offs % SMALL_PAGE_SIZE;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_with_fobj_get_pa);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_with_fobj_ops
__weak __relrodata_unpaged("mobj_with_fobj_ops") = {
	.matches = mobj_with_fobj_matches,
	.free = mobj_with_fobj_free,
	.get_fobj = mobj_with_fobj_get_fobj,
	.get_mem_type = mobj_with_fobj_get_mem_type,
	.get_pa = mobj_with_fobj_get_pa,
};

#ifdef CFG_PAGED_USER_TA
bool mobj_is_paged(struct mobj *mobj)
{
	if (mobj->ops == &mobj_with_fobj_ops &&
	    !to_mobj_with_fobj(mobj)->fobj->ops->get_pa)
		return true;

	return false;
}
#endif /*CFG_PAGED_USER_TA*/

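/*
 * Registers the default mobjs at boot: TA RAM always, and either split
 * read-execute/read-write TEE RAM mobjs when CFG_CORE_RWDATA_NOEXEC is
 * enabled or a single mobj covering all of TEE RAM otherwise.
 */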
static TEE_Result mobj_init(void)
{
	mobj_sec_ddr = mobj_phys_alloc(tee_mm_sec_ddr.lo,
				       tee_mm_sec_ddr.size,
				       TEE_MATTR_MEM_TYPE_CACHED,
				       CORE_MEM_TA_RAM);
	if (!mobj_sec_ddr)
		panic("Failed to register secure ta ram");

	if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
		mobj_tee_ram_rx = mobj_phys_init(0,
						 VCORE_UNPG_RX_SZ,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RX);
		if (!mobj_tee_ram_rx)
			panic("Failed to register tee ram rx");

		mobj_tee_ram_rw = mobj_phys_init(0,
						 VCORE_UNPG_RW_SZ,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram rw");
	} else {
		mobj_tee_ram_rw = mobj_phys_init(TEE_RAM_START,
						 VCORE_UNPG_RW_PA +
						 VCORE_UNPG_RW_SZ -
						 TEE_RAM_START,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram");

		mobj_tee_ram_rx = mobj_tee_ram_rw;
	}

	return TEE_SUCCESS;
}

driver_init_late(mobj_init);