xref: /optee_os/core/mm/mobj.c (revision 8c95493b1db3347888635c5f95011594646467fa)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2022, Linaro Limited
 */

#include <assert.h>
#include <config.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/linker.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <optee_msg.h>
#include <stdlib.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

struct mobj *mobj_tee_ram_rx;
struct mobj *mobj_tee_ram_rw;

/*
 * mobj_phys implementation
 */

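/*
 * A mobj_phys covers a physically contiguous buffer with a fixed physical
 * address and, normally, a virtual address assigned at creation. It is used
 * for the statically mapped memory areas (TEE RAM, TA RAM, non-secure SHM
 * and SDP memory).
 */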
struct mobj_phys {
	struct mobj mobj;
	enum buf_is_attr battr;
	/* Defined by TEE_MATTR_MEM_TYPE_* in tee_mmu_types.h */
	uint32_t mem_type;
	vaddr_t va;
	paddr_t pa;
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj);

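/*
 * Returns the virtual address corresponding to @offset within the mobj, or
 * NULL if the mobj has no virtual mapping (e.g. SDP memory) or if
 * @offset/@len fall outside the covered range.
 */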
static void *mobj_phys_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!moph->va || !mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return (void *)(moph->va + offset);
}

static TEE_Result mobj_phys_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	paddr_t p;

	if (!pa)
		return TEE_ERROR_GENERIC;

	p = moph->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
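/*
 * DECLARE_KEEP_PAGER() keeps this function in the unpaged area when the
 * pager is enabled, since physical addresses may be queried from contexts
 * where a page fault cannot be serviced.
 */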
DECLARE_KEEP_PAGER(mobj_phys_get_pa);

static TEE_Result mobj_phys_get_mem_type(struct mobj *mobj, uint32_t *mem_type)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = moph->mem_type;
	return TEE_SUCCESS;
}

static bool mobj_phys_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	enum buf_is_attr a;

	a = moph->battr;

	switch (attr) {
	case CORE_MEM_SEC:
		return a == CORE_MEM_SEC || a == CORE_MEM_TEE_RAM ||
		       a == CORE_MEM_TA_RAM || a == CORE_MEM_SDP_MEM;
	case CORE_MEM_NON_SEC:
		return a == CORE_MEM_NSEC_SHM;
	case CORE_MEM_TEE_RAM:
	case CORE_MEM_TA_RAM:
	case CORE_MEM_NSEC_SHM:
	case CORE_MEM_SDP_MEM:
		return attr == a;
	default:
		return false;
	}
}

static void mobj_phys_free(struct mobj *mobj)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	free(moph);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_phys_ops
__weak __relrodata_unpaged("mobj_phys_ops") = {
	.get_va = mobj_phys_get_va,
	.get_pa = mobj_phys_get_pa,
	.get_phys_offs = NULL, /* only offset 0 */
	.get_mem_type = mobj_phys_get_mem_type,
	.matches = mobj_phys_matches,
	.free = mobj_phys_free,
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_phys_ops);
	return container_of(mobj, struct mobj_phys, mobj);
}

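/*
 * Creates a mobj_phys covering @size bytes of @area_type memory. If @pa is
 * non-zero the virtual address is looked up with phys_to_virt(), otherwise
 * an exclusive mapping of @area_type is claimed and its pa/va are used.
 * A missing virtual address is only accepted for SDP memory.
 * Returns NULL on failure.
 */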
static struct mobj *mobj_phys_init(paddr_t pa, size_t size, uint32_t mem_type,
				   enum buf_is_attr battr,
				   enum teecore_memtypes area_type)
{
	void *va = NULL;
	struct mobj_phys *moph = NULL;
	struct tee_mmap_region *map = NULL;

	if ((pa & CORE_MMU_USER_PARAM_MASK) ||
	    (size & CORE_MMU_USER_PARAM_MASK)) {
		DMSG("Expect %#x alignment", CORE_MMU_USER_PARAM_SIZE);
		return NULL;
	}

	if (pa) {
		va = phys_to_virt(pa, area_type, size);
	} else {
		map = core_mmu_find_mapping_exclusive(area_type, size);
		if (!map)
			return NULL;

		pa = map->pa;
		va = (void *)map->va;
	}

	/* Only SDP memory may not have a virtual address */
	if (!va && battr != CORE_MEM_SDP_MEM)
		return NULL;

	moph = calloc(1, sizeof(*moph));
	if (!moph)
		return NULL;

	moph->battr = battr;
	moph->mem_type = mem_type;
	moph->mobj.size = size;
	moph->mobj.ops = &mobj_phys_ops;
	refcount_set(&moph->mobj.refc, 1);
	moph->pa = pa;
	moph->va = (vaddr_t)va;

	return &moph->mobj;
}

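/*
 * Public allocator for physically contiguous mobjs. @battr selects the
 * memory area type the buffer must belong to; attributes without a
 * corresponding static memory area are rejected.
 */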
struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t mem_type,
			     enum buf_is_attr battr)
{
	enum teecore_memtypes area_type;

	switch (battr) {
	case CORE_MEM_TEE_RAM:
		area_type = MEM_AREA_TEE_RAM_RW_DATA;
		break;
	case CORE_MEM_TA_RAM:
		area_type = MEM_AREA_TA_RAM;
		break;
	case CORE_MEM_NSEC_SHM:
		area_type = MEM_AREA_NSEC_SHM;
		break;
	case CORE_MEM_SDP_MEM:
		area_type = MEM_AREA_SDP_MEM;
		break;
	default:
		DMSG("can't allocate with specified attribute");
		return NULL;
	}

	return mobj_phys_init(pa, size, mem_type, battr, area_type);
}

/*
 * mobj_virt implementation
 */

static void mobj_virt_assert_type(struct mobj *mobj);

static void *mobj_virt_get_va(struct mobj *mobj, size_t offset,
			      size_t len __maybe_unused)
{
	mobj_virt_assert_type(mobj);
	assert(mobj_check_offset_and_len(mobj, offset, len));

	return (void *)(vaddr_t)offset;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_virt_ops
__weak __relrodata_unpaged("mobj_virt_ops") = {
	.get_va = mobj_virt_get_va,
};

static void mobj_virt_assert_type(struct mobj *mobj __maybe_unused)
{
	assert(mobj->ops == &mobj_virt_ops);
}

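/*
 * mobj_virt is a single global pseudo-mobj covering the whole virtual
 * address space: get_va() simply interprets the offset as a virtual
 * address, so it can represent memory that is already mapped.
 */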
struct mobj mobj_virt = { .ops = &mobj_virt_ops, .size = SIZE_MAX };

/*
 * mobj_shm implementation. A mobj_shm represents a buffer in the predefined
 * non-secure shared memory region:
 * - it is physically contiguous.
 * - it is identified in the static physical layout as MEM_AREA_NSEC_SHM.
 * - it matches both the specific CORE_MEM_NSEC_SHM attribute and the
 *   generic non-secure CORE_MEM_NON_SEC attribute.
 */

struct mobj_shm {
	struct mobj mobj;
	paddr_t pa;
	uint64_t cookie;
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj);

static void *mobj_shm_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	if (!mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return phys_to_virt(m->pa + offset, MEM_AREA_NSEC_SHM,
			    mobj->size - offset);
}

static TEE_Result mobj_shm_get_pa(struct mobj *mobj, size_t offs,
				  size_t granule, paddr_t *pa)
{
	struct mobj_shm *m = to_mobj_shm(mobj);
	paddr_t p;

	if (!pa || offs >= mobj->size)
		return TEE_ERROR_GENERIC;

	p = m->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_shm_get_pa);

static size_t mobj_shm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	assert(IS_POWER_OF_TWO(granule));
	return to_mobj_shm(mobj)->pa & (granule - 1);
}

static bool mobj_shm_matches(struct mobj *mobj __unused, enum buf_is_attr attr)
{
	return attr == CORE_MEM_NSEC_SHM || attr == CORE_MEM_NON_SEC;
}

static TEE_Result mobj_shm_get_mem_type(struct mobj *mobj __unused,
					uint32_t *mem_type)
{
	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = TEE_MATTR_MEM_TYPE_CACHED;

	return TEE_SUCCESS;
}

static void mobj_shm_free(struct mobj *mobj)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	free(m);
}

static uint64_t mobj_shm_get_cookie(struct mobj *mobj)
{
	return to_mobj_shm(mobj)->cookie;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_shm_ops
__weak __relrodata_unpaged("mobj_shm_ops") = {
	.get_va = mobj_shm_get_va,
	.get_pa = mobj_shm_get_pa,
	.get_phys_offs = mobj_shm_get_phys_offs,
	.get_mem_type = mobj_shm_get_mem_type,
	.matches = mobj_shm_matches,
	.free = mobj_shm_free,
	.get_cookie = mobj_shm_get_cookie,
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_shm_ops);
	return container_of(mobj, struct mobj_shm, mobj);
}

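/*
 * Creates a mobj covering @size bytes of non-secure shared memory at @pa.
 * The range must lie entirely within CORE_MEM_NSEC_SHM, otherwise NULL is
 * returned. @cookie is the identifier associated with the buffer (returned
 * by the get_cookie operation).
 *
 * Minimal usage sketch (illustrative only, error handling omitted):
 *
 *	struct mobj *m = mobj_shm_alloc(pa, size, cookie);
 *	void *va = NULL;
 *
 *	if (m)
 *		va = mobj_get_va(m, 0, size);
 *	...
 *	mobj_put(m);
 */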
struct mobj *mobj_shm_alloc(paddr_t pa, size_t size, uint64_t cookie)
{
	struct mobj_shm *m;

	if (!core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size))
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_shm_ops;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&m->mobj.refc, 1);
	m->pa = pa;
	m->cookie = cookie;

	return &m->mobj;
}

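/*
 * mobj_with_fobj implementation. The mobj is backed by an fobj that supplies
 * the page-granular physical storage, optionally tied to a struct file.
 * These mobjs always represent secure memory.
 */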
struct mobj_with_fobj {
	struct fobj *fobj;
	struct file *file;
	struct mobj mobj;
	uint8_t mem_type;
};

const struct mobj_ops mobj_with_fobj_ops;

struct mobj *mobj_with_fobj_alloc(struct fobj *fobj, struct file *file,
				  uint32_t mem_type)
{
	struct mobj_with_fobj *m = NULL;

	assert(!(mem_type & ~TEE_MATTR_MEM_TYPE_MASK));

	if (!fobj)
		return NULL;
	if (mem_type > UINT8_MAX)
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_with_fobj_ops;
	refcount_set(&m->mobj.refc, 1);
	m->mobj.size = fobj->num_pages * SMALL_PAGE_SIZE;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	m->fobj = fobj_get(fobj);
	m->file = file_get(file);
	m->mem_type = mem_type;

	return &m->mobj;
}

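/*
 * Minimal usage sketch (illustrative only, assuming @f is an fobj obtained
 * from one of the fobj allocators):
 *
 *	struct mobj *m = mobj_with_fobj_alloc(f, NULL,
 *					      TEE_MATTR_MEM_TYPE_CACHED);
 *
 *	if (!m)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *	... use @m, for instance map it into a user mode context ...
 *	mobj_put(m);
 */
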
static struct mobj_with_fobj *to_mobj_with_fobj(struct mobj *mobj)
{
	assert(mobj && mobj->ops == &mobj_with_fobj_ops);

	return container_of(mobj, struct mobj_with_fobj, mobj);
}

static bool mobj_with_fobj_matches(struct mobj *mobj __maybe_unused,
				   enum buf_is_attr attr)
{
	assert(to_mobj_with_fobj(mobj));

	/*
	 * All fobjs are supposed to be mapped secure, so classify it as
	 * CORE_MEM_SEC. Stay out of CORE_MEM_TEE_RAM etc; if that
	 * information is needed it can probably be carried in another way
	 * than putting the burden directly on fobj.
	 */
	return attr == CORE_MEM_SEC;
}

static void mobj_with_fobj_free(struct mobj *mobj)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	fobj_put(m->fobj);
	file_put(m->file);
	free(m);
}

static struct fobj *mobj_with_fobj_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_with_fobj(mobj)->fobj);
}

static TEE_Result mobj_with_fobj_get_mem_type(struct mobj *mobj,
					      uint32_t *mem_type)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = m->mem_type;

	return TEE_SUCCESS;
}

static TEE_Result mobj_with_fobj_get_pa(struct mobj *mobj, size_t offs,
					size_t granule, paddr_t *pa)
{
	struct mobj_with_fobj *f = to_mobj_with_fobj(mobj);
	paddr_t p = 0;

	if (!f->fobj->ops->get_pa) {
		assert(mobj_is_paged(mobj));
		return TEE_ERROR_NOT_SUPPORTED;
	}

	p = f->fobj->ops->get_pa(f->fobj, offs / SMALL_PAGE_SIZE) +
	    offs % SMALL_PAGE_SIZE;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_with_fobj_get_pa);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_with_fobj_ops
__weak __relrodata_unpaged("mobj_with_fobj_ops") = {
	.matches = mobj_with_fobj_matches,
	.free = mobj_with_fobj_free,
	.get_fobj = mobj_with_fobj_get_fobj,
	.get_mem_type = mobj_with_fobj_get_mem_type,
	.get_pa = mobj_with_fobj_get_pa,
};

#ifdef CFG_PAGED_USER_TA
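/*
 * A mobj is paged when it is backed by a fobj that cannot report physical
 * addresses, i.e. its pages are provided on demand by the pager.
 */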
bool mobj_is_paged(struct mobj *mobj)
{
	if (mobj->ops == &mobj_with_fobj_ops &&
	    !to_mobj_with_fobj(mobj)->fobj->ops->get_pa)
		return true;

	return false;
}
#endif /*CFG_PAGED_USER_TA*/

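/*
 * Registers the mobjs covering the core's own RAM. With
 * CFG_CORE_RWDATA_NOEXEC the RX and RW ranges get separate mobjs, otherwise
 * a single mobj spanning from TEE_RAM_START to the end of the RW data is
 * used for both. Runs as a late driver initcall.
 */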
static TEE_Result mobj_init(void)
{
	if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
		mobj_tee_ram_rx = mobj_phys_init(0,
						 VCORE_UNPG_RX_SZ,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RX);
		if (!mobj_tee_ram_rx)
			panic("Failed to register tee ram rx");

		mobj_tee_ram_rw = mobj_phys_init(0,
						 VCORE_UNPG_RW_SZ,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram rw");
	} else {
		mobj_tee_ram_rw = mobj_phys_init(TEE_RAM_START,
						 VCORE_UNPG_RW_PA +
						 VCORE_UNPG_RW_SZ -
						 TEE_RAM_START,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram");

		mobj_tee_ram_rx = mobj_tee_ram_rw;
	}

	return TEE_SUCCESS;
}

driver_init_late(mobj_init);