xref: /optee_os/core/mm/mobj.c (revision 9f34db38245c9b3a4e6e7e63eb78a75e23ab2da3)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2022, Linaro Limited
 */

#include <assert.h>
#include <config.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/linker.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <optee_msg.h>
#include <stdlib.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

struct mobj *mobj_tee_ram_rx;
struct mobj *mobj_tee_ram_rw;
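/*
 * mobj_tee_ram_rx and mobj_tee_ram_rw cover the core's unpaged read-only/
 * executable and read-write core RAM ranges respectively. They are set up
 * by mobj_init() at the end of this file; with CFG_CORE_RWDATA_NOEXEC
 * disabled both pointers refer to the same mobj.
 */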

/*
 * mobj_phys implementation
 */

struct mobj_phys {
	struct mobj mobj;
	enum buf_is_attr battr;
	/* Defined by TEE_MATTR_MEM_TYPE_* in tee_mmu_types.h */
	uint32_t mem_type;
	vaddr_t va;
	paddr_t pa;
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj);

static void *mobj_phys_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!moph->va || !mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return (void *)(moph->va + offset);
}

static TEE_Result mobj_phys_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	paddr_t p;

	if (!pa)
		return TEE_ERROR_GENERIC;

	p = moph->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_phys_get_pa);
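/*
 * Illustrative example (not part of the upstream file, values are
 * hypothetical): with a mobj_phys at pa = 0x40100000, a call like
 *
 *	mobj_get_pa(mobj, 0x1204, SMALL_PAGE_SIZE, &pa);
 *
 * ends up here, computes 0x40100000 + 0x1204 = 0x40101204 and masks it down
 * to the 4 KiB page boundary, so pa = 0x40101000. With granule == 0 the
 * unrounded address 0x40101204 is returned instead.
 */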

static TEE_Result mobj_phys_get_mem_type(struct mobj *mobj, uint32_t *mem_type)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = moph->mem_type;
	return TEE_SUCCESS;
}

static bool mobj_phys_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	enum buf_is_attr a;

	a = moph->battr;

	switch (attr) {
	case CORE_MEM_SEC:
		return a == CORE_MEM_SEC || a == CORE_MEM_TEE_RAM ||
		       a == CORE_MEM_TA_RAM || a == CORE_MEM_SDP_MEM;
	case CORE_MEM_NON_SEC:
		return a == CORE_MEM_NSEC_SHM;
	case CORE_MEM_TEE_RAM:
	case CORE_MEM_TA_RAM:
	case CORE_MEM_NSEC_SHM:
	case CORE_MEM_SDP_MEM:
		return attr == a;
	default:
		return false;
	}
}

static void mobj_phys_free(struct mobj *mobj)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	free(moph);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_phys_ops
__weak __relrodata_unpaged("mobj_phys_ops") = {
	.get_va = mobj_phys_get_va,
	.get_pa = mobj_phys_get_pa,
	.get_phys_offs = NULL, /* only offset 0 */
	.get_mem_type = mobj_phys_get_mem_type,
	.matches = mobj_phys_matches,
	.free = mobj_phys_free,
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_phys_ops);
	return container_of(mobj, struct mobj_phys, mobj);
}

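/*
 * Create a mobj_phys covering a physically contiguous range. @pa and @size
 * must be aligned to CORE_MMU_USER_PARAM_SIZE. When @pa is zero the range is
 * taken from an exclusive core mapping of @area_type instead of being
 * supplied by the caller. A missing virtual mapping is accepted only for
 * CORE_MEM_SDP_MEM buffers.
 */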
static struct mobj *mobj_phys_init(paddr_t pa, size_t size, uint32_t mem_type,
				   enum buf_is_attr battr,
				   enum teecore_memtypes area_type)
{
	void *va = NULL;
	struct mobj_phys *moph = NULL;
	struct tee_mmap_region *map = NULL;

	if ((pa & CORE_MMU_USER_PARAM_MASK) ||
	    (size & CORE_MMU_USER_PARAM_MASK)) {
		DMSG("Expect %#x alignment", CORE_MMU_USER_PARAM_SIZE);
		return NULL;
	}

	if (pa) {
		va = phys_to_virt(pa, area_type, size);
	} else {
		map = core_mmu_find_mapping_exclusive(area_type, size);
		if (!map)
			return NULL;

		pa = map->pa;
		va = (void *)map->va;
	}

	/* Only SDP memory may not have a virtual address */
	if (!va && battr != CORE_MEM_SDP_MEM)
		return NULL;

	moph = calloc(1, sizeof(*moph));
	if (!moph)
		return NULL;

	moph->battr = battr;
	moph->mem_type = mem_type;
	moph->mobj.size = size;
	moph->mobj.ops = &mobj_phys_ops;
	refcount_set(&moph->mobj.refc, 1);
	moph->pa = pa;
	moph->va = (vaddr_t)va;

	return &moph->mobj;
}

struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t mem_type,
			     enum buf_is_attr battr)
{
	enum teecore_memtypes area_type;

	switch (battr) {
	case CORE_MEM_NSEC_SHM:
		area_type = MEM_AREA_NSEC_SHM;
		break;
	case CORE_MEM_SDP_MEM:
		area_type = MEM_AREA_SDP_MEM;
		break;
	default:
		DMSG("can't allocate with specified attribute");
		return NULL;
	}

	return mobj_phys_init(pa, size, mem_type, battr, area_type);
}
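/*
 * Usage sketch (illustrative only; sdp_pa and sdp_size are hypothetical):
 * registering a statically carved-out secure data path buffer as a mobj:
 *
 *	struct mobj *m = mobj_phys_alloc(sdp_pa, sdp_size,
 *					 TEE_MATTR_MEM_TYPE_CACHED,
 *					 CORE_MEM_SDP_MEM);
 *
 *	if (!m)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *
 * Only CORE_MEM_NSEC_SHM and CORE_MEM_SDP_MEM are accepted here; any other
 * attribute makes mobj_phys_alloc() return NULL.
 */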

/*
 * mobj_virt implementation
 */

static void mobj_virt_assert_type(struct mobj *mobj);

static void *mobj_virt_get_va(struct mobj *mobj, size_t offset,
			      size_t len __maybe_unused)
{
	mobj_virt_assert_type(mobj);
	assert(mobj_check_offset_and_len(mobj, offset, len));

	return (void *)(vaddr_t)offset;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_virt_ops
__weak __relrodata_unpaged("mobj_virt_ops") = {
	.get_va = mobj_virt_get_va,
};

static void mobj_virt_assert_type(struct mobj *mobj __maybe_unused)
{
	assert(mobj->ops == &mobj_virt_ops);
}

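/*
 * mobj_virt is a catch-all mobj for memory that is already mapped in the
 * core: the "offset" passed to mobj_get_va() is interpreted as the virtual
 * address itself, so for example mobj_get_va(&mobj_virt, va, len) simply
 * returns va. The SIZE_MAX size makes the range check accept any reasonable
 * offset and length.
 */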
struct mobj mobj_virt = { .ops = &mobj_virt_ops, .size = SIZE_MAX };

/*
 * mobj_shm implementation. A mobj_shm represents a buffer in the predefined
 * non-secure shared memory region:
 * - it is physically contiguous,
 * - it is identified in the static physical memory layout as
 *   MEM_AREA_NSEC_SHM,
 * - its mobjs match the specific CORE_MEM_NSEC_SHM attribute as well as the
 *   generic non-secure CORE_MEM_NON_SEC attribute.
 */

struct mobj_shm {
	struct mobj mobj;
	paddr_t pa;
	uint64_t cookie;
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj);

static void *mobj_shm_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	if (!mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return phys_to_virt(m->pa + offset, MEM_AREA_NSEC_SHM,
			    mobj->size - offset);
}

static TEE_Result mobj_shm_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_shm *m = to_mobj_shm(mobj);
	paddr_t p;

	if (!pa || offs >= mobj->size)
		return TEE_ERROR_GENERIC;

	p = m->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_shm_get_pa);

static size_t mobj_shm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	assert(IS_POWER_OF_TWO(granule));
	return to_mobj_shm(mobj)->pa & (granule - 1);
}
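/*
 * Illustrative example (hypothetical values): for a mobj_shm with
 * pa = 0x88001800, mobj_shm_get_phys_offs(mobj, SMALL_PAGE_SIZE) returns
 * 0x800, i.e. the offset of the buffer within its small page. Callers
 * typically use this to preserve the sub-page offset when mapping the
 * buffer.
 */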

static bool mobj_shm_matches(struct mobj *mobj __unused, enum buf_is_attr attr)
{
	return attr == CORE_MEM_NSEC_SHM || attr == CORE_MEM_NON_SEC;
}

static TEE_Result mobj_shm_get_mem_type(struct mobj *mobj __unused,
					uint32_t *mem_type)
{
	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = TEE_MATTR_MEM_TYPE_CACHED;

	return TEE_SUCCESS;
}

static void mobj_shm_free(struct mobj *mobj)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	free(m);
}

static uint64_t mobj_shm_get_cookie(struct mobj *mobj)
{
	return to_mobj_shm(mobj)->cookie;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_shm_ops
__weak __relrodata_unpaged("mobj_shm_ops") = {
	.get_va = mobj_shm_get_va,
	.get_pa = mobj_shm_get_pa,
	.get_phys_offs = mobj_shm_get_phys_offs,
	.get_mem_type = mobj_shm_get_mem_type,
	.matches = mobj_shm_matches,
	.free = mobj_shm_free,
	.get_cookie = mobj_shm_get_cookie,
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_shm_ops);
	return container_of(mobj, struct mobj_shm, mobj);
}

struct mobj *mobj_shm_alloc(paddr_t pa, size_t size, uint64_t cookie)
{
	struct mobj_shm *m;

	if (!core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size))
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_shm_ops;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&m->mobj.refc, 1);
	m->pa = pa;
	m->cookie = cookie;

	return &m->mobj;
}
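/*
 * Usage sketch (illustrative only; pa, size and cookie would come from a
 * normal world request and are hypothetical here):
 *
 *	struct mobj *m = mobj_shm_alloc(pa, size, cookie);
 *
 *	if (!m)
 *		return TEE_ERROR_BAD_PARAMETERS;
 *
 * The allocation fails unless the whole [pa, pa + size) range lies inside
 * the non-secure shared memory area, as checked with
 * core_pbuf_is(CORE_MEM_NSEC_SHM, ...).
 */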

struct mobj_with_fobj {
	struct fobj *fobj;
	struct file *file;
	struct mobj mobj;
	uint8_t mem_type;
};

const struct mobj_ops mobj_with_fobj_ops;

struct mobj *mobj_with_fobj_alloc(struct fobj *fobj, struct file *file,
				  uint32_t mem_type)
{
	struct mobj_with_fobj *m = NULL;

	assert(!(mem_type & ~TEE_MATTR_MEM_TYPE_MASK));

	if (!fobj)
		return NULL;
	if (mem_type > UINT8_MAX)
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_with_fobj_ops;
	refcount_set(&m->mobj.refc, 1);
	m->mobj.size = fobj->num_pages * SMALL_PAGE_SIZE;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	m->fobj = fobj_get(fobj);
	m->file = file_get(file);
	m->mem_type = mem_type;

	return &m->mobj;
}
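/*
 * Usage sketch (illustrative only; fobj and file are assumed to have been
 * obtained elsewhere, for instance while loading a TA):
 *
 *	struct mobj *m = mobj_with_fobj_alloc(fobj, file,
 *					      TEE_MATTR_MEM_TYPE_CACHED);
 *
 *	if (!m)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *
 * The resulting mobj covers fobj->num_pages small pages and mem_type may
 * only carry TEE_MATTR_MEM_TYPE_* bits, as enforced by the assert() above.
 */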

static struct mobj_with_fobj *to_mobj_with_fobj(struct mobj *mobj)
{
	assert(mobj && mobj->ops == &mobj_with_fobj_ops);

	return container_of(mobj, struct mobj_with_fobj, mobj);
}

static bool mobj_with_fobj_matches(struct mobj *mobj __maybe_unused,
				 enum buf_is_attr attr)
{
	assert(to_mobj_with_fobj(mobj));

	/*
	 * All fobjs are supposed to be mapped secure, so classify them as
	 * CORE_MEM_SEC. Stay out of CORE_MEM_TEE_RAM etc; if that
	 * information is needed it can probably be carried in some other
	 * way than by putting the burden directly on fobj.
	 */
	return attr == CORE_MEM_SEC;
}

static void mobj_with_fobj_free(struct mobj *mobj)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	fobj_put(m->fobj);
	file_put(m->file);
	free(m);
}

static struct fobj *mobj_with_fobj_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_with_fobj(mobj)->fobj);
}

static TEE_Result mobj_with_fobj_get_mem_type(struct mobj *mobj,
					      uint32_t *mem_type)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = m->mem_type;

	return TEE_SUCCESS;
}

static TEE_Result mobj_with_fobj_get_pa(struct mobj *mobj, size_t offs,
					size_t granule, paddr_t *pa)
{
	struct mobj_with_fobj *f = to_mobj_with_fobj(mobj);
	paddr_t p = 0;

	if (!f->fobj->ops->get_pa) {
		assert(mobj_is_paged(mobj));
		return TEE_ERROR_NOT_SUPPORTED;
	}

	p = f->fobj->ops->get_pa(f->fobj, offs / SMALL_PAGE_SIZE) +
	    offs % SMALL_PAGE_SIZE;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_with_fobj_get_pa);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_with_fobj_ops
__weak __relrodata_unpaged("mobj_with_fobj_ops") = {
	.matches = mobj_with_fobj_matches,
	.free = mobj_with_fobj_free,
	.get_fobj = mobj_with_fobj_get_fobj,
	.get_mem_type = mobj_with_fobj_get_mem_type,
	.get_pa = mobj_with_fobj_get_pa,
};

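/*
 * A mobj counts as paged when its backing fobj cannot report a stable
 * physical address, i.e. when the fobj has no get_pa() callback (see
 * mobj_with_fobj_get_pa() above). The out-of-line definition below is only
 * needed when CFG_PAGED_USER_TA is enabled.
 */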
#ifdef CFG_PAGED_USER_TA
bool mobj_is_paged(struct mobj *mobj)
{
	if (mobj->ops == &mobj_with_fobj_ops &&
	    !to_mobj_with_fobj(mobj)->fobj->ops->get_pa)
		return true;

	return false;
}
#endif /*CFG_PAGED_USER_TA*/

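/*
 * Registers the mobjs covering the core's own RAM. With
 * CFG_CORE_RWDATA_NOEXEC the read-only/executable and read-write ranges get
 * separate mobjs; otherwise a single mobj covering from TEE_RAM_START to the
 * end of the read-write data is shared by both mobj_tee_ram_rx and
 * mobj_tee_ram_rw.
 */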
static TEE_Result mobj_init(void)
{
	if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
		mobj_tee_ram_rx = mobj_phys_init(0,
						 VCORE_UNPG_RX_SZ,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RX);
		if (!mobj_tee_ram_rx)
			panic("Failed to register tee ram rx");

		mobj_tee_ram_rw = mobj_phys_init(0,
						 VCORE_UNPG_RW_SZ,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram rw");
	} else {
		mobj_tee_ram_rw = mobj_phys_init(TEE_RAM_START,
						 VCORE_UNPG_RW_PA +
						 VCORE_UNPG_RW_SZ -
						 VCORE_START_VA,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram");

		mobj_tee_ram_rx = mobj_tee_ram_rw;
	}

	return TEE_SUCCESS;
}

driver_init_late(mobj_init);