// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2022, Linaro Limited
 */

#include <assert.h>
#include <config.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/linker.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <optee_msg.h>
#include <stdlib.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

struct mobj *mobj_tee_ram_rx;
struct mobj *mobj_tee_ram_rw;

/*
 * mobj_phys implementation
 */

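/*
 * A mobj_phys covers a physically contiguous range of memory that is
 * already part of the static memory map (or, for SDP memory, may lack a
 * virtual address entirely). Offset 0 in the mobj corresponds to @pa/@va.
 */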
struct mobj_phys {
	struct mobj mobj;
	enum buf_is_attr battr;
	/* Defined by TEE_MATTR_MEM_TYPE_* in tee_mmu_types.h */
	uint32_t mem_type;
	vaddr_t va;
	paddr_t pa;
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj);

static void *mobj_phys_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!moph->va || !mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return (void *)(moph->va + offset);
}

static TEE_Result mobj_phys_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	paddr_t p;

	if (!pa)
		return TEE_ERROR_GENERIC;

	p = moph->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_phys_get_pa);

static TEE_Result mobj_phys_get_mem_type(struct mobj *mobj, uint32_t *mem_type)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = moph->mem_type;
	return TEE_SUCCESS;
}

static bool mobj_phys_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	enum buf_is_attr a;

	a = moph->battr;

	switch (attr) {
	case CORE_MEM_SEC:
		return a == CORE_MEM_SEC || a == CORE_MEM_TEE_RAM ||
		       a == CORE_MEM_SDP_MEM;
	case CORE_MEM_NON_SEC:
		return a == CORE_MEM_NSEC_SHM;
	case CORE_MEM_TEE_RAM:
	case CORE_MEM_NSEC_SHM:
	case CORE_MEM_SDP_MEM:
		return attr == a;
	default:
		return false;
	}
}

static void mobj_phys_free(struct mobj *mobj)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	free(moph);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_phys_ops
	__weak __relrodata_unpaged("mobj_phys_ops") = {
	.get_va = mobj_phys_get_va,
	.get_pa = mobj_phys_get_pa,
	.get_phys_offs = NULL, /* only offset 0 */
	.get_mem_type = mobj_phys_get_mem_type,
	.matches = mobj_phys_matches,
	.free = mobj_phys_free,
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_phys_ops);
	return container_of(mobj, struct mobj_phys, mobj);
}

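/*
 * Initialize a mobj_phys instance. If @pa is zero, an exclusive mapping of
 * @area_type is looked up in the static memory map and its physical and
 * virtual addresses are used instead.
 */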
static struct mobj *mobj_phys_init(paddr_t pa, size_t size, uint32_t mem_type,
				   enum buf_is_attr battr,
				   enum teecore_memtypes area_type)
{
	void *va = NULL;
	struct mobj_phys *moph = NULL;
	struct tee_mmap_region *map = NULL;

	if ((pa & CORE_MMU_USER_PARAM_MASK) ||
	    (size & CORE_MMU_USER_PARAM_MASK)) {
		DMSG("Expect %#x alignment", CORE_MMU_USER_PARAM_SIZE);
		return NULL;
	}

	if (pa) {
		va = phys_to_virt(pa, area_type, size);
	} else {
		map = core_mmu_find_mapping_exclusive(area_type, size);
		if (!map)
			return NULL;

		pa = map->pa;
		va = (void *)map->va;
	}

	/* Only SDP memory may not have a virtual address */
	if (!va && battr != CORE_MEM_SDP_MEM)
		return NULL;

	moph = calloc(1, sizeof(*moph));
	if (!moph)
		return NULL;

	moph->battr = battr;
	moph->mem_type = mem_type;
	moph->mobj.size = size;
	moph->mobj.ops = &mobj_phys_ops;
	refcount_set(&moph->mobj.refc, 1);
	moph->pa = pa;
	moph->va = (vaddr_t)va;

	return &moph->mobj;
}

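/*
 * Allocate a mobj covering an already carved out physical range. Only the
 * non-secure shared memory (CORE_MEM_NSEC_SHM) and secure data path memory
 * (CORE_MEM_SDP_MEM) attributes are accepted here.
 */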
struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t mem_type,
			     enum buf_is_attr battr)
{
	enum teecore_memtypes area_type;

	switch (battr) {
	case CORE_MEM_NSEC_SHM:
		area_type = MEM_AREA_NSEC_SHM;
		break;
	case CORE_MEM_SDP_MEM:
		area_type = MEM_AREA_SDP_MEM;
		break;
	default:
		DMSG("can't allocate with specified attribute");
		return NULL;
	}

	return mobj_phys_init(pa, size, mem_type, battr, area_type);
}

/*
 * mobj_virt implementation
 */

static void mobj_virt_assert_type(struct mobj *mobj);

static void *mobj_virt_get_va(struct mobj *mobj, size_t offset,
			      size_t len __maybe_unused)
{
	mobj_virt_assert_type(mobj);
	assert(mobj_check_offset_and_len(mobj, offset, len));

	return (void *)(vaddr_t)offset;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_virt_ops
	__weak __relrodata_unpaged("mobj_virt_ops") = {
	.get_va = mobj_virt_get_va,
};

static void mobj_virt_assert_type(struct mobj *mobj __maybe_unused)
{
	assert(mobj->ops == &mobj_virt_ops);
}

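/*
 * A catch-all mobj for memory that is already mapped: the offset into
 * mobj_virt is interpreted directly as a virtual address, hence its size
 * spans the whole address space (SIZE_MAX).
 */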
struct mobj mobj_virt = { .ops = &mobj_virt_ops, .size = SIZE_MAX };

/*
 * mobj_shm implementation. A mobj_shm represents a buffer in the predefined
 * non-secure shared memory region:
 * - it is physically contiguous.
 * - it is identified in the static physical layout as MEM_AREA_NSEC_SHM.
 * - the mobjs created here match the specific CORE_MEM_NSEC_SHM attribute
 *   as well as the generic non-secure CORE_MEM_NON_SEC attribute.
 */
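
/*
 * Illustrative sketch (not called from this file): wrapping a physically
 * contiguous non-secure buffer and retrieving a core-mapped virtual address
 * for it. mobj_get_va() and mobj_put() are the generic helpers from
 * <mm/mobj.h>; pa, sz and cookie stand for caller supplied values.
 *
 *	struct mobj *m = mobj_shm_alloc(pa, sz, cookie);
 *	void *va = NULL;
 *
 *	if (m) {
 *		va = mobj_get_va(m, 0, sz);
 *		...
 *		mobj_put(m);
 *	}
 */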

struct mobj_shm {
	struct mobj mobj;
	paddr_t pa;
	uint64_t cookie;
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj);

static void *mobj_shm_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	if (!mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return phys_to_virt(m->pa + offset, MEM_AREA_NSEC_SHM,
			    mobj->size - offset);
}

static TEE_Result mobj_shm_get_pa(struct mobj *mobj, size_t offs,
				  size_t granule, paddr_t *pa)
{
	struct mobj_shm *m = to_mobj_shm(mobj);
	paddr_t p;

	if (!pa || offs >= mobj->size)
		return TEE_ERROR_GENERIC;

	p = m->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_shm_get_pa);

static size_t mobj_shm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	assert(IS_POWER_OF_TWO(granule));
	return to_mobj_shm(mobj)->pa & (granule - 1);
}

static bool mobj_shm_matches(struct mobj *mobj __unused, enum buf_is_attr attr)
{
	return attr == CORE_MEM_NSEC_SHM || attr == CORE_MEM_NON_SEC;
}

static TEE_Result mobj_shm_get_mem_type(struct mobj *mobj __unused,
					uint32_t *mem_type)
{
	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = TEE_MATTR_MEM_TYPE_CACHED;

	return TEE_SUCCESS;
}

static void mobj_shm_free(struct mobj *mobj)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	free(m);
}

static uint64_t mobj_shm_get_cookie(struct mobj *mobj)
{
	return to_mobj_shm(mobj)->cookie;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_shm_ops
	__weak __relrodata_unpaged("mobj_shm_ops") = {
	.get_va = mobj_shm_get_va,
	.get_pa = mobj_shm_get_pa,
	.get_phys_offs = mobj_shm_get_phys_offs,
	.get_mem_type = mobj_shm_get_mem_type,
	.matches = mobj_shm_matches,
	.free = mobj_shm_free,
	.get_cookie = mobj_shm_get_cookie,
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_shm_ops);
	return container_of(mobj, struct mobj_shm, mobj);
}

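/*
 * Allocate a mobj covering a non-secure shared memory buffer. The range
 * [pa, pa + size) must lie entirely within the CORE_MEM_NSEC_SHM region,
 * otherwise NULL is returned.
 */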
struct mobj *mobj_shm_alloc(paddr_t pa, size_t size, uint64_t cookie)
{
	struct mobj_shm *m;

	if (!core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size))
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_shm_ops;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&m->mobj.refc, 1);
	m->pa = pa;
	m->cookie = cookie;

	return &m->mobj;
}

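/*
 * mobj_with_fobj implementation. A mobj_with_fobj wraps a page based fobj,
 * optionally associated with a struct file, and holds a reference on both
 * for its lifetime.
 */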
struct mobj_with_fobj {
	struct fobj *fobj;
	struct file *file;
	struct mobj mobj;
	uint8_t mem_type;
};

const struct mobj_ops mobj_with_fobj_ops;

struct mobj *mobj_with_fobj_alloc(struct fobj *fobj, struct file *file,
				  uint32_t mem_type)
{
	struct mobj_with_fobj *m = NULL;

	assert(!(mem_type & ~TEE_MATTR_MEM_TYPE_MASK));

	if (!fobj)
		return NULL;
	if (mem_type > UINT8_MAX)
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_with_fobj_ops;
	refcount_set(&m->mobj.refc, 1);
	m->mobj.size = fobj->num_pages * SMALL_PAGE_SIZE;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	m->fobj = fobj_get(fobj);
	m->file = file_get(file);
	m->mem_type = mem_type;

	return &m->mobj;
}

static struct mobj_with_fobj *to_mobj_with_fobj(struct mobj *mobj)
{
	assert(mobj && mobj->ops == &mobj_with_fobj_ops);

	return container_of(mobj, struct mobj_with_fobj, mobj);
}

static bool mobj_with_fobj_matches(struct mobj *mobj __maybe_unused,
				   enum buf_is_attr attr)
{
	assert(to_mobj_with_fobj(mobj));

	/*
	 * All fobjs are supposed to be mapped secure, so classify this as
	 * CORE_MEM_SEC. Stay out of CORE_MEM_TEE_RAM etc; if that
	 * information is needed it can probably be carried in some other
	 * way than putting the burden directly on the fobj.
	 */
	return attr == CORE_MEM_SEC;
}

static void mobj_with_fobj_free(struct mobj *mobj)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	fobj_put(m->fobj);
	file_put(m->file);
	free(m);
}

static struct fobj *mobj_with_fobj_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_with_fobj(mobj)->fobj);
}

static TEE_Result mobj_with_fobj_get_mem_type(struct mobj *mobj,
					      uint32_t *mem_type)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = m->mem_type;

	return TEE_SUCCESS;
}

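/*
 * Paged fobjs have no fixed physical address, so a fobj without a get_pa()
 * callback makes this mobj report TEE_ERROR_NOT_SUPPORTED.
 */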
static TEE_Result mobj_with_fobj_get_pa(struct mobj *mobj, size_t offs,
					size_t granule, paddr_t *pa)
{
	struct mobj_with_fobj *f = to_mobj_with_fobj(mobj);
	paddr_t p = 0;

	if (!f->fobj->ops->get_pa) {
		assert(mobj_is_paged(mobj));
		return TEE_ERROR_NOT_SUPPORTED;
	}

	p = f->fobj->ops->get_pa(f->fobj, offs / SMALL_PAGE_SIZE) +
	    offs % SMALL_PAGE_SIZE;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_with_fobj_get_pa);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_with_fobj_ops
	__weak __relrodata_unpaged("mobj_with_fobj_ops") = {
	.matches = mobj_with_fobj_matches,
	.free = mobj_with_fobj_free,
	.get_fobj = mobj_with_fobj_get_fobj,
	.get_mem_type = mobj_with_fobj_get_mem_type,
	.get_pa = mobj_with_fobj_get_pa,
};

#ifdef CFG_PAGED_USER_TA
bool mobj_is_paged(struct mobj *mobj)
{
	if (mobj->ops == &mobj_with_fobj_ops &&
	    !to_mobj_with_fobj(mobj)->fobj->ops->get_pa)
		return true;

	return false;
}
#endif /*CFG_PAGED_USER_TA*/

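/*
 * Register the mobjs covering the TEE core executable (RX) and read/write
 * data (RW) memory. With CFG_CORE_RWDATA_NOEXEC the two are separate mobjs,
 * otherwise a single mobj covers both and is shared by the two pointers.
 */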
static TEE_Result mobj_init(void)
{
	if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
		mobj_tee_ram_rx = mobj_phys_init(0,
						 VCORE_UNPG_RX_SZ,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RX);
		if (!mobj_tee_ram_rx)
			panic("Failed to register tee ram rx");

		mobj_tee_ram_rw = mobj_phys_init(0,
						 VCORE_UNPG_RW_SZ,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram rw");
	} else {
		mobj_tee_ram_rw = mobj_phys_init(TEE_RAM_START,
						 VCORE_UNPG_RW_PA +
						 VCORE_UNPG_RW_SZ -
						 VCORE_START_VA,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram");

		mobj_tee_ram_rx = mobj_tee_ram_rw;
	}

	return TEE_SUCCESS;
}

driver_init_late(mobj_init);

#if defined(CFG_CORE_DYN_PROTMEM)
#if defined(CFG_INSECURE)
/*
 * This default implementation requires CFG_INSECURE=y since it is only a
 * stub and could otherwise cause a silent security error.
 */
TEE_Result __weak plat_set_protmem_range(enum mobj_use_case use_case __unused,
					 paddr_t pa __unused,
					 paddr_size_t sz __unused)
{
	static bool once;

	if (!once) {
		IMSG("WARNING (insecure configuration): platform does not support dynamically protected memory");
		once = true;
	}

	return TEE_SUCCESS;
}
#endif /*defined(CFG_INSECURE)*/

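/*
 * Weak default returning a placeholder protected memory configuration for
 * the secure video playback use case; platforms are expected to override it.
 */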
TEE_Result __weak plat_get_protmem_config(enum mobj_use_case use_case,
					  size_t *min_mem_sz,
					  size_t *min_mem_align)
{
	if (use_case != MOBJ_USE_CASE_SEC_VIDEO_PLAY)
		return TEE_ERROR_BAD_PARAMETERS;

	/*
	 * This memory is only shared with OP-TEE and TAs. This is a dummy
	 * configuration for now; it is expected to be replaced by a
	 * platform specific implementation of this function.
	 */
	*min_mem_sz = SIZE_1M;
	*min_mem_align = SMALL_PAGE_SIZE;

	return TEE_SUCCESS;
}
#endif /*defined(CFG_CORE_DYN_PROTMEM)*/