// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2022, Linaro Limited
 */

#include <assert.h>
#include <config.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/linker.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <optee_msg.h>
#include <stdlib.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

struct mobj *mobj_sec_ddr;
struct mobj *mobj_tee_ram_rx;
struct mobj *mobj_tee_ram_rw;

/*
 * mobj_phys implementation
 */

struct mobj_phys {
	struct mobj mobj;
	enum buf_is_attr battr;
	/* Defined by TEE_MATTR_MEM_TYPE_* in tee_mmu_types.h */
	uint32_t mem_type;
	vaddr_t va;
	paddr_t pa;
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj);

static void *mobj_phys_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!moph->va || !mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return (void *)(moph->va + offset);
}

static TEE_Result mobj_phys_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	paddr_t p;

	if (!pa)
		return TEE_ERROR_GENERIC;

	p = moph->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_phys_get_pa);

static TEE_Result mobj_phys_get_mem_type(struct mobj *mobj, uint32_t *mem_type)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = moph->mem_type;
	return TEE_SUCCESS;
}

static bool mobj_phys_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	enum buf_is_attr a;

	a = moph->battr;

	switch (attr) {
	case CORE_MEM_SEC:
		return a == CORE_MEM_SEC || a == CORE_MEM_TEE_RAM ||
		       a == CORE_MEM_TA_RAM || a == CORE_MEM_SDP_MEM;
	case CORE_MEM_NON_SEC:
		return a == CORE_MEM_NSEC_SHM;
	case CORE_MEM_TEE_RAM:
	case CORE_MEM_TA_RAM:
	case CORE_MEM_NSEC_SHM:
	case CORE_MEM_SDP_MEM:
		return attr == a;
	default:
		return false;
	}
}

static void mobj_phys_free(struct mobj *mobj)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	free(moph);
}
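
/*
 * Illustration (not part of the original code): how the granule argument
 * of the get_pa op rounds the result down. Assuming a hypothetical
 * mobj_phys instance "mobj" backed by pa 0x40001000:
 *
 *	paddr_t pa = 0;
 *
 *	mobj_get_pa(mobj, 0x234, SMALL_PAGE_SIZE, &pa);
 *	// pa == 0x40001000: a non-zero granule masks off the low bits,
 *	// yielding the base of the containing small page.
 *	mobj_get_pa(mobj, 0x234, 0, &pa);
 *	// pa == 0x40001234: granule 0 returns the exact address.
 */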

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_phys_ops
	__weak __relrodata_unpaged("mobj_phys_ops") = {
	.get_va = mobj_phys_get_va,
	.get_pa = mobj_phys_get_pa,
	.get_phys_offs = NULL, /* only offset 0 */
	.get_mem_type = mobj_phys_get_mem_type,
	.matches = mobj_phys_matches,
	.free = mobj_phys_free,
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_phys_ops);
	return container_of(mobj, struct mobj_phys, mobj);
}

static struct mobj *mobj_phys_init(paddr_t pa, size_t size, uint32_t mem_type,
				   enum buf_is_attr battr,
				   enum teecore_memtypes area_type)
{
	void *va = NULL;
	struct mobj_phys *moph = NULL;
	struct tee_mmap_region *map = NULL;

	if ((pa & CORE_MMU_USER_PARAM_MASK) ||
	    (size & CORE_MMU_USER_PARAM_MASK)) {
		DMSG("Expect %#x alignment", CORE_MMU_USER_PARAM_SIZE);
		return NULL;
	}

	if (pa) {
		va = phys_to_virt(pa, area_type, size);
	} else {
		map = core_mmu_find_mapping_exclusive(area_type, size);
		if (!map)
			return NULL;

		pa = map->pa;
		va = (void *)map->va;
	}

	/* Only SDP memory may not have a virtual address */
	if (!va && battr != CORE_MEM_SDP_MEM)
		return NULL;

	moph = calloc(1, sizeof(*moph));
	if (!moph)
		return NULL;

	moph->battr = battr;
	moph->mem_type = mem_type;
	moph->mobj.size = size;
	moph->mobj.ops = &mobj_phys_ops;
	refcount_set(&moph->mobj.refc, 1);
	moph->pa = pa;
	moph->va = (vaddr_t)va;

	return &moph->mobj;
}

struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t mem_type,
			     enum buf_is_attr battr)
{
	enum teecore_memtypes area_type;

	switch (battr) {
	case CORE_MEM_TEE_RAM:
		area_type = MEM_AREA_TEE_RAM_RW_DATA;
		break;
	case CORE_MEM_TA_RAM:
		area_type = MEM_AREA_TA_RAM;
		break;
	case CORE_MEM_NSEC_SHM:
		area_type = MEM_AREA_NSEC_SHM;
		break;
	case CORE_MEM_SDP_MEM:
		area_type = MEM_AREA_SDP_MEM;
		break;
	default:
		DMSG("can't allocate with specified attribute");
		return NULL;
	}

	return mobj_phys_init(pa, size, mem_type, battr, area_type);
}

/*
 * mobj_virt implementation
 */

static void mobj_virt_assert_type(struct mobj *mobj);

static void *mobj_virt_get_va(struct mobj *mobj, size_t offset,
			      size_t len __maybe_unused)
{
	mobj_virt_assert_type(mobj);
	assert(mobj_check_offset_and_len(mobj, offset, len));

	return (void *)(vaddr_t)offset;
}
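
/*
 * Illustration (not part of the original code): mobj_virt below covers
 * the whole virtual address space, so the offset passed to get_va is
 * handed back as the virtual address itself:
 *
 *	void *va = mobj_get_va(&mobj_virt, 0x1000, 0x100);
 *	// va == (void *)0x1000
 */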

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_virt_ops
	__weak __relrodata_unpaged("mobj_virt_ops") = {
	.get_va = mobj_virt_get_va,
};

static void mobj_virt_assert_type(struct mobj *mobj __maybe_unused)
{
	assert(mobj->ops == &mobj_virt_ops);
}

struct mobj mobj_virt = { .ops = &mobj_virt_ops, .size = SIZE_MAX };

/*
 * mobj_mm implementation
 */

struct mobj_mm {
	tee_mm_entry_t *mm;
	struct mobj *parent_mobj;
	struct mobj mobj;
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj);

static size_t mobj_mm_offs(struct mobj *mobj, size_t offs)
{
	tee_mm_entry_t *mm = to_mobj_mm(mobj)->mm;

	return (mm->offset << mm->pool->shift) + offs;
}

static void *mobj_mm_get_va(struct mobj *mobj, size_t offs, size_t len)
{
	return mobj_get_va(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs), len);
}

static TEE_Result mobj_mm_get_pa(struct mobj *mobj, size_t offs,
				 size_t granule, paddr_t *pa)
{
	return mobj_get_pa(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs), granule, pa);
}
DECLARE_KEEP_PAGER(mobj_mm_get_pa);

static size_t mobj_mm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	return mobj_get_phys_offs(to_mobj_mm(mobj)->parent_mobj, granule);
}

static TEE_Result mobj_mm_get_mem_type(struct mobj *mobj, uint32_t *mem_type)
{
	return mobj_get_mem_type(to_mobj_mm(mobj)->parent_mobj, mem_type);
}

static bool mobj_mm_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	return mobj_matches(to_mobj_mm(mobj)->parent_mobj, attr);
}

static void mobj_mm_free(struct mobj *mobj)
{
	struct mobj_mm *m = to_mobj_mm(mobj);

	tee_mm_free(m->mm);
	free(m);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_mm_ops __weak __relrodata_unpaged("mobj_mm_ops") = {
	.get_va = mobj_mm_get_va,
	.get_pa = mobj_mm_get_pa,
	.get_phys_offs = mobj_mm_get_phys_offs,
	.get_mem_type = mobj_mm_get_mem_type,
	.matches = mobj_mm_matches,
	.free = mobj_mm_free,
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_mm_ops);
	return container_of(mobj, struct mobj_mm, mobj);
}

struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
			   tee_mm_pool_t *pool)
{
	struct mobj_mm *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;

	m->mm = tee_mm_alloc(pool, size);
	if (!m->mm) {
		free(m);
		return NULL;
	}

	m->parent_mobj = mobj_parent;
	m->mobj.size = size;
	m->mobj.ops = &mobj_mm_ops;
	refcount_set(&m->mobj.refc, 1);

	return &m->mobj;
}
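
/*
 * Usage sketch (not part of the original code, the pool and parent used
 * here are only examples): carving a sub-range out of a parent mobj with
 * a tee_mm pool:
 *
 *	struct mobj *sub = mobj_mm_alloc(mobj_sec_ddr, SMALL_PAGE_SIZE,
 *					 &tee_mm_sec_ddr);
 *
 * Offsets into "sub" are translated by mobj_mm_offs() into offsets
 * relative to the parent before the call is forwarded to the parent
 * mobj's ops.
 */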

/*
 * mobj_shm implementation. A mobj_shm represents a buffer in the
 * predefined non-secure shared memory region:
 * - it is physically contiguous.
 * - it is identified in the static physical layout as MEM_AREA_NSEC_SHM.
 * - it creates mobjs that match the specific CORE_MEM_NSEC_SHM and the
 *   generic non-secure CORE_MEM_NON_SEC attributes.
 */

struct mobj_shm {
	struct mobj mobj;
	paddr_t pa;
	uint64_t cookie;
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj);

static void *mobj_shm_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	if (!mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return phys_to_virt(m->pa + offset, MEM_AREA_NSEC_SHM,
			    mobj->size - offset);
}

static TEE_Result mobj_shm_get_pa(struct mobj *mobj, size_t offs,
				  size_t granule, paddr_t *pa)
{
	struct mobj_shm *m = to_mobj_shm(mobj);
	paddr_t p;

	if (!pa || offs >= mobj->size)
		return TEE_ERROR_GENERIC;

	p = m->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_shm_get_pa);

static size_t mobj_shm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	assert(IS_POWER_OF_TWO(granule));
	return to_mobj_shm(mobj)->pa & (granule - 1);
}

static bool mobj_shm_matches(struct mobj *mobj __unused, enum buf_is_attr attr)
{
	return attr == CORE_MEM_NSEC_SHM || attr == CORE_MEM_NON_SEC;
}

static TEE_Result mobj_shm_get_mem_type(struct mobj *mobj __unused,
					uint32_t *mem_type)
{
	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = TEE_MATTR_MEM_TYPE_CACHED;

	return TEE_SUCCESS;
}

static void mobj_shm_free(struct mobj *mobj)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	free(m);
}

static uint64_t mobj_shm_get_cookie(struct mobj *mobj)
{
	return to_mobj_shm(mobj)->cookie;
}
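
/*
 * Illustration (not part of the original code): mobj_shm_get_phys_offs()
 * reports how far the buffer sits into the requested granule, e.g. for a
 * hypothetical mobj_shm whose pa is 0x80001200:
 *
 *	mobj_get_phys_offs(mobj, SMALL_PAGE_SIZE);  // == 0x200
 */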

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_shm_ops
	__weak __relrodata_unpaged("mobj_shm_ops") = {
	.get_va = mobj_shm_get_va,
	.get_pa = mobj_shm_get_pa,
	.get_phys_offs = mobj_shm_get_phys_offs,
	.get_mem_type = mobj_shm_get_mem_type,
	.matches = mobj_shm_matches,
	.free = mobj_shm_free,
	.get_cookie = mobj_shm_get_cookie,
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_shm_ops);
	return container_of(mobj, struct mobj_shm, mobj);
}

struct mobj *mobj_shm_alloc(paddr_t pa, size_t size, uint64_t cookie)
{
	struct mobj_shm *m;

	if (!core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size))
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_shm_ops;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&m->mobj.refc, 1);
	m->pa = pa;
	m->cookie = cookie;

	return &m->mobj;
}

struct mobj_with_fobj {
	struct fobj *fobj;
	struct file *file;
	struct mobj mobj;
	uint8_t mem_type;
};

const struct mobj_ops mobj_with_fobj_ops;

struct mobj *mobj_with_fobj_alloc(struct fobj *fobj, struct file *file,
				  uint32_t mem_type)
{
	struct mobj_with_fobj *m = NULL;

	assert(!(mem_type & ~TEE_MATTR_MEM_TYPE_MASK));

	if (!fobj)
		return NULL;
	if (mem_type > UINT8_MAX)
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_with_fobj_ops;
	refcount_set(&m->mobj.refc, 1);
	m->mobj.size = fobj->num_pages * SMALL_PAGE_SIZE;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	m->fobj = fobj_get(fobj);
	m->file = file_get(file);
	m->mem_type = mem_type;

	return &m->mobj;
}

static struct mobj_with_fobj *to_mobj_with_fobj(struct mobj *mobj)
{
	assert(mobj && mobj->ops == &mobj_with_fobj_ops);

	return container_of(mobj, struct mobj_with_fobj, mobj);
}
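
/*
 * Usage sketch (not part of the original code): "f" is a hypothetical,
 * previously allocated fobj and "fl" an associated struct file pointer:
 *
 *	struct mobj *m = mobj_with_fobj_alloc(f, fl,
 *					      TEE_MATTR_MEM_TYPE_CACHED);
 *
 * The new mobj takes its own references on the fobj and the file
 * (fobj_get()/file_get()), released again in mobj_with_fobj_free().
 */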

static bool mobj_with_fobj_matches(struct mobj *mobj __maybe_unused,
				   enum buf_is_attr attr)
{
	assert(to_mobj_with_fobj(mobj));

	/*
	 * All fobjs are supposed to be mapped secure so classify them as
	 * CORE_MEM_SEC. Stay out of CORE_MEM_TEE_RAM etc; if that
	 * information is needed it can probably be carried in another way
	 * than to put the burden directly on fobj.
	 */
	return attr == CORE_MEM_SEC;
}

static void mobj_with_fobj_free(struct mobj *mobj)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	fobj_put(m->fobj);
	file_put(m->file);
	free(m);
}

static struct fobj *mobj_with_fobj_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_with_fobj(mobj)->fobj);
}

static TEE_Result mobj_with_fobj_get_mem_type(struct mobj *mobj,
					      uint32_t *mem_type)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	if (!mem_type)
		return TEE_ERROR_GENERIC;

	*mem_type = m->mem_type;

	return TEE_SUCCESS;
}

static TEE_Result mobj_with_fobj_get_pa(struct mobj *mobj, size_t offs,
					size_t granule, paddr_t *pa)
{
	struct mobj_with_fobj *f = to_mobj_with_fobj(mobj);
	paddr_t p = 0;

	if (!f->fobj->ops->get_pa) {
		assert(mobj_is_paged(mobj));
		return TEE_ERROR_NOT_SUPPORTED;
	}

	p = f->fobj->ops->get_pa(f->fobj, offs / SMALL_PAGE_SIZE) +
	    offs % SMALL_PAGE_SIZE;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_with_fobj_get_pa);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_with_fobj_ops
	__weak __relrodata_unpaged("mobj_with_fobj_ops") = {
	.matches = mobj_with_fobj_matches,
	.free = mobj_with_fobj_free,
	.get_fobj = mobj_with_fobj_get_fobj,
	.get_mem_type = mobj_with_fobj_get_mem_type,
	.get_pa = mobj_with_fobj_get_pa,
};

#ifdef CFG_PAGED_USER_TA
bool mobj_is_paged(struct mobj *mobj)
{
	if (mobj->ops == &mobj_with_fobj_ops &&
	    !to_mobj_with_fobj(mobj)->fobj->ops->get_pa)
		return true;

	return false;
}
#endif /*CFG_PAGED_USER_TA*/

static TEE_Result mobj_init(void)
{
	mobj_sec_ddr = mobj_phys_alloc(tee_mm_sec_ddr.lo,
				       tee_mm_sec_ddr.size,
				       TEE_MATTR_MEM_TYPE_CACHED,
				       CORE_MEM_TA_RAM);
	if (!mobj_sec_ddr)
		panic("Failed to register secure ta ram");

	if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
		mobj_tee_ram_rx = mobj_phys_init(0,
						 VCORE_UNPG_RX_SZ,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RX);
		if (!mobj_tee_ram_rx)
			panic("Failed to register tee ram rx");

		mobj_tee_ram_rw = mobj_phys_init(0,
						 VCORE_UNPG_RW_SZ,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram rw");
	} else {
		mobj_tee_ram_rw = mobj_phys_init(TEE_RAM_START,
						 VCORE_UNPG_RW_PA +
						 VCORE_UNPG_RW_SZ -
						 TEE_RAM_START,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram");

		mobj_tee_ram_rx = mobj_tee_ram_rw;
	}

	return TEE_SUCCESS;
}

driver_init_late(mobj_init);
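
/*
 * Illustration (not part of the original code): once mobj_init() has run
 * as a late driver init, core code can query the registered objects
 * through the generic mobj API, e.g.:
 *
 *	paddr_t pa = 0;
 *
 *	if (mobj_get_pa(mobj_sec_ddr, 0, 0, &pa) == TEE_SUCCESS)
 *		; // pa equals tee_mm_sec_ddr.lo, the start of TA RAM
 */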