// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2021, Linaro Limited
 */

#include <assert.h>
#include <config.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/linker.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <optee_msg.h>
#include <sm/optee_smc.h>
#include <stdlib.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

struct mobj *mobj_sec_ddr;
struct mobj *mobj_tee_ram_rx;
struct mobj *mobj_tee_ram_rw;

/*
 * mobj_phys implementation
 */

struct mobj_phys {
	struct mobj mobj;
	enum buf_is_attr battr;
	uint32_t cattr; /* Defined by TEE_MATTR_CACHE_* in tee_mmu_types.h */
	vaddr_t va;
	paddr_t pa;
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj);

static void *mobj_phys_get_va(struct mobj *mobj, size_t offset)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!moph->va || offset >= mobj->size)
		return NULL;

	return (void *)(moph->va + offset);
}

static TEE_Result mobj_phys_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	paddr_t p;

	if (!pa)
		return TEE_ERROR_GENERIC;

	p = moph->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_phys_get_pa);

static TEE_Result mobj_phys_get_cattr(struct mobj *mobj, uint32_t *cattr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!cattr)
		return TEE_ERROR_GENERIC;

	*cattr = moph->cattr;
	return TEE_SUCCESS;
}

static bool mobj_phys_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	enum buf_is_attr a;

	a = moph->battr;

	switch (attr) {
	case CORE_MEM_SEC:
		return a == CORE_MEM_SEC || a == CORE_MEM_TEE_RAM ||
		       a == CORE_MEM_TA_RAM || a == CORE_MEM_SDP_MEM;
	case CORE_MEM_NON_SEC:
		return a == CORE_MEM_NSEC_SHM;
	case CORE_MEM_TEE_RAM:
	case CORE_MEM_TA_RAM:
	case CORE_MEM_NSEC_SHM:
	case CORE_MEM_SDP_MEM:
		return attr == a;
	default:
		return false;
	}
}

static void mobj_phys_free(struct mobj *mobj)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	free(moph);
}
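/*
 * Example (sketch, all values illustrative): a platform that knows about a
 * physically contiguous, CORE_MMU_USER_PARAM_SIZE aligned secure buffer can
 * wrap it with mobj_phys_alloc() below and then query page aligned physical
 * addresses through the ops above:
 *
 *	struct mobj *m = mobj_phys_alloc(pa, size, TEE_MATTR_CACHE_CACHED,
 *					 CORE_MEM_SDP_MEM);
 *	paddr_t page_pa = 0;
 *
 *	if (!m || mobj_get_pa(m, 0, SMALL_PAGE_SIZE, &page_pa))
 *		return TEE_ERROR_GENERIC;
 */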
/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_phys_ops __weak __rodata_unpaged("mobj_phys_ops") = {
	.get_va = mobj_phys_get_va,
	.get_pa = mobj_phys_get_pa,
	.get_phys_offs = NULL, /* only offset 0 */
	.get_cattr = mobj_phys_get_cattr,
	.matches = mobj_phys_matches,
	.free = mobj_phys_free,
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_phys_ops);
	return container_of(mobj, struct mobj_phys, mobj);
}

static struct mobj *mobj_phys_init(paddr_t pa, size_t size, uint32_t cattr,
				   enum buf_is_attr battr,
				   enum teecore_memtypes area_type)
{
	void *va = NULL;
	struct mobj_phys *moph = NULL;
	struct tee_mmap_region *map = NULL;

	if ((pa & CORE_MMU_USER_PARAM_MASK) ||
	    (size & CORE_MMU_USER_PARAM_MASK)) {
		DMSG("Expect %#x alignment", CORE_MMU_USER_PARAM_SIZE);
		return NULL;
	}

	if (pa) {
		va = phys_to_virt(pa, area_type);
	} else {
		map = core_mmu_find_mapping_exclusive(area_type, size);
		if (!map)
			return NULL;

		pa = map->pa;
		va = (void *)map->va;
	}

	/* Only SDP memory may not have a virtual address */
	if (!va && battr != CORE_MEM_SDP_MEM)
		return NULL;

	moph = calloc(1, sizeof(*moph));
	if (!moph)
		return NULL;

	moph->battr = battr;
	moph->cattr = cattr;
	moph->mobj.size = size;
	moph->mobj.ops = &mobj_phys_ops;
	refcount_set(&moph->mobj.refc, 1);
	moph->pa = pa;
	moph->va = (vaddr_t)va;

	return &moph->mobj;
}

struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t cattr,
			     enum buf_is_attr battr)
{
	enum teecore_memtypes area_type;

	switch (battr) {
	case CORE_MEM_TEE_RAM:
		area_type = MEM_AREA_TEE_RAM_RW_DATA;
		break;
	case CORE_MEM_TA_RAM:
		area_type = MEM_AREA_TA_RAM;
		break;
	case CORE_MEM_NSEC_SHM:
		area_type = MEM_AREA_NSEC_SHM;
		break;
	case CORE_MEM_SDP_MEM:
		area_type = MEM_AREA_SDP_MEM;
		break;
	default:
		DMSG("can't allocate with specified attribute");
		return NULL;
	}

	return mobj_phys_init(pa, size, cattr, battr, area_type);
}

/*
 * mobj_virt implementation
 */

static void mobj_virt_assert_type(struct mobj *mobj);

static void *mobj_virt_get_va(struct mobj *mobj, size_t offset)
{
	mobj_virt_assert_type(mobj);

	return (void *)(vaddr_t)offset;
}
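/*
 * mobj_virt covers all of virtual memory, so the "offset" passed to
 * mobj_get_va() is interpreted as the virtual address itself, e.g.
 * (illustrative only):
 *
 *	void *va = mobj_get_va(&mobj_virt, (vaddr_t)some_kernel_buffer);
 */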
/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_virt_ops __weak __rodata_unpaged("mobj_virt_ops") = {
	.get_va = mobj_virt_get_va,
};

static void mobj_virt_assert_type(struct mobj *mobj __maybe_unused)
{
	assert(mobj->ops == &mobj_virt_ops);
}

struct mobj mobj_virt = { .ops = &mobj_virt_ops, .size = SIZE_MAX };

/*
 * mobj_mm implementation
 */

struct mobj_mm {
	tee_mm_entry_t *mm;
	struct mobj *parent_mobj;
	struct mobj mobj;
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj);

static size_t mobj_mm_offs(struct mobj *mobj, size_t offs)
{
	tee_mm_entry_t *mm = to_mobj_mm(mobj)->mm;

	return (mm->offset << mm->pool->shift) + offs;
}

static void *mobj_mm_get_va(struct mobj *mobj, size_t offs)
{
	return mobj_get_va(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs));
}

static TEE_Result mobj_mm_get_pa(struct mobj *mobj, size_t offs,
				 size_t granule, paddr_t *pa)
{
	return mobj_get_pa(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs), granule, pa);
}
DECLARE_KEEP_PAGER(mobj_mm_get_pa);

static size_t mobj_mm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	return mobj_get_phys_offs(to_mobj_mm(mobj)->parent_mobj, granule);
}

static TEE_Result mobj_mm_get_cattr(struct mobj *mobj, uint32_t *cattr)
{
	return mobj_get_cattr(to_mobj_mm(mobj)->parent_mobj, cattr);
}

static bool mobj_mm_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	return mobj_matches(to_mobj_mm(mobj)->parent_mobj, attr);
}

static void mobj_mm_free(struct mobj *mobj)
{
	struct mobj_mm *m = to_mobj_mm(mobj);

	tee_mm_free(m->mm);
	free(m);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_mm_ops __weak __rodata_unpaged("mobj_mm_ops") = {
	.get_va = mobj_mm_get_va,
	.get_pa = mobj_mm_get_pa,
	.get_phys_offs = mobj_mm_get_phys_offs,
	.get_cattr = mobj_mm_get_cattr,
	.matches = mobj_mm_matches,
	.free = mobj_mm_free,
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_mm_ops);
	return container_of(mobj, struct mobj_mm, mobj);
}

struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
			   tee_mm_pool_t *pool)
{
	struct mobj_mm *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;

	m->mm = tee_mm_alloc(pool, size);
	if (!m->mm) {
		free(m);
		return NULL;
	}

	m->parent_mobj = mobj_parent;
	m->mobj.size = size;
	m->mobj.ops = &mobj_mm_ops;
	refcount_set(&m->mobj.refc, 1);

	return &m->mobj;
}
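/*
 * Example (sketch): mobj_mm_alloc() carves a sub-range out of a parent mobj
 * using a tee_mm pool that covers the same memory; the pool and parent below
 * are chosen for illustration only:
 *
 *	struct mobj *m = mobj_mm_alloc(mobj_sec_ddr, 4 * SMALL_PAGE_SIZE,
 *				       &tee_mm_sec_ddr);
 *
 * Offsets into the returned mobj are rebased with mobj_mm_offs() before the
 * requests are forwarded to the parent mobj.
 */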
/*
 * mobj_shm implementation. mobj_shm represents a buffer in the predefined
 * SHM region.
 * - It is physically contiguous.
 * - It is identified in the static physical layout as MEM_AREA_NSEC_SHM.
 * - The mobjs created here match both the specific CORE_MEM_NSEC_SHM
 *   attribute and the generic non-secure CORE_MEM_NON_SEC attribute.
 */

struct mobj_shm {
	struct mobj mobj;
	paddr_t pa;
	uint64_t cookie;
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj);

static void *mobj_shm_get_va(struct mobj *mobj, size_t offset)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	if (offset >= mobj->size)
		return NULL;

	return phys_to_virt(m->pa + offset, MEM_AREA_NSEC_SHM);
}

static TEE_Result mobj_shm_get_pa(struct mobj *mobj, size_t offs,
				  size_t granule, paddr_t *pa)
{
	struct mobj_shm *m = to_mobj_shm(mobj);
	paddr_t p;

	if (!pa || offs >= mobj->size)
		return TEE_ERROR_GENERIC;

	p = m->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_shm_get_pa);

static size_t mobj_shm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	assert(IS_POWER_OF_TWO(granule));
	return to_mobj_shm(mobj)->pa & (granule - 1);
}

static bool mobj_shm_matches(struct mobj *mobj __unused, enum buf_is_attr attr)
{
	return attr == CORE_MEM_NSEC_SHM || attr == CORE_MEM_NON_SEC;
}

static void mobj_shm_free(struct mobj *mobj)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	free(m);
}

static uint64_t mobj_shm_get_cookie(struct mobj *mobj)
{
	return to_mobj_shm(mobj)->cookie;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_shm_ops __weak __rodata_unpaged("mobj_shm_ops") = {
	.get_va = mobj_shm_get_va,
	.get_pa = mobj_shm_get_pa,
	.get_phys_offs = mobj_shm_get_phys_offs,
	.matches = mobj_shm_matches,
	.free = mobj_shm_free,
	.get_cookie = mobj_shm_get_cookie,
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_shm_ops);
	return container_of(mobj, struct mobj_shm, mobj);
}

struct mobj *mobj_shm_alloc(paddr_t pa, size_t size, uint64_t cookie)
{
	struct mobj_shm *m;

	if (!core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size))
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_shm_ops;
	refcount_set(&m->mobj.refc, 1);
	m->pa = pa;
	m->cookie = cookie;

	return &m->mobj;
}

#ifdef CFG_PAGED_USER_TA
/*
 * mobj_seccpy_shm implementation
 */

struct mobj_seccpy_shm {
	struct user_ta_ctx *utc;
	vaddr_t va;
	struct mobj mobj;
	struct fobj *fobj;
};

static bool __maybe_unused mobj_is_seccpy_shm(struct mobj *mobj);

static struct mobj_seccpy_shm *to_mobj_seccpy_shm(struct mobj *mobj)
{
	assert(mobj_is_seccpy_shm(mobj));
	return container_of(mobj, struct mobj_seccpy_shm, mobj);
}

static void *mobj_seccpy_shm_get_va(struct mobj *mobj, size_t offs)
{
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

	if (&m->utc->ta_ctx.ts_ctx != thread_get_tsd()->ctx)
		return NULL;

	if (offs >= mobj->size)
		return NULL;
	return (void *)(m->va + offs);
}

static bool mobj_seccpy_shm_matches(struct mobj *mobj __maybe_unused,
				    enum buf_is_attr attr)
{
	assert(mobj_is_seccpy_shm(mobj));

	return attr == CORE_MEM_SEC || attr == CORE_MEM_TEE_RAM;
}
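/*
 * Tears down the per-TA mapping: the pager region is removed, the virtual
 * memory range is released with vm_rem_rwmem() and the backing fobj
 * reference is dropped.
 */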
static void mobj_seccpy_shm_free(struct mobj *mobj)
{
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

	tee_pager_rem_um_region(&m->utc->uctx, m->va, mobj->size);
	vm_rem_rwmem(&m->utc->uctx, mobj, m->va);
	fobj_put(m->fobj);
	free(m);
}

static struct fobj *mobj_seccpy_shm_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_seccpy_shm(mobj)->fobj);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_seccpy_shm_ops
	__weak __rodata_unpaged("mobj_seccpy_shm_ops") = {
	.get_va = mobj_seccpy_shm_get_va,
	.matches = mobj_seccpy_shm_matches,
	.free = mobj_seccpy_shm_free,
	.get_fobj = mobj_seccpy_shm_get_fobj,
};

static bool mobj_is_seccpy_shm(struct mobj *mobj)
{
	return mobj && mobj->ops == &mobj_seccpy_shm_ops;
}

struct mobj *mobj_seccpy_shm_alloc(size_t size)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct mobj_seccpy_shm *m;
	struct user_ta_ctx *utc;
	vaddr_t va = 0;

	if (!is_user_ta_ctx(tsd->ctx))
		return NULL;
	utc = to_user_ta_ctx(tsd->ctx);

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_seccpy_shm_ops;
	refcount_set(&m->mobj.refc, 1);

	if (vm_add_rwmem(&utc->uctx, &m->mobj, &va) != TEE_SUCCESS)
		goto bad;

	m->fobj = fobj_rw_paged_alloc(ROUNDUP(size, SMALL_PAGE_SIZE) /
				      SMALL_PAGE_SIZE);
	if (tee_pager_add_um_region(&utc->uctx, va, m->fobj,
				    TEE_MATTR_PRW | TEE_MATTR_URW))
		goto bad;

	m->va = va;
	m->utc = to_user_ta_ctx(tsd->ctx);
	return &m->mobj;
bad:
	if (va)
		vm_rem_rwmem(&utc->uctx, &m->mobj, va);
	fobj_put(m->fobj);
	free(m);
	return NULL;
}

#endif /*CFG_PAGED_USER_TA*/

struct mobj_with_fobj {
	struct fobj *fobj;
	struct file *file;
	struct mobj mobj;
};

const struct mobj_ops mobj_with_fobj_ops;

struct mobj *mobj_with_fobj_alloc(struct fobj *fobj, struct file *file)
{
	struct mobj_with_fobj *m = NULL;

	if (!fobj)
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_with_fobj_ops;
	refcount_set(&m->mobj.refc, 1);
	m->mobj.size = fobj->num_pages * SMALL_PAGE_SIZE;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	m->fobj = fobj_get(fobj);
	m->file = file_get(file);

	return &m->mobj;
}
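/*
 * Example (sketch, the fobj and file values are purely illustrative):
 *
 *	struct mobj *m = mobj_with_fobj_alloc(fobj, file);
 *
 *	fobj_put(fobj);
 *
 * mobj_with_fobj_alloc() takes its own references on both the fobj and the
 * file, so the caller may drop its own references once the mobj is created.
 */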
static struct mobj_with_fobj *to_mobj_with_fobj(struct mobj *mobj)
{
	assert(mobj && mobj->ops == &mobj_with_fobj_ops);

	return container_of(mobj, struct mobj_with_fobj, mobj);
}

static bool mobj_with_fobj_matches(struct mobj *mobj __maybe_unused,
				   enum buf_is_attr attr)
{
	assert(to_mobj_with_fobj(mobj));

	/*
	 * All fobjs are supposed to be mapped secure, so classify them as
	 * CORE_MEM_SEC. Stay out of CORE_MEM_TEE_RAM etc.; if that
	 * information is needed it can probably be carried in another way
	 * than putting the burden directly on fobj.
	 */
	return attr == CORE_MEM_SEC;
}

static void mobj_with_fobj_free(struct mobj *mobj)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	fobj_put(m->fobj);
	file_put(m->file);
	free(m);
}

static struct fobj *mobj_with_fobj_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_with_fobj(mobj)->fobj);
}

static TEE_Result mobj_with_fobj_get_cattr(struct mobj *mobj __unused,
					   uint32_t *cattr)
{
	if (!cattr)
		return TEE_ERROR_GENERIC;

	/* All fobjs are mapped as normal cached memory */
	*cattr = TEE_MATTR_CACHE_CACHED;

	return TEE_SUCCESS;
}

static TEE_Result mobj_with_fobj_get_pa(struct mobj *mobj, size_t offs,
					size_t granule, paddr_t *pa)
{
	struct mobj_with_fobj *f = to_mobj_with_fobj(mobj);
	paddr_t p = 0;

	if (!f->fobj->ops->get_pa) {
		assert(mobj_is_paged(mobj));
		return TEE_ERROR_NOT_SUPPORTED;
	}

	p = f->fobj->ops->get_pa(f->fobj, offs / SMALL_PAGE_SIZE) +
	    offs % SMALL_PAGE_SIZE;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_with_fobj_get_pa);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_with_fobj_ops
	__weak __rodata_unpaged("mobj_with_fobj_ops") = {
	.matches = mobj_with_fobj_matches,
	.free = mobj_with_fobj_free,
	.get_fobj = mobj_with_fobj_get_fobj,
	.get_cattr = mobj_with_fobj_get_cattr,
	.get_pa = mobj_with_fobj_get_pa,
};

#ifdef CFG_PAGED_USER_TA
bool mobj_is_paged(struct mobj *mobj)
{
	if (mobj->ops == &mobj_seccpy_shm_ops)
		return true;

	if (mobj->ops == &mobj_with_fobj_ops &&
	    !to_mobj_with_fobj(mobj)->fobj->ops->get_pa)
		return true;

	return false;
}
#endif /*CFG_PAGED_USER_TA*/

static TEE_Result mobj_init(void)
{
	mobj_sec_ddr = mobj_phys_alloc(tee_mm_sec_ddr.lo,
				       tee_mm_sec_ddr.hi - tee_mm_sec_ddr.lo,
				       OPTEE_SMC_SHM_CACHED, CORE_MEM_TA_RAM);
	if (!mobj_sec_ddr)
		panic("Failed to register secure ta ram");

	if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
		mobj_tee_ram_rx = mobj_phys_init(0,
						 VCORE_UNPG_RX_SZ,
						 TEE_MATTR_CACHE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RX);
		if (!mobj_tee_ram_rx)
			panic("Failed to register tee ram rx");

		mobj_tee_ram_rw = mobj_phys_init(0,
						 VCORE_UNPG_RW_SZ,
						 TEE_MATTR_CACHE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram rw");
	} else {
		mobj_tee_ram_rw = mobj_phys_init(TEE_RAM_START,
						 VCORE_UNPG_RW_PA +
						 VCORE_UNPG_RW_SZ -
						 TEE_RAM_START,
						 TEE_MATTR_CACHE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram");

		mobj_tee_ram_rx = mobj_tee_ram_rw;
	}

	return TEE_SUCCESS;
}

driver_init_late(mobj_init);