// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2021, Linaro Limited
 */

#include <assert.h>
#include <config.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/linker.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <optee_msg.h>
#include <stdlib.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

struct mobj *mobj_sec_ddr;
struct mobj *mobj_tee_ram_rx;
struct mobj *mobj_tee_ram_rw;

/*
 * mobj_phys implementation
 */

struct mobj_phys {
	struct mobj mobj;
	enum buf_is_attr battr;
	/* Defined by TEE_MATTR_MEM_TYPE_* in tee_mmu_types.h */
	uint32_t cattr;
	vaddr_t va;
	paddr_t pa;
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj);

static void *mobj_phys_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!moph->va || !mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return (void *)(moph->va + offset);
}

static TEE_Result mobj_phys_get_pa(struct mobj *mobj, size_t offs,
				   size_t granule, paddr_t *pa)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	paddr_t p;

	if (!pa)
		return TEE_ERROR_GENERIC;

	p = moph->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_phys_get_pa);

static TEE_Result mobj_phys_get_cattr(struct mobj *mobj, uint32_t *cattr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	if (!cattr)
		return TEE_ERROR_GENERIC;

	*cattr = moph->cattr;
	return TEE_SUCCESS;
}

static bool mobj_phys_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);
	enum buf_is_attr a;

	a = moph->battr;

	switch (attr) {
	case CORE_MEM_SEC:
		return a == CORE_MEM_SEC || a == CORE_MEM_TEE_RAM ||
		       a == CORE_MEM_TA_RAM || a == CORE_MEM_SDP_MEM;
	case CORE_MEM_NON_SEC:
		return a == CORE_MEM_NSEC_SHM;
	case CORE_MEM_TEE_RAM:
	case CORE_MEM_TA_RAM:
	case CORE_MEM_NSEC_SHM:
	case CORE_MEM_SDP_MEM:
		return attr == a;
	default:
		return false;
	}
}

static void mobj_phys_free(struct mobj *mobj)
{
	struct mobj_phys *moph = to_mobj_phys(mobj);

	free(moph);
}
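
/*
 * Worked example of the granule handling in mobj_phys_get_pa() above,
 * assuming SMALL_PAGE_SIZE is 0x1000 (4 KiB):
 *
 *	p = 0x40101234;
 *	p &= ~(granule - 1);	// with granule == SMALL_PAGE_SIZE: 0x40101000
 *
 * A granule of 0 returns the exact physical address of the offset; any value
 * other than SMALL_PAGE_SIZE or CORE_MMU_PGDIR_SIZE yields TEE_ERROR_GENERIC.
 */
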
/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_phys_ops
	__weak __relrodata_unpaged("mobj_phys_ops") = {
	.get_va = mobj_phys_get_va,
	.get_pa = mobj_phys_get_pa,
	.get_phys_offs = NULL, /* only offset 0 */
	.get_cattr = mobj_phys_get_cattr,
	.matches = mobj_phys_matches,
	.free = mobj_phys_free,
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_phys_ops);
	return container_of(mobj, struct mobj_phys, mobj);
}

static struct mobj *mobj_phys_init(paddr_t pa, size_t size, uint32_t cattr,
				   enum buf_is_attr battr,
				   enum teecore_memtypes area_type)
{
	void *va = NULL;
	struct mobj_phys *moph = NULL;
	struct tee_mmap_region *map = NULL;

	if ((pa & CORE_MMU_USER_PARAM_MASK) ||
	    (size & CORE_MMU_USER_PARAM_MASK)) {
		DMSG("Expect %#x alignment", CORE_MMU_USER_PARAM_SIZE);
		return NULL;
	}

	if (pa) {
		va = phys_to_virt(pa, area_type, size);
	} else {
		map = core_mmu_find_mapping_exclusive(area_type, size);
		if (!map)
			return NULL;

		pa = map->pa;
		va = (void *)map->va;
	}

	/* Only SDP memory may not have a virtual address */
	if (!va && battr != CORE_MEM_SDP_MEM)
		return NULL;

	moph = calloc(1, sizeof(*moph));
	if (!moph)
		return NULL;

	moph->battr = battr;
	moph->cattr = cattr;
	moph->mobj.size = size;
	moph->mobj.ops = &mobj_phys_ops;
	refcount_set(&moph->mobj.refc, 1);
	moph->pa = pa;
	moph->va = (vaddr_t)va;

	return &moph->mobj;
}

struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t cattr,
			     enum buf_is_attr battr)
{
	enum teecore_memtypes area_type;

	switch (battr) {
	case CORE_MEM_TEE_RAM:
		area_type = MEM_AREA_TEE_RAM_RW_DATA;
		break;
	case CORE_MEM_TA_RAM:
		area_type = MEM_AREA_TA_RAM;
		break;
	case CORE_MEM_NSEC_SHM:
		area_type = MEM_AREA_NSEC_SHM;
		break;
	case CORE_MEM_SDP_MEM:
		area_type = MEM_AREA_SDP_MEM;
		break;
	default:
		DMSG("can't allocate with specified attribute");
		return NULL;
	}

	return mobj_phys_init(pa, size, cattr, battr, area_type);
}
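
/*
 * Minimal usage sketch for the mobj_phys API above. The physical address and
 * size are made-up illustration values and must fall inside a region mapped
 * with the matching area type; the calls themselves (mobj_phys_alloc(),
 * mobj_get_va(), mobj_get_pa(), mobj_put()) are the accessors declared in
 * <mm/mobj.h>.
 *
 *	struct mobj *mobj = NULL;
 *	paddr_t pa = 0;
 *	void *va = NULL;
 *
 *	mobj = mobj_phys_alloc(0x42000000, 0x200000,
 *			       TEE_MATTR_MEM_TYPE_CACHED, CORE_MEM_TA_RAM);
 *	if (mobj) {
 *		va = mobj_get_va(mobj, 0, SMALL_PAGE_SIZE);
 *		mobj_get_pa(mobj, 0, SMALL_PAGE_SIZE, &pa);
 *		mobj_put(mobj);
 *	}
 */
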
/*
 * mobj_virt implementation
 */

static void mobj_virt_assert_type(struct mobj *mobj);

static void *mobj_virt_get_va(struct mobj *mobj, size_t offset,
			      size_t len __maybe_unused)
{
	mobj_virt_assert_type(mobj);
	assert(mobj_check_offset_and_len(mobj, offset, len));

	return (void *)(vaddr_t)offset;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_virt_ops
	__weak __relrodata_unpaged("mobj_virt_ops") = {
	.get_va = mobj_virt_get_va,
};

static void mobj_virt_assert_type(struct mobj *mobj __maybe_unused)
{
	assert(mobj->ops == &mobj_virt_ops);
}

struct mobj mobj_virt = { .ops = &mobj_virt_ops, .size = SIZE_MAX };

/*
 * mobj_mm implementation
 */

struct mobj_mm {
	tee_mm_entry_t *mm;
	struct mobj *parent_mobj;
	struct mobj mobj;
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj);

static size_t mobj_mm_offs(struct mobj *mobj, size_t offs)
{
	tee_mm_entry_t *mm = to_mobj_mm(mobj)->mm;

	return (mm->offset << mm->pool->shift) + offs;
}

static void *mobj_mm_get_va(struct mobj *mobj, size_t offs, size_t len)
{
	return mobj_get_va(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs), len);
}

static TEE_Result mobj_mm_get_pa(struct mobj *mobj, size_t offs,
				 size_t granule, paddr_t *pa)
{
	return mobj_get_pa(to_mobj_mm(mobj)->parent_mobj,
			   mobj_mm_offs(mobj, offs), granule, pa);
}
DECLARE_KEEP_PAGER(mobj_mm_get_pa);

static size_t mobj_mm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	return mobj_get_phys_offs(to_mobj_mm(mobj)->parent_mobj, granule);
}

static TEE_Result mobj_mm_get_cattr(struct mobj *mobj, uint32_t *cattr)
{
	return mobj_get_cattr(to_mobj_mm(mobj)->parent_mobj, cattr);
}

static bool mobj_mm_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	return mobj_matches(to_mobj_mm(mobj)->parent_mobj, attr);
}

static void mobj_mm_free(struct mobj *mobj)
{
	struct mobj_mm *m = to_mobj_mm(mobj);

	tee_mm_free(m->mm);
	free(m);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_mm_ops __weak __relrodata_unpaged("mobj_mm_ops") = {
	.get_va = mobj_mm_get_va,
	.get_pa = mobj_mm_get_pa,
	.get_phys_offs = mobj_mm_get_phys_offs,
	.get_cattr = mobj_mm_get_cattr,
	.matches = mobj_mm_matches,
	.free = mobj_mm_free,
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_mm_ops);
	return container_of(mobj, struct mobj_mm, mobj);
}

struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
			   tee_mm_pool_t *pool)
{
	struct mobj_mm *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;

	m->mm = tee_mm_alloc(pool, size);
	if (!m->mm) {
		free(m);
		return NULL;
	}

	m->parent_mobj = mobj_parent;
	m->mobj.size = size;
	m->mobj.ops = &mobj_mm_ops;
	refcount_set(&m->mobj.refc, 1);

	return &m->mobj;
}
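
/*
 * Minimal usage sketch for mobj_mm_alloc() above: carve a sub-range out of a
 * parent mobj, with the offset managed by a tee_mm pool. Pairing the
 * mobj_sec_ddr parent with the tee_mm_sec_ddr pool is only an illustration;
 * any parent/pool pair covering the same range behaves the same way.
 *
 *	struct mobj *m = mobj_mm_alloc(mobj_sec_ddr, 4 * SMALL_PAGE_SIZE,
 *				       &tee_mm_sec_ddr);
 *
 *	// get_va()/get_pa() on m add the pool offset and forward to the
 *	// parent mobj; mobj_put(m) releases the tee_mm entry again via
 *	// mobj_mm_free().
 */
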
/*
 * mobj_shm implementation. A mobj_shm represents a buffer in the predefined
 * non-secure shared memory region:
 * - it is physically contiguous.
 * - it is identified in the static physical layout as MEM_AREA_NSEC_SHM.
 * - its mobjs match both the specific CORE_MEM_NSEC_SHM attribute and the
 *   generic non-secure CORE_MEM_NON_SEC attribute.
 */

struct mobj_shm {
	struct mobj mobj;
	paddr_t pa;
	uint64_t cookie;
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj);

static void *mobj_shm_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	if (!mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return phys_to_virt(m->pa + offset, MEM_AREA_NSEC_SHM,
			    mobj->size - offset);
}

static TEE_Result mobj_shm_get_pa(struct mobj *mobj, size_t offs,
				  size_t granule, paddr_t *pa)
{
	struct mobj_shm *m = to_mobj_shm(mobj);
	paddr_t p;

	if (!pa || offs >= mobj->size)
		return TEE_ERROR_GENERIC;

	p = m->pa + offs;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;
	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_shm_get_pa);

static size_t mobj_shm_get_phys_offs(struct mobj *mobj, size_t granule)
{
	assert(IS_POWER_OF_TWO(granule));
	return to_mobj_shm(mobj)->pa & (granule - 1);
}

static bool mobj_shm_matches(struct mobj *mobj __unused, enum buf_is_attr attr)
{
	return attr == CORE_MEM_NSEC_SHM || attr == CORE_MEM_NON_SEC;
}

static TEE_Result mobj_shm_get_cattr(struct mobj *mobj __unused,
				     uint32_t *cattr)
{
	if (!cattr)
		return TEE_ERROR_GENERIC;

	*cattr = TEE_MATTR_MEM_TYPE_CACHED;

	return TEE_SUCCESS;
}

static void mobj_shm_free(struct mobj *mobj)
{
	struct mobj_shm *m = to_mobj_shm(mobj);

	free(m);
}

static uint64_t mobj_shm_get_cookie(struct mobj *mobj)
{
	return to_mobj_shm(mobj)->cookie;
}
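
/*
 * Minimal usage sketch for the mobj_shm API: wrap a chunk of non-secure
 * shared memory identified by a physical address and the cookie the normal
 * world uses to refer to it. The nsec_pa, size and cookie values are made up
 * for illustration; mobj_shm_alloc() is defined further down in this file
 * and mobj_get_cookie() is the wrapper around the get_cookie op.
 *
 *	struct mobj *m = mobj_shm_alloc(nsec_pa, SMALL_PAGE_SIZE, cookie);
 *
 *	if (m) {
 *		void *va = mobj_get_va(m, 0, SMALL_PAGE_SIZE);
 *		uint64_t c = mobj_get_cookie(m);	// == cookie
 *
 *		mobj_put(m);
 *	}
 */
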
/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_shm_ops
	__weak __relrodata_unpaged("mobj_shm_ops") = {
	.get_va = mobj_shm_get_va,
	.get_pa = mobj_shm_get_pa,
	.get_phys_offs = mobj_shm_get_phys_offs,
	.get_cattr = mobj_shm_get_cattr,
	.matches = mobj_shm_matches,
	.free = mobj_shm_free,
	.get_cookie = mobj_shm_get_cookie,
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_shm_ops);
	return container_of(mobj, struct mobj_shm, mobj);
}

struct mobj *mobj_shm_alloc(paddr_t pa, size_t size, uint64_t cookie)
{
	struct mobj_shm *m;

	if (!core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size))
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_shm_ops;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&m->mobj.refc, 1);
	m->pa = pa;
	m->cookie = cookie;

	return &m->mobj;
}

#ifdef CFG_PAGED_USER_TA
/*
 * mobj_seccpy_shm implementation
 */

struct mobj_seccpy_shm {
	struct user_ta_ctx *utc;
	vaddr_t va;
	struct mobj mobj;
	struct fobj *fobj;
};

static bool __maybe_unused mobj_is_seccpy_shm(struct mobj *mobj);

static struct mobj_seccpy_shm *to_mobj_seccpy_shm(struct mobj *mobj)
{
	assert(mobj_is_seccpy_shm(mobj));
	return container_of(mobj, struct mobj_seccpy_shm, mobj);
}

static void *mobj_seccpy_shm_get_va(struct mobj *mobj, size_t offs, size_t len)
{
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

	if (&m->utc->ta_ctx.ts_ctx != thread_get_tsd()->ctx)
		return NULL;

	if (!mobj_check_offset_and_len(mobj, offs, len))
		return NULL;
	return (void *)(m->va + offs);
}

static bool mobj_seccpy_shm_matches(struct mobj *mobj __maybe_unused,
				    enum buf_is_attr attr)
{
	assert(mobj_is_seccpy_shm(mobj));

	return attr == CORE_MEM_SEC || attr == CORE_MEM_TEE_RAM;
}

static void mobj_seccpy_shm_free(struct mobj *mobj)
{
	struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

	tee_pager_rem_um_region(&m->utc->uctx, m->va, mobj->size);
	vm_rem_rwmem(&m->utc->uctx, mobj, m->va);
	fobj_put(m->fobj);
	free(m);
}

static struct fobj *mobj_seccpy_shm_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_seccpy_shm(mobj)->fobj);
}
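
/*
 * Minimal usage sketch for mobj_seccpy_shm, allocated with
 * mobj_seccpy_shm_alloc() below: a pager-backed scratch buffer that is
 * private to the user TA context that created it. The size and length
 * values are made up for illustration.
 *
 *	// Must run with a user TA context as the current thread context
 *	struct mobj *m = mobj_seccpy_shm_alloc(2 * SMALL_PAGE_SIZE);
 *	void *va = NULL;
 *
 *	if (m)
 *		va = mobj_get_va(m, 0, SMALL_PAGE_SIZE);
 *	// get_va() returns NULL if called while another context is active,
 *	// see the context check in mobj_seccpy_shm_get_va() above.
 */
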
/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_seccpy_shm_ops
	__weak __relrodata_unpaged("mobj_seccpy_shm_ops") = {
	.get_va = mobj_seccpy_shm_get_va,
	.matches = mobj_seccpy_shm_matches,
	.free = mobj_seccpy_shm_free,
	.get_fobj = mobj_seccpy_shm_get_fobj,
};

static bool mobj_is_seccpy_shm(struct mobj *mobj)
{
	return mobj && mobj->ops == &mobj_seccpy_shm_ops;
}

struct mobj *mobj_seccpy_shm_alloc(size_t size)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct mobj_seccpy_shm *m;
	struct user_ta_ctx *utc;
	vaddr_t va = 0;

	if (!is_user_ta_ctx(tsd->ctx))
		return NULL;
	utc = to_user_ta_ctx(tsd->ctx);

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.size = size;
	m->mobj.ops = &mobj_seccpy_shm_ops;
	refcount_set(&m->mobj.refc, 1);

	if (vm_add_rwmem(&utc->uctx, &m->mobj, &va) != TEE_SUCCESS)
		goto bad;

	m->fobj = fobj_rw_paged_alloc(ROUNDUP(size, SMALL_PAGE_SIZE) /
				      SMALL_PAGE_SIZE);
	if (tee_pager_add_um_region(&utc->uctx, va, m->fobj,
				    TEE_MATTR_PRW | TEE_MATTR_URW))
		goto bad;

	m->va = va;
	m->utc = to_user_ta_ctx(tsd->ctx);
	return &m->mobj;
bad:
	if (va)
		vm_rem_rwmem(&utc->uctx, &m->mobj, va);
	fobj_put(m->fobj);
	free(m);
	return NULL;
}

#endif /*CFG_PAGED_USER_TA*/

struct mobj_with_fobj {
	struct fobj *fobj;
	struct file *file;
	struct mobj mobj;
};

const struct mobj_ops mobj_with_fobj_ops;

struct mobj *mobj_with_fobj_alloc(struct fobj *fobj, struct file *file)
{
	struct mobj_with_fobj *m = NULL;

	if (!fobj)
		return NULL;

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_with_fobj_ops;
	refcount_set(&m->mobj.refc, 1);
	m->mobj.size = fobj->num_pages * SMALL_PAGE_SIZE;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;
	m->fobj = fobj_get(fobj);
	m->file = file_get(file);

	return &m->mobj;
}
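
/*
 * Minimal usage sketch for mobj_with_fobj_alloc() above: wrap a fobj (and
 * optionally the file it was loaded from) in a mobj so it can be mapped with
 * the regular vm_* API. Allocating a 4-page rw-paged fobj is only an
 * illustration and assumes a pager-enabled configuration; any fobj
 * implementation works, and file may be NULL.
 *
 *	struct fobj *f = fobj_rw_paged_alloc(4);
 *	struct mobj *m = NULL;
 *
 *	if (f) {
 *		m = mobj_with_fobj_alloc(f, NULL);
 *		fobj_put(f);	// the mobj holds its own reference
 *	}
 */
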
static struct mobj_with_fobj *to_mobj_with_fobj(struct mobj *mobj)
{
	assert(mobj && mobj->ops == &mobj_with_fobj_ops);

	return container_of(mobj, struct mobj_with_fobj, mobj);
}

static bool mobj_with_fobj_matches(struct mobj *mobj __maybe_unused,
				   enum buf_is_attr attr)
{
	assert(to_mobj_with_fobj(mobj));

	/*
	 * All fobjs are supposed to be mapped secure so classify them as
	 * CORE_MEM_SEC. Stay out of CORE_MEM_TEE_RAM etc.; if that
	 * information is needed it can probably be carried in some other
	 * way than putting the burden directly on fobj.
	 */
	return attr == CORE_MEM_SEC;
}

static void mobj_with_fobj_free(struct mobj *mobj)
{
	struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

	fobj_put(m->fobj);
	file_put(m->file);
	free(m);
}

static struct fobj *mobj_with_fobj_get_fobj(struct mobj *mobj)
{
	return fobj_get(to_mobj_with_fobj(mobj)->fobj);
}

static TEE_Result mobj_with_fobj_get_cattr(struct mobj *mobj __unused,
					   uint32_t *cattr)
{
	if (!cattr)
		return TEE_ERROR_GENERIC;

	/* All fobjs are mapped as normal cached memory */
	*cattr = TEE_MATTR_MEM_TYPE_CACHED;

	return TEE_SUCCESS;
}

static TEE_Result mobj_with_fobj_get_pa(struct mobj *mobj, size_t offs,
					size_t granule, paddr_t *pa)
{
	struct mobj_with_fobj *f = to_mobj_with_fobj(mobj);
	paddr_t p = 0;

	if (!f->fobj->ops->get_pa) {
		assert(mobj_is_paged(mobj));
		return TEE_ERROR_NOT_SUPPORTED;
	}

	p = f->fobj->ops->get_pa(f->fobj, offs / SMALL_PAGE_SIZE) +
	    offs % SMALL_PAGE_SIZE;

	if (granule) {
		if (granule != SMALL_PAGE_SIZE &&
		    granule != CORE_MMU_PGDIR_SIZE)
			return TEE_ERROR_GENERIC;
		p &= ~(granule - 1);
	}

	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_with_fobj_get_pa);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_with_fobj_ops
	__weak __relrodata_unpaged("mobj_with_fobj_ops") = {
	.matches = mobj_with_fobj_matches,
	.free = mobj_with_fobj_free,
	.get_fobj = mobj_with_fobj_get_fobj,
	.get_cattr = mobj_with_fobj_get_cattr,
	.get_pa = mobj_with_fobj_get_pa,
};

#ifdef CFG_PAGED_USER_TA
bool mobj_is_paged(struct mobj *mobj)
{
	if (mobj->ops == &mobj_seccpy_shm_ops)
		return true;

	if (mobj->ops == &mobj_with_fobj_ops &&
	    !to_mobj_with_fobj(mobj)->fobj->ops->get_pa)
		return true;

	return false;
}
#endif /*CFG_PAGED_USER_TA*/

static TEE_Result mobj_init(void)
{
	mobj_sec_ddr = mobj_phys_alloc(tee_mm_sec_ddr.lo,
				       tee_mm_sec_ddr.size,
				       TEE_MATTR_MEM_TYPE_CACHED,
				       CORE_MEM_TA_RAM);
	if (!mobj_sec_ddr)
		panic("Failed to register secure ta ram");

	if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
		mobj_tee_ram_rx = mobj_phys_init(0,
						 VCORE_UNPG_RX_SZ,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RX);
		if (!mobj_tee_ram_rx)
			panic("Failed to register tee ram rx");

		mobj_tee_ram_rw = mobj_phys_init(0,
						 VCORE_UNPG_RW_SZ,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram rw");
	} else {
		mobj_tee_ram_rw = mobj_phys_init(TEE_RAM_START,
						 VCORE_UNPG_RW_PA +
						 VCORE_UNPG_RW_SZ -
						 TEE_RAM_START,
						 TEE_MATTR_MEM_TYPE_CACHED,
						 CORE_MEM_TEE_RAM,
						 MEM_AREA_TEE_RAM_RW_DATA);
		if (!mobj_tee_ram_rw)
			panic("Failed to register tee ram");

		mobj_tee_ram_rx = mobj_tee_ram_rw;
	}

	return TEE_SUCCESS;
}

driver_init_late(mobj_init);