1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2016, 2022 Linaro Limited 4 * Copyright (c) 2014, STMicroelectronics International N.V. 5 * Copyright (c) 2022, Arm Limited and Contributors. All rights reserved. 6 */ 7 8 #include <assert.h> 9 #include <config.h> 10 #include <kernel/boot.h> 11 #include <kernel/dt.h> 12 #include <kernel/linker.h> 13 #include <kernel/panic.h> 14 #include <kernel/spinlock.h> 15 #include <kernel/tee_l2cc_mutex.h> 16 #include <kernel/tee_misc.h> 17 #include <kernel/tlb_helpers.h> 18 #include <kernel/user_mode_ctx.h> 19 #include <kernel/virtualization.h> 20 #include <libfdt.h> 21 #include <memtag.h> 22 #include <mm/core_memprot.h> 23 #include <mm/core_mmu.h> 24 #include <mm/mobj.h> 25 #include <mm/pgt_cache.h> 26 #include <mm/tee_pager.h> 27 #include <mm/vm.h> 28 #include <platform_config.h> 29 #include <stdalign.h> 30 #include <string.h> 31 #include <trace.h> 32 #include <util.h> 33 34 #ifndef DEBUG_XLAT_TABLE 35 #define DEBUG_XLAT_TABLE 0 36 #endif 37 38 #define SHM_VASPACE_SIZE (1024 * 1024 * 32) 39 40 /* Physical Secure DDR pool */ 41 tee_mm_pool_t tee_mm_sec_ddr; 42 43 /* Virtual memory pool for core mappings */ 44 tee_mm_pool_t core_virt_mem_pool; 45 46 /* Virtual memory pool for shared memory mappings */ 47 tee_mm_pool_t core_virt_shm_pool; 48 49 #ifdef CFG_CORE_PHYS_RELOCATABLE 50 unsigned long core_mmu_tee_load_pa __nex_bss; 51 #else 52 const unsigned long core_mmu_tee_load_pa = TEE_LOAD_ADDR; 53 #endif 54 55 /* 56 * These variables are initialized before .bss is cleared. To avoid 57 * resetting them when .bss is cleared we're storing them in .data instead, 58 * even if they initially are zero. 59 */ 60 61 #ifdef CFG_CORE_RESERVED_SHM 62 /* Default NSec shared memory allocated from NSec world */ 63 unsigned long default_nsec_shm_size __nex_bss; 64 unsigned long default_nsec_shm_paddr __nex_bss; 65 #endif 66 67 static struct tee_mmap_region static_mmap_regions[CFG_MMAP_REGIONS 68 #if defined(CFG_CORE_ASLR) || defined(CFG_CORE_PHYS_RELOCATABLE) 69 + 1 70 #endif 71 + 1] __nex_bss; 72 static struct memory_map static_memory_map __nex_data = { 73 .map = static_mmap_regions, 74 .alloc_count = ARRAY_SIZE(static_mmap_regions), 75 }; 76 77 /* Define the platform's memory layout. */ 78 struct memaccess_area { 79 paddr_t paddr; 80 size_t size; 81 }; 82 83 #define MEMACCESS_AREA(a, s) { .paddr = a, .size = s } 84 85 static struct memaccess_area secure_only[] __nex_data = { 86 #ifdef CFG_CORE_PHYS_RELOCATABLE 87 MEMACCESS_AREA(0, 0), 88 #else 89 #ifdef TRUSTED_SRAM_BASE 90 MEMACCESS_AREA(TRUSTED_SRAM_BASE, TRUSTED_SRAM_SIZE), 91 #endif 92 MEMACCESS_AREA(TRUSTED_DRAM_BASE, TRUSTED_DRAM_SIZE), 93 #endif 94 }; 95 96 static struct memaccess_area nsec_shared[] __nex_data = { 97 #ifdef CFG_CORE_RESERVED_SHM 98 MEMACCESS_AREA(TEE_SHMEM_START, TEE_SHMEM_SIZE), 99 #endif 100 }; 101 102 #if defined(CFG_SECURE_DATA_PATH) 103 static const char *tz_sdp_match = "linaro,secure-heap"; 104 static struct memaccess_area sec_sdp; 105 #ifdef CFG_TEE_SDP_MEM_BASE 106 register_sdp_mem(CFG_TEE_SDP_MEM_BASE, CFG_TEE_SDP_MEM_SIZE); 107 #endif 108 #ifdef TEE_SDP_TEST_MEM_BASE 109 register_sdp_mem(TEE_SDP_TEST_MEM_BASE, TEE_SDP_TEST_MEM_SIZE); 110 #endif 111 #endif 112 113 #ifdef CFG_CORE_RESERVED_SHM 114 register_phys_mem(MEM_AREA_NSEC_SHM, TEE_SHMEM_START, TEE_SHMEM_SIZE); 115 #endif 116 static unsigned int mmu_spinlock; 117 118 static uint32_t mmu_lock(void) 119 { 120 return cpu_spin_lock_xsave(&mmu_spinlock); 121 } 122 123 static void mmu_unlock(uint32_t exceptions) 124 { 125 cpu_spin_unlock_xrestore(&mmu_spinlock, exceptions); 126 } 127 128 static void grow_mem_map(struct memory_map *mem_map) 129 { 130 if (mem_map->count == mem_map->alloc_count) { 131 EMSG("Out of entries (%zu) in mem_map", mem_map->alloc_count); 132 panic(); 133 } 134 mem_map->count++; 135 } 136 137 void core_mmu_get_secure_memory(paddr_t *base, paddr_size_t *size) 138 { 139 /* 140 * The first range is always used to cover OP-TEE core memory, but 141 * depending on configuration it may cover more than that. 142 */ 143 *base = secure_only[0].paddr; 144 *size = secure_only[0].size; 145 } 146 147 void core_mmu_set_secure_memory(paddr_t base, size_t size) 148 { 149 #ifdef CFG_CORE_PHYS_RELOCATABLE 150 static_assert(ARRAY_SIZE(secure_only) == 1); 151 #endif 152 runtime_assert(IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE)); 153 assert(!secure_only[0].size); 154 assert(base && size); 155 156 DMSG("Physical secure memory base %#"PRIxPA" size %#zx", base, size); 157 secure_only[0].paddr = base; 158 secure_only[0].size = size; 159 } 160 161 void core_mmu_get_ta_range(paddr_t *base, size_t *size) 162 { 163 paddr_t b = 0; 164 size_t s = 0; 165 166 static_assert(!(TEE_RAM_VA_SIZE % SMALL_PAGE_SIZE)); 167 #ifdef TA_RAM_START 168 b = TA_RAM_START; 169 s = TA_RAM_SIZE; 170 #else 171 static_assert(ARRAY_SIZE(secure_only) <= 2); 172 if (ARRAY_SIZE(secure_only) == 1) { 173 vaddr_t load_offs = 0; 174 175 assert(core_mmu_tee_load_pa >= secure_only[0].paddr); 176 load_offs = core_mmu_tee_load_pa - secure_only[0].paddr; 177 178 assert(secure_only[0].size > 179 load_offs + TEE_RAM_VA_SIZE + TEE_SDP_TEST_MEM_SIZE); 180 b = secure_only[0].paddr + load_offs + TEE_RAM_VA_SIZE; 181 s = secure_only[0].size - load_offs - TEE_RAM_VA_SIZE - 182 TEE_SDP_TEST_MEM_SIZE; 183 } else { 184 assert(secure_only[1].size > TEE_SDP_TEST_MEM_SIZE); 185 b = secure_only[1].paddr; 186 s = secure_only[1].size - TEE_SDP_TEST_MEM_SIZE; 187 } 188 #endif 189 if (base) 190 *base = b; 191 if (size) 192 *size = s; 193 } 194 195 static struct memory_map *get_memory_map(void) 196 { 197 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 198 struct memory_map *map = virt_get_memory_map(); 199 200 if (map) 201 return map; 202 } 203 204 return &static_memory_map; 205 } 206 207 static bool _pbuf_intersects(struct memaccess_area *a, size_t alen, 208 paddr_t pa, size_t size) 209 { 210 size_t n; 211 212 for (n = 0; n < alen; n++) 213 if (core_is_buffer_intersect(pa, size, a[n].paddr, a[n].size)) 214 return true; 215 return false; 216 } 217 218 #define pbuf_intersects(a, pa, size) \ 219 _pbuf_intersects((a), ARRAY_SIZE(a), (pa), (size)) 220 221 static bool _pbuf_is_inside(struct memaccess_area *a, size_t alen, 222 paddr_t pa, size_t size) 223 { 224 size_t n; 225 226 for (n = 0; n < alen; n++) 227 if (core_is_buffer_inside(pa, size, a[n].paddr, a[n].size)) 228 return true; 229 return false; 230 } 231 232 #define pbuf_is_inside(a, pa, size) \ 233 _pbuf_is_inside((a), ARRAY_SIZE(a), (pa), (size)) 234 235 static bool pa_is_in_map(struct tee_mmap_region *map, paddr_t pa, size_t len) 236 { 237 paddr_t end_pa = 0; 238 239 if (!map) 240 return false; 241 242 if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa)) 243 return false; 244 245 return (pa >= map->pa && end_pa <= map->pa + map->size - 1); 246 } 247 248 static bool va_is_in_map(struct tee_mmap_region *map, vaddr_t va) 249 { 250 if (!map) 251 return false; 252 return (va >= map->va && va <= (map->va + map->size - 1)); 253 } 254 255 /* check if target buffer fits in a core default map area */ 256 static bool pbuf_inside_map_area(unsigned long p, size_t l, 257 struct tee_mmap_region *map) 258 { 259 return core_is_buffer_inside(p, l, map->pa, map->size); 260 } 261 262 static struct tee_mmap_region *find_map_by_type(enum teecore_memtypes type) 263 { 264 struct memory_map *mem_map = get_memory_map(); 265 size_t n = 0; 266 267 for (n = 0; n < mem_map->count; n++) { 268 if (mem_map->map[n].type == type) 269 return mem_map->map + n; 270 } 271 return NULL; 272 } 273 274 static struct tee_mmap_region * 275 find_map_by_type_and_pa(enum teecore_memtypes type, paddr_t pa, size_t len) 276 { 277 struct memory_map *mem_map = get_memory_map(); 278 size_t n = 0; 279 280 for (n = 0; n < mem_map->count; n++) { 281 if (mem_map->map[n].type != type) 282 continue; 283 if (pa_is_in_map(mem_map->map + n, pa, len)) 284 return mem_map->map + n; 285 } 286 return NULL; 287 } 288 289 static struct tee_mmap_region *find_map_by_va(void *va) 290 { 291 struct memory_map *mem_map = get_memory_map(); 292 vaddr_t a = (vaddr_t)va; 293 size_t n = 0; 294 295 for (n = 0; n < mem_map->count; n++) { 296 if (a >= mem_map->map[n].va && 297 a <= (mem_map->map[n].va - 1 + mem_map->map[n].size)) 298 return mem_map->map + n; 299 } 300 301 return NULL; 302 } 303 304 static struct tee_mmap_region *find_map_by_pa(unsigned long pa) 305 { 306 struct memory_map *mem_map = get_memory_map(); 307 size_t n = 0; 308 309 for (n = 0; n < mem_map->count; n++) { 310 /* Skip unmapped regions */ 311 if ((mem_map->map[n].attr & TEE_MATTR_VALID_BLOCK) && 312 pa >= mem_map->map[n].pa && 313 pa <= (mem_map->map[n].pa - 1 + mem_map->map[n].size)) 314 return mem_map->map + n; 315 } 316 317 return NULL; 318 } 319 320 #if defined(CFG_SECURE_DATA_PATH) 321 static bool dtb_get_sdp_region(void) 322 { 323 void *fdt = NULL; 324 int node = 0; 325 int tmp_node = 0; 326 paddr_t tmp_addr = 0; 327 size_t tmp_size = 0; 328 329 if (!IS_ENABLED(CFG_EMBED_DTB)) 330 return false; 331 332 fdt = get_embedded_dt(); 333 if (!fdt) 334 panic("No DTB found"); 335 336 node = fdt_node_offset_by_compatible(fdt, -1, tz_sdp_match); 337 if (node < 0) { 338 DMSG("No %s compatible node found", tz_sdp_match); 339 return false; 340 } 341 tmp_node = node; 342 while (tmp_node >= 0) { 343 tmp_node = fdt_node_offset_by_compatible(fdt, tmp_node, 344 tz_sdp_match); 345 if (tmp_node >= 0) 346 DMSG("Ignore SDP pool node %s, supports only 1 node", 347 fdt_get_name(fdt, tmp_node, NULL)); 348 } 349 350 tmp_addr = fdt_reg_base_address(fdt, node); 351 if (tmp_addr == DT_INFO_INVALID_REG) { 352 EMSG("%s: Unable to get base addr from DT", tz_sdp_match); 353 return false; 354 } 355 356 tmp_size = fdt_reg_size(fdt, node); 357 if (tmp_size == DT_INFO_INVALID_REG_SIZE) { 358 EMSG("%s: Unable to get size of base addr from DT", 359 tz_sdp_match); 360 return false; 361 } 362 363 sec_sdp.paddr = tmp_addr; 364 sec_sdp.size = tmp_size; 365 366 return true; 367 } 368 #endif 369 370 #if defined(CFG_CORE_DYN_SHM) || defined(CFG_SECURE_DATA_PATH) 371 static bool pbuf_is_special_mem(paddr_t pbuf, size_t len, 372 const struct core_mmu_phys_mem *start, 373 const struct core_mmu_phys_mem *end) 374 { 375 const struct core_mmu_phys_mem *mem; 376 377 for (mem = start; mem < end; mem++) { 378 if (core_is_buffer_inside(pbuf, len, mem->addr, mem->size)) 379 return true; 380 } 381 382 return false; 383 } 384 #endif 385 386 #ifdef CFG_CORE_DYN_SHM 387 static void carve_out_phys_mem(struct core_mmu_phys_mem **mem, size_t *nelems, 388 paddr_t pa, size_t size) 389 { 390 struct core_mmu_phys_mem *m = *mem; 391 size_t n = 0; 392 393 while (true) { 394 if (n >= *nelems) { 395 DMSG("No need to carve out %#" PRIxPA " size %#zx", 396 pa, size); 397 return; 398 } 399 if (core_is_buffer_inside(pa, size, m[n].addr, m[n].size)) 400 break; 401 if (!core_is_buffer_outside(pa, size, m[n].addr, m[n].size)) 402 panic(); 403 n++; 404 } 405 406 if (pa == m[n].addr && size == m[n].size) { 407 /* Remove this entry */ 408 (*nelems)--; 409 memmove(m + n, m + n + 1, sizeof(*m) * (*nelems - n)); 410 m = nex_realloc(m, sizeof(*m) * *nelems); 411 if (!m) 412 panic(); 413 *mem = m; 414 } else if (pa == m[n].addr) { 415 m[n].addr += size; 416 m[n].size -= size; 417 } else if ((pa + size) == (m[n].addr + m[n].size)) { 418 m[n].size -= size; 419 } else { 420 /* Need to split the memory entry */ 421 m = nex_realloc(m, sizeof(*m) * (*nelems + 1)); 422 if (!m) 423 panic(); 424 *mem = m; 425 memmove(m + n + 1, m + n, sizeof(*m) * (*nelems - n)); 426 (*nelems)++; 427 m[n].size = pa - m[n].addr; 428 m[n + 1].size -= size + m[n].size; 429 m[n + 1].addr = pa + size; 430 } 431 } 432 433 static void check_phys_mem_is_outside(struct core_mmu_phys_mem *start, 434 size_t nelems, 435 struct tee_mmap_region *map) 436 { 437 size_t n; 438 439 for (n = 0; n < nelems; n++) { 440 if (!core_is_buffer_outside(start[n].addr, start[n].size, 441 map->pa, map->size)) { 442 EMSG("Non-sec mem (%#" PRIxPA ":%#" PRIxPASZ 443 ") overlaps map (type %d %#" PRIxPA ":%#zx)", 444 start[n].addr, start[n].size, 445 map->type, map->pa, map->size); 446 panic(); 447 } 448 } 449 } 450 451 static const struct core_mmu_phys_mem *discovered_nsec_ddr_start __nex_bss; 452 static size_t discovered_nsec_ddr_nelems __nex_bss; 453 454 static int cmp_pmem_by_addr(const void *a, const void *b) 455 { 456 const struct core_mmu_phys_mem *pmem_a = a; 457 const struct core_mmu_phys_mem *pmem_b = b; 458 459 return CMP_TRILEAN(pmem_a->addr, pmem_b->addr); 460 } 461 462 void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start, 463 size_t nelems) 464 { 465 struct core_mmu_phys_mem *m = start; 466 size_t num_elems = nelems; 467 struct memory_map *mem_map = &static_memory_map; 468 const struct core_mmu_phys_mem __maybe_unused *pmem; 469 size_t n = 0; 470 471 assert(!discovered_nsec_ddr_start); 472 assert(m && num_elems); 473 474 qsort(m, num_elems, sizeof(*m), cmp_pmem_by_addr); 475 476 /* 477 * Non-secure shared memory and also secure data 478 * path memory are supposed to reside inside 479 * non-secure memory. Since NSEC_SHM and SDP_MEM 480 * are used for a specific purpose make holes for 481 * those memory in the normal non-secure memory. 482 * 483 * This has to be done since for instance QEMU 484 * isn't aware of which memory range in the 485 * non-secure memory is used for NSEC_SHM. 486 */ 487 488 #ifdef CFG_SECURE_DATA_PATH 489 if (dtb_get_sdp_region()) 490 carve_out_phys_mem(&m, &num_elems, sec_sdp.paddr, sec_sdp.size); 491 492 for (pmem = phys_sdp_mem_begin; pmem < phys_sdp_mem_end; pmem++) 493 carve_out_phys_mem(&m, &num_elems, pmem->addr, pmem->size); 494 #endif 495 496 for (n = 0; n < ARRAY_SIZE(secure_only); n++) 497 carve_out_phys_mem(&m, &num_elems, secure_only[n].paddr, 498 secure_only[n].size); 499 500 for (n = 0; n < mem_map->count; n++) { 501 switch (mem_map->map[n].type) { 502 case MEM_AREA_NSEC_SHM: 503 carve_out_phys_mem(&m, &num_elems, mem_map->map[n].pa, 504 mem_map->map[n].size); 505 break; 506 case MEM_AREA_EXT_DT: 507 case MEM_AREA_MANIFEST_DT: 508 case MEM_AREA_RAM_NSEC: 509 case MEM_AREA_RES_VASPACE: 510 case MEM_AREA_SHM_VASPACE: 511 case MEM_AREA_TS_VASPACE: 512 case MEM_AREA_PAGER_VASPACE: 513 break; 514 default: 515 check_phys_mem_is_outside(m, num_elems, 516 mem_map->map + n); 517 } 518 } 519 520 discovered_nsec_ddr_start = m; 521 discovered_nsec_ddr_nelems = num_elems; 522 523 if (!core_mmu_check_end_pa(m[num_elems - 1].addr, 524 m[num_elems - 1].size)) 525 panic(); 526 } 527 528 static bool get_discovered_nsec_ddr(const struct core_mmu_phys_mem **start, 529 const struct core_mmu_phys_mem **end) 530 { 531 if (!discovered_nsec_ddr_start) 532 return false; 533 534 *start = discovered_nsec_ddr_start; 535 *end = discovered_nsec_ddr_start + discovered_nsec_ddr_nelems; 536 537 return true; 538 } 539 540 static bool pbuf_is_nsec_ddr(paddr_t pbuf, size_t len) 541 { 542 const struct core_mmu_phys_mem *start; 543 const struct core_mmu_phys_mem *end; 544 545 if (!get_discovered_nsec_ddr(&start, &end)) 546 return false; 547 548 return pbuf_is_special_mem(pbuf, len, start, end); 549 } 550 551 bool core_mmu_nsec_ddr_is_defined(void) 552 { 553 const struct core_mmu_phys_mem *start; 554 const struct core_mmu_phys_mem *end; 555 556 if (!get_discovered_nsec_ddr(&start, &end)) 557 return false; 558 559 return start != end; 560 } 561 #else 562 static bool pbuf_is_nsec_ddr(paddr_t pbuf __unused, size_t len __unused) 563 { 564 return false; 565 } 566 #endif /*CFG_CORE_DYN_SHM*/ 567 568 #define MSG_MEM_INSTERSECT(pa1, sz1, pa2, sz2) \ 569 EMSG("[%" PRIxPA " %" PRIx64 "] intersects [%" PRIxPA " %" PRIx64 "]", \ 570 pa1, (uint64_t)pa1 + (sz1), pa2, (uint64_t)pa2 + (sz2)) 571 572 #ifdef CFG_SECURE_DATA_PATH 573 static bool pbuf_is_sdp_mem(paddr_t pbuf, size_t len) 574 { 575 bool is_sdp_mem = false; 576 577 if (sec_sdp.size) 578 is_sdp_mem = core_is_buffer_inside(pbuf, len, sec_sdp.paddr, 579 sec_sdp.size); 580 581 if (!is_sdp_mem) 582 is_sdp_mem = pbuf_is_special_mem(pbuf, len, phys_sdp_mem_begin, 583 phys_sdp_mem_end); 584 585 return is_sdp_mem; 586 } 587 588 static struct mobj *core_sdp_mem_alloc_mobj(paddr_t pa, size_t size) 589 { 590 struct mobj *mobj = mobj_phys_alloc(pa, size, TEE_MATTR_MEM_TYPE_CACHED, 591 CORE_MEM_SDP_MEM); 592 593 if (!mobj) 594 panic("can't create SDP physical memory object"); 595 596 return mobj; 597 } 598 599 struct mobj **core_sdp_mem_create_mobjs(void) 600 { 601 const struct core_mmu_phys_mem *mem = NULL; 602 struct mobj **mobj_base = NULL; 603 struct mobj **mobj = NULL; 604 int cnt = phys_sdp_mem_end - phys_sdp_mem_begin; 605 606 if (sec_sdp.size) 607 cnt++; 608 609 /* SDP mobjs table must end with a NULL entry */ 610 mobj_base = calloc(cnt + 1, sizeof(struct mobj *)); 611 if (!mobj_base) 612 panic("Out of memory"); 613 614 mobj = mobj_base; 615 616 for (mem = phys_sdp_mem_begin; mem < phys_sdp_mem_end; mem++, mobj++) 617 *mobj = core_sdp_mem_alloc_mobj(mem->addr, mem->size); 618 619 if (sec_sdp.size) 620 *mobj = core_sdp_mem_alloc_mobj(sec_sdp.paddr, sec_sdp.size); 621 622 return mobj_base; 623 } 624 625 #else /* CFG_SECURE_DATA_PATH */ 626 static bool pbuf_is_sdp_mem(paddr_t pbuf __unused, size_t len __unused) 627 { 628 return false; 629 } 630 631 #endif /* CFG_SECURE_DATA_PATH */ 632 633 /* Check special memories comply with registered memories */ 634 static void verify_special_mem_areas(struct memory_map *mem_map, 635 const struct core_mmu_phys_mem *start, 636 const struct core_mmu_phys_mem *end, 637 const char *area_name __maybe_unused) 638 { 639 const struct core_mmu_phys_mem *mem = NULL; 640 const struct core_mmu_phys_mem *mem2 = NULL; 641 size_t n = 0; 642 643 if (start == end) { 644 DMSG("No %s memory area defined", area_name); 645 return; 646 } 647 648 for (mem = start; mem < end; mem++) 649 DMSG("%s memory [%" PRIxPA " %" PRIx64 "]", 650 area_name, mem->addr, (uint64_t)mem->addr + mem->size); 651 652 /* Check memories do not intersect each other */ 653 for (mem = start; mem + 1 < end; mem++) { 654 for (mem2 = mem + 1; mem2 < end; mem2++) { 655 if (core_is_buffer_intersect(mem2->addr, mem2->size, 656 mem->addr, mem->size)) { 657 MSG_MEM_INSTERSECT(mem2->addr, mem2->size, 658 mem->addr, mem->size); 659 panic("Special memory intersection"); 660 } 661 } 662 } 663 664 /* 665 * Check memories do not intersect any mapped memory. 666 * This is called before reserved VA space is loaded in mem_map. 667 */ 668 for (mem = start; mem < end; mem++) { 669 for (n = 0; n < mem_map->count; n++) { 670 if (core_is_buffer_intersect(mem->addr, mem->size, 671 mem_map->map[n].pa, 672 mem_map->map[n].size)) { 673 MSG_MEM_INSTERSECT(mem->addr, mem->size, 674 mem_map->map[n].pa, 675 mem_map->map[n].size); 676 panic("Special memory intersection"); 677 } 678 } 679 } 680 } 681 682 static void add_phys_mem(struct memory_map *mem_map, 683 const char *mem_name __maybe_unused, 684 enum teecore_memtypes mem_type, 685 paddr_t mem_addr, paddr_size_t mem_size) 686 { 687 size_t n = 0; 688 paddr_t pa = 0; 689 paddr_size_t size = 0; 690 691 if (!mem_size) /* Discard null size entries */ 692 return; 693 694 /* 695 * If some ranges of memory of the same type do overlap 696 * each others they are coalesced into one entry. To help this 697 * added entries are sorted by increasing physical. 698 * 699 * Note that it's valid to have the same physical memory as several 700 * different memory types, for instance the same device memory 701 * mapped as both secure and non-secure. This will probably not 702 * happen often in practice. 703 */ 704 DMSG("%s type %s 0x%08" PRIxPA " size 0x%08" PRIxPASZ, 705 mem_name, teecore_memtype_name(mem_type), mem_addr, mem_size); 706 for (n = 0; n < mem_map->count; n++) { 707 pa = mem_map->map[n].pa; 708 size = mem_map->map[n].size; 709 if (mem_type == mem_map->map[n].type && 710 ((pa <= (mem_addr + (mem_size - 1))) && 711 (mem_addr <= (pa + (size - 1))))) { 712 DMSG("Physical mem map overlaps 0x%" PRIxPA, mem_addr); 713 mem_map->map[n].pa = MIN(pa, mem_addr); 714 mem_map->map[n].size = MAX(size, mem_size) + 715 (pa - mem_map->map[n].pa); 716 return; 717 } 718 if (mem_type < mem_map->map[n].type || 719 (mem_type == mem_map->map[n].type && mem_addr < pa)) 720 break; /* found the spot where to insert this memory */ 721 } 722 723 grow_mem_map(mem_map); 724 ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map), 725 n, NULL); 726 mem_map->map[n] = (struct tee_mmap_region){ 727 .type = mem_type, 728 .pa = mem_addr, 729 .size = mem_size, 730 }; 731 } 732 733 static void add_va_space(struct memory_map *mem_map, 734 enum teecore_memtypes type, size_t size) 735 { 736 size_t n = 0; 737 738 DMSG("type %s size 0x%08zx", teecore_memtype_name(type), size); 739 for (n = 0; n < mem_map->count; n++) { 740 if (type < mem_map->map[n].type) 741 break; 742 } 743 744 grow_mem_map(mem_map); 745 ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map), 746 n, NULL); 747 mem_map->map[n] = (struct tee_mmap_region){ 748 .type = type, 749 .size = size, 750 }; 751 } 752 753 uint32_t core_mmu_type_to_attr(enum teecore_memtypes t) 754 { 755 const uint32_t attr = TEE_MATTR_VALID_BLOCK; 756 const uint32_t tagged = TEE_MATTR_MEM_TYPE_TAGGED << 757 TEE_MATTR_MEM_TYPE_SHIFT; 758 const uint32_t cached = TEE_MATTR_MEM_TYPE_CACHED << 759 TEE_MATTR_MEM_TYPE_SHIFT; 760 const uint32_t noncache = TEE_MATTR_MEM_TYPE_DEV << 761 TEE_MATTR_MEM_TYPE_SHIFT; 762 763 switch (t) { 764 case MEM_AREA_TEE_RAM: 765 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | tagged; 766 case MEM_AREA_TEE_RAM_RX: 767 case MEM_AREA_INIT_RAM_RX: 768 case MEM_AREA_IDENTITY_MAP_RX: 769 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRX | tagged; 770 case MEM_AREA_TEE_RAM_RO: 771 case MEM_AREA_INIT_RAM_RO: 772 return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | tagged; 773 case MEM_AREA_TEE_RAM_RW: 774 case MEM_AREA_NEX_RAM_RO: /* This has to be r/w during init runtime */ 775 case MEM_AREA_NEX_RAM_RW: 776 case MEM_AREA_TEE_ASAN: 777 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged; 778 case MEM_AREA_TEE_COHERENT: 779 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | noncache; 780 case MEM_AREA_TA_RAM: 781 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged; 782 case MEM_AREA_NSEC_SHM: 783 case MEM_AREA_NEX_NSEC_SHM: 784 return attr | TEE_MATTR_PRW | cached; 785 case MEM_AREA_MANIFEST_DT: 786 return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | cached; 787 case MEM_AREA_TRANSFER_LIST: 788 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached; 789 case MEM_AREA_EXT_DT: 790 /* 791 * If CFG_MAP_EXT_DT_SECURE is enabled map the external device 792 * tree as secure non-cached memory, otherwise, fall back to 793 * non-secure mapping. 794 */ 795 if (IS_ENABLED(CFG_MAP_EXT_DT_SECURE)) 796 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | 797 noncache; 798 fallthrough; 799 case MEM_AREA_IO_NSEC: 800 return attr | TEE_MATTR_PRW | noncache; 801 case MEM_AREA_IO_SEC: 802 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | noncache; 803 case MEM_AREA_RAM_NSEC: 804 return attr | TEE_MATTR_PRW | cached; 805 case MEM_AREA_RAM_SEC: 806 case MEM_AREA_SEC_RAM_OVERALL: 807 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached; 808 case MEM_AREA_ROM_SEC: 809 return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | cached; 810 case MEM_AREA_RES_VASPACE: 811 case MEM_AREA_SHM_VASPACE: 812 return 0; 813 case MEM_AREA_PAGER_VASPACE: 814 return TEE_MATTR_SECURE; 815 default: 816 panic("invalid type"); 817 } 818 } 819 820 static bool __maybe_unused map_is_tee_ram(const struct tee_mmap_region *mm) 821 { 822 switch (mm->type) { 823 case MEM_AREA_TEE_RAM: 824 case MEM_AREA_TEE_RAM_RX: 825 case MEM_AREA_TEE_RAM_RO: 826 case MEM_AREA_TEE_RAM_RW: 827 case MEM_AREA_INIT_RAM_RX: 828 case MEM_AREA_INIT_RAM_RO: 829 case MEM_AREA_NEX_RAM_RW: 830 case MEM_AREA_NEX_RAM_RO: 831 case MEM_AREA_TEE_ASAN: 832 return true; 833 default: 834 return false; 835 } 836 } 837 838 static bool __maybe_unused map_is_secure(const struct tee_mmap_region *mm) 839 { 840 return !!(core_mmu_type_to_attr(mm->type) & TEE_MATTR_SECURE); 841 } 842 843 static bool __maybe_unused map_is_pgdir(const struct tee_mmap_region *mm) 844 { 845 return mm->region_size == CORE_MMU_PGDIR_SIZE; 846 } 847 848 static int cmp_mmap_by_lower_va(const void *a, const void *b) 849 { 850 const struct tee_mmap_region *mm_a = a; 851 const struct tee_mmap_region *mm_b = b; 852 853 return CMP_TRILEAN(mm_a->va, mm_b->va); 854 } 855 856 static void dump_mmap_table(struct memory_map *mem_map) 857 { 858 size_t n = 0; 859 860 for (n = 0; n < mem_map->count; n++) { 861 struct tee_mmap_region *map = mem_map->map + n; 862 vaddr_t __maybe_unused vstart; 863 864 vstart = map->va + ((vaddr_t)map->pa & (map->region_size - 1)); 865 DMSG("type %-12s va 0x%08" PRIxVA "..0x%08" PRIxVA 866 " pa 0x%08" PRIxPA "..0x%08" PRIxPA " size 0x%08zx (%s)", 867 teecore_memtype_name(map->type), vstart, 868 vstart + map->size - 1, map->pa, 869 (paddr_t)(map->pa + map->size - 1), map->size, 870 map->region_size == SMALL_PAGE_SIZE ? "smallpg" : "pgdir"); 871 } 872 } 873 874 #if DEBUG_XLAT_TABLE 875 876 static void dump_xlat_table(vaddr_t va, unsigned int level) 877 { 878 struct core_mmu_table_info tbl_info; 879 unsigned int idx = 0; 880 paddr_t pa; 881 uint32_t attr; 882 883 core_mmu_find_table(NULL, va, level, &tbl_info); 884 va = tbl_info.va_base; 885 for (idx = 0; idx < tbl_info.num_entries; idx++) { 886 core_mmu_get_entry(&tbl_info, idx, &pa, &attr); 887 if (attr || level > CORE_MMU_BASE_TABLE_LEVEL) { 888 const char *security_bit = ""; 889 890 if (core_mmu_entry_have_security_bit(attr)) { 891 if (attr & TEE_MATTR_SECURE) 892 security_bit = "S"; 893 else 894 security_bit = "NS"; 895 } 896 897 if (attr & TEE_MATTR_TABLE) { 898 DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA 899 " TBL:0x%010" PRIxPA " %s", 900 level * 2, "", level, va, pa, 901 security_bit); 902 dump_xlat_table(va, level + 1); 903 } else if (attr) { 904 DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA 905 " PA:0x%010" PRIxPA " %s-%s-%s-%s", 906 level * 2, "", level, va, pa, 907 mattr_is_cached(attr) ? "MEM" : 908 "DEV", 909 attr & TEE_MATTR_PW ? "RW" : "RO", 910 attr & TEE_MATTR_PX ? "X " : "XN", 911 security_bit); 912 } else { 913 DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA 914 " INVALID\n", 915 level * 2, "", level, va); 916 } 917 } 918 va += BIT64(tbl_info.shift); 919 } 920 } 921 922 #else 923 924 static void dump_xlat_table(vaddr_t va __unused, int level __unused) 925 { 926 } 927 928 #endif 929 930 /* 931 * Reserves virtual memory space for pager usage. 932 * 933 * From the start of the first memory used by the link script + 934 * TEE_RAM_VA_SIZE should be covered, either with a direct mapping or empty 935 * mapping for pager usage. This adds translation tables as needed for the 936 * pager to operate. 937 */ 938 static void add_pager_vaspace(struct memory_map *mem_map) 939 { 940 paddr_t begin = 0; 941 paddr_t end = 0; 942 size_t size = 0; 943 size_t pos = 0; 944 size_t n = 0; 945 946 947 for (n = 0; n < mem_map->count; n++) { 948 if (map_is_tee_ram(mem_map->map + n)) { 949 if (!begin) 950 begin = mem_map->map[n].pa; 951 pos = n + 1; 952 } 953 } 954 955 end = mem_map->map[pos - 1].pa + mem_map->map[pos - 1].size; 956 assert(end - begin < TEE_RAM_VA_SIZE); 957 size = TEE_RAM_VA_SIZE - (end - begin); 958 959 grow_mem_map(mem_map); 960 ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map), 961 n, NULL); 962 mem_map->map[n] = (struct tee_mmap_region){ 963 .type = MEM_AREA_PAGER_VASPACE, 964 .size = size, 965 .region_size = SMALL_PAGE_SIZE, 966 .attr = core_mmu_type_to_attr(MEM_AREA_PAGER_VASPACE), 967 }; 968 } 969 970 static void check_sec_nsec_mem_config(void) 971 { 972 size_t n = 0; 973 974 for (n = 0; n < ARRAY_SIZE(secure_only); n++) { 975 if (pbuf_intersects(nsec_shared, secure_only[n].paddr, 976 secure_only[n].size)) 977 panic("Invalid memory access config: sec/nsec"); 978 } 979 } 980 981 static void collect_device_mem_ranges(struct memory_map *mem_map) 982 { 983 const char *compatible = "arm,ffa-manifest-device-regions"; 984 void *fdt = get_manifest_dt(); 985 const char *name = NULL; 986 uint64_t page_count = 0; 987 uint64_t base = 0; 988 int subnode = 0; 989 int node = 0; 990 991 assert(fdt); 992 993 node = fdt_node_offset_by_compatible(fdt, 0, compatible); 994 if (node < 0) 995 return; 996 997 fdt_for_each_subnode(subnode, fdt, node) { 998 name = fdt_get_name(fdt, subnode, NULL); 999 if (!name) 1000 continue; 1001 1002 if (dt_getprop_as_number(fdt, subnode, "base-address", 1003 &base)) { 1004 EMSG("Mandatory field is missing: base-address"); 1005 continue; 1006 } 1007 1008 if (base & SMALL_PAGE_MASK) { 1009 EMSG("base-address is not page aligned"); 1010 continue; 1011 } 1012 1013 if (dt_getprop_as_number(fdt, subnode, "pages-count", 1014 &page_count)) { 1015 EMSG("Mandatory field is missing: pages-count"); 1016 continue; 1017 } 1018 1019 add_phys_mem(mem_map, name, MEM_AREA_IO_SEC, 1020 base, base + page_count * SMALL_PAGE_SIZE); 1021 } 1022 } 1023 1024 static void collect_mem_ranges(struct memory_map *mem_map) 1025 { 1026 const struct core_mmu_phys_mem *mem = NULL; 1027 vaddr_t ram_start = secure_only[0].paddr; 1028 1029 #define ADD_PHYS_MEM(_type, _addr, _size) \ 1030 add_phys_mem(mem_map, #_addr, (_type), (_addr), (_size)) 1031 1032 if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) { 1033 ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, ram_start, 1034 VCORE_UNPG_RX_PA - ram_start); 1035 ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RX, VCORE_UNPG_RX_PA, 1036 VCORE_UNPG_RX_SZ); 1037 ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, VCORE_UNPG_RO_PA, 1038 VCORE_UNPG_RO_SZ); 1039 1040 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 1041 ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RO, VCORE_UNPG_RW_PA, 1042 VCORE_UNPG_RW_SZ); 1043 ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RW, VCORE_NEX_RW_PA, 1044 VCORE_NEX_RW_SZ); 1045 } else { 1046 ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RW, VCORE_UNPG_RW_PA, 1047 VCORE_UNPG_RW_SZ); 1048 } 1049 1050 if (IS_ENABLED(CFG_WITH_PAGER)) { 1051 ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RX, VCORE_INIT_RX_PA, 1052 VCORE_INIT_RX_SZ); 1053 ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RO, VCORE_INIT_RO_PA, 1054 VCORE_INIT_RO_SZ); 1055 } 1056 } else { 1057 ADD_PHYS_MEM(MEM_AREA_TEE_RAM, TEE_RAM_START, TEE_RAM_PH_SIZE); 1058 } 1059 1060 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 1061 ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, TRUSTED_DRAM_BASE, 1062 TRUSTED_DRAM_SIZE); 1063 } else { 1064 /* 1065 * Every guest will have own TA RAM if virtualization 1066 * support is enabled. 1067 */ 1068 paddr_t ta_base = 0; 1069 size_t ta_size = 0; 1070 1071 core_mmu_get_ta_range(&ta_base, &ta_size); 1072 ADD_PHYS_MEM(MEM_AREA_TA_RAM, ta_base, ta_size); 1073 } 1074 1075 if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS) && 1076 IS_ENABLED(CFG_WITH_PAGER)) { 1077 /* 1078 * Asan ram is part of MEM_AREA_TEE_RAM_RW when pager is 1079 * disabled. 1080 */ 1081 ADD_PHYS_MEM(MEM_AREA_TEE_ASAN, ASAN_MAP_PA, ASAN_MAP_SZ); 1082 } 1083 1084 #undef ADD_PHYS_MEM 1085 1086 /* Collect device memory info from SP manifest */ 1087 if (IS_ENABLED(CFG_CORE_SEL2_SPMC)) 1088 collect_device_mem_ranges(mem_map); 1089 1090 for (mem = phys_mem_map_begin; mem < phys_mem_map_end; mem++) { 1091 /* Only unmapped virtual range may have a null phys addr */ 1092 assert(mem->addr || !core_mmu_type_to_attr(mem->type)); 1093 1094 add_phys_mem(mem_map, mem->name, mem->type, 1095 mem->addr, mem->size); 1096 } 1097 1098 if (IS_ENABLED(CFG_SECURE_DATA_PATH)) 1099 verify_special_mem_areas(mem_map, phys_sdp_mem_begin, 1100 phys_sdp_mem_end, "SDP"); 1101 1102 add_va_space(mem_map, MEM_AREA_RES_VASPACE, CFG_RESERVED_VASPACE_SIZE); 1103 add_va_space(mem_map, MEM_AREA_SHM_VASPACE, SHM_VASPACE_SIZE); 1104 } 1105 1106 static void assign_mem_granularity(struct memory_map *mem_map) 1107 { 1108 size_t n = 0; 1109 1110 /* 1111 * Assign region sizes, note that MEM_AREA_TEE_RAM always uses 1112 * SMALL_PAGE_SIZE. 1113 */ 1114 for (n = 0; n < mem_map->count; n++) { 1115 paddr_t mask = mem_map->map[n].pa | mem_map->map[n].size; 1116 1117 if (!(mask & CORE_MMU_PGDIR_MASK)) 1118 mem_map->map[n].region_size = CORE_MMU_PGDIR_SIZE; 1119 else if (!(mask & SMALL_PAGE_MASK)) 1120 mem_map->map[n].region_size = SMALL_PAGE_SIZE; 1121 else 1122 panic("Impossible memory alignment"); 1123 1124 if (map_is_tee_ram(mem_map->map + n)) 1125 mem_map->map[n].region_size = SMALL_PAGE_SIZE; 1126 } 1127 } 1128 1129 static bool place_tee_ram_at_top(paddr_t paddr) 1130 { 1131 return paddr > BIT64(core_mmu_get_va_width()) / 2; 1132 } 1133 1134 /* 1135 * MMU arch driver shall override this function if it helps 1136 * optimizing the memory footprint of the address translation tables. 1137 */ 1138 bool __weak core_mmu_prefer_tee_ram_at_top(paddr_t paddr) 1139 { 1140 return place_tee_ram_at_top(paddr); 1141 } 1142 1143 static bool assign_mem_va_dir(vaddr_t tee_ram_va, struct memory_map *mem_map, 1144 bool tee_ram_at_top) 1145 { 1146 struct tee_mmap_region *map = NULL; 1147 vaddr_t va = 0; 1148 bool va_is_secure = true; 1149 size_t n = 0; 1150 1151 /* 1152 * tee_ram_va might equals 0 when CFG_CORE_ASLR=y. 1153 * 0 is by design an invalid va, so return false directly. 1154 */ 1155 if (!tee_ram_va) 1156 return false; 1157 1158 /* Clear eventual previous assignments */ 1159 for (n = 0; n < mem_map->count; n++) 1160 mem_map->map[n].va = 0; 1161 1162 /* 1163 * TEE RAM regions are always aligned with region_size. 1164 * 1165 * Note that MEM_AREA_PAGER_VASPACE also counts as TEE RAM here 1166 * since it handles virtual memory which covers the part of the ELF 1167 * that cannot fit directly into memory. 1168 */ 1169 va = tee_ram_va; 1170 for (n = 0; n < mem_map->count; n++) { 1171 map = mem_map->map + n; 1172 if (map_is_tee_ram(map) || 1173 map->type == MEM_AREA_PAGER_VASPACE) { 1174 assert(!(va & (map->region_size - 1))); 1175 assert(!(map->size & (map->region_size - 1))); 1176 map->va = va; 1177 if (ADD_OVERFLOW(va, map->size, &va)) 1178 return false; 1179 if (va >= BIT64(core_mmu_get_va_width())) 1180 return false; 1181 } 1182 } 1183 1184 if (tee_ram_at_top) { 1185 /* 1186 * Map non-tee ram regions at addresses lower than the tee 1187 * ram region. 1188 */ 1189 va = tee_ram_va; 1190 for (n = 0; n < mem_map->count; n++) { 1191 map = mem_map->map + n; 1192 map->attr = core_mmu_type_to_attr(map->type); 1193 if (map->va) 1194 continue; 1195 1196 if (!IS_ENABLED(CFG_WITH_LPAE) && 1197 va_is_secure != map_is_secure(map)) { 1198 va_is_secure = !va_is_secure; 1199 va = ROUNDDOWN(va, CORE_MMU_PGDIR_SIZE); 1200 } 1201 1202 if (SUB_OVERFLOW(va, map->size, &va)) 1203 return false; 1204 va = ROUNDDOWN(va, map->region_size); 1205 /* 1206 * Make sure that va is aligned with pa for 1207 * efficient pgdir mapping. Basically pa & 1208 * pgdir_mask should be == va & pgdir_mask 1209 */ 1210 if (map->size > 2 * CORE_MMU_PGDIR_SIZE) { 1211 if (SUB_OVERFLOW(va, CORE_MMU_PGDIR_SIZE, &va)) 1212 return false; 1213 va += (map->pa - va) & CORE_MMU_PGDIR_MASK; 1214 } 1215 map->va = va; 1216 } 1217 } else { 1218 /* 1219 * Map non-tee ram regions at addresses higher than the tee 1220 * ram region. 1221 */ 1222 for (n = 0; n < mem_map->count; n++) { 1223 map = mem_map->map + n; 1224 map->attr = core_mmu_type_to_attr(map->type); 1225 if (map->va) 1226 continue; 1227 1228 if (!IS_ENABLED(CFG_WITH_LPAE) && 1229 va_is_secure != map_is_secure(map)) { 1230 va_is_secure = !va_is_secure; 1231 if (ROUNDUP_OVERFLOW(va, CORE_MMU_PGDIR_SIZE, 1232 &va)) 1233 return false; 1234 } 1235 1236 if (ROUNDUP_OVERFLOW(va, map->region_size, &va)) 1237 return false; 1238 /* 1239 * Make sure that va is aligned with pa for 1240 * efficient pgdir mapping. Basically pa & 1241 * pgdir_mask should be == va & pgdir_mask 1242 */ 1243 if (map->size > 2 * CORE_MMU_PGDIR_SIZE) { 1244 vaddr_t offs = (map->pa - va) & 1245 CORE_MMU_PGDIR_MASK; 1246 1247 if (ADD_OVERFLOW(va, offs, &va)) 1248 return false; 1249 } 1250 1251 map->va = va; 1252 if (ADD_OVERFLOW(va, map->size, &va)) 1253 return false; 1254 if (va >= BIT64(core_mmu_get_va_width())) 1255 return false; 1256 } 1257 } 1258 1259 return true; 1260 } 1261 1262 static bool assign_mem_va(vaddr_t tee_ram_va, struct memory_map *mem_map) 1263 { 1264 bool tee_ram_at_top = place_tee_ram_at_top(tee_ram_va); 1265 1266 /* 1267 * Check that we're not overlapping with the user VA range. 1268 */ 1269 if (IS_ENABLED(CFG_WITH_LPAE)) { 1270 /* 1271 * User VA range is supposed to be defined after these 1272 * mappings have been established. 1273 */ 1274 assert(!core_mmu_user_va_range_is_defined()); 1275 } else { 1276 vaddr_t user_va_base = 0; 1277 size_t user_va_size = 0; 1278 1279 assert(core_mmu_user_va_range_is_defined()); 1280 core_mmu_get_user_va_range(&user_va_base, &user_va_size); 1281 if (tee_ram_va < (user_va_base + user_va_size)) 1282 return false; 1283 } 1284 1285 if (IS_ENABLED(CFG_WITH_PAGER)) { 1286 bool prefered_dir = core_mmu_prefer_tee_ram_at_top(tee_ram_va); 1287 1288 /* Try whole mapping covered by a single base xlat entry */ 1289 if (prefered_dir != tee_ram_at_top && 1290 assign_mem_va_dir(tee_ram_va, mem_map, prefered_dir)) 1291 return true; 1292 } 1293 1294 return assign_mem_va_dir(tee_ram_va, mem_map, tee_ram_at_top); 1295 } 1296 1297 static int cmp_init_mem_map(const void *a, const void *b) 1298 { 1299 const struct tee_mmap_region *mm_a = a; 1300 const struct tee_mmap_region *mm_b = b; 1301 int rc = 0; 1302 1303 rc = CMP_TRILEAN(mm_a->region_size, mm_b->region_size); 1304 if (!rc) 1305 rc = CMP_TRILEAN(mm_a->pa, mm_b->pa); 1306 /* 1307 * 32bit MMU descriptors cannot mix secure and non-secure mapping in 1308 * the same level2 table. Hence sort secure mapping from non-secure 1309 * mapping. 1310 */ 1311 if (!rc && !IS_ENABLED(CFG_WITH_LPAE)) 1312 rc = CMP_TRILEAN(map_is_secure(mm_a), map_is_secure(mm_b)); 1313 1314 return rc; 1315 } 1316 1317 static bool mem_map_add_id_map(struct memory_map *mem_map, 1318 vaddr_t id_map_start, vaddr_t id_map_end) 1319 { 1320 vaddr_t start = ROUNDDOWN(id_map_start, SMALL_PAGE_SIZE); 1321 vaddr_t end = ROUNDUP(id_map_end, SMALL_PAGE_SIZE); 1322 size_t len = end - start; 1323 size_t n = 0; 1324 1325 1326 for (n = 0; n < mem_map->count; n++) 1327 if (core_is_buffer_intersect(mem_map->map[n].va, 1328 mem_map->map[n].size, start, len)) 1329 return false; 1330 1331 grow_mem_map(mem_map); 1332 mem_map->map[mem_map->count - 1] = (struct tee_mmap_region){ 1333 .type = MEM_AREA_IDENTITY_MAP_RX, 1334 /* 1335 * Could use CORE_MMU_PGDIR_SIZE to potentially save a 1336 * translation table, at the increased risk of clashes with 1337 * the rest of the memory map. 1338 */ 1339 .region_size = SMALL_PAGE_SIZE, 1340 .pa = start, 1341 .va = start, 1342 .size = len, 1343 .attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX), 1344 }; 1345 1346 return true; 1347 } 1348 1349 static struct memory_map *init_mem_map(struct memory_map *mem_map, 1350 unsigned long seed, 1351 unsigned long *ret_offs) 1352 { 1353 /* 1354 * @id_map_start and @id_map_end describes a physical memory range 1355 * that must be mapped Read-Only eXecutable at identical virtual 1356 * addresses. 1357 */ 1358 vaddr_t id_map_start = (vaddr_t)__identity_map_init_start; 1359 vaddr_t id_map_end = (vaddr_t)__identity_map_init_end; 1360 vaddr_t start_addr = secure_only[0].paddr; 1361 unsigned long offs = 0; 1362 1363 collect_mem_ranges(mem_map); 1364 assign_mem_granularity(mem_map); 1365 1366 /* 1367 * To ease mapping and lower use of xlat tables, sort mapping 1368 * description moving small-page regions after the pgdir regions. 1369 */ 1370 qsort(mem_map->map, mem_map->count, sizeof(struct tee_mmap_region), 1371 cmp_init_mem_map); 1372 1373 if (IS_ENABLED(CFG_WITH_PAGER)) 1374 add_pager_vaspace(mem_map); 1375 1376 if (IS_ENABLED(CFG_CORE_ASLR) && seed) { 1377 vaddr_t base_addr = start_addr + seed; 1378 const unsigned int va_width = core_mmu_get_va_width(); 1379 const vaddr_t va_mask = GENMASK_64(va_width - 1, 1380 SMALL_PAGE_SHIFT); 1381 vaddr_t ba = base_addr; 1382 size_t n = 0; 1383 1384 for (n = 0; n < 3; n++) { 1385 if (n) 1386 ba = base_addr ^ BIT64(va_width - n); 1387 ba &= va_mask; 1388 if (assign_mem_va(ba, mem_map) && 1389 mem_map_add_id_map(mem_map, id_map_start, 1390 id_map_end)) { 1391 offs = ba - start_addr; 1392 DMSG("Mapping core at %#"PRIxVA" offs %#lx", 1393 ba, offs); 1394 goto out; 1395 } else { 1396 DMSG("Failed to map core at %#"PRIxVA, ba); 1397 } 1398 } 1399 EMSG("Failed to map core with seed %#lx", seed); 1400 } 1401 1402 if (!assign_mem_va(start_addr, mem_map)) 1403 panic(); 1404 1405 out: 1406 qsort(mem_map->map, mem_map->count, sizeof(struct tee_mmap_region), 1407 cmp_mmap_by_lower_va); 1408 1409 dump_mmap_table(mem_map); 1410 1411 *ret_offs = offs; 1412 return mem_map; 1413 } 1414 1415 static void check_mem_map(struct memory_map *mem_map) 1416 { 1417 struct tee_mmap_region *m = NULL; 1418 size_t n = 0; 1419 1420 for (n = 0; n < mem_map->count; n++) { 1421 m = mem_map->map + n; 1422 switch (m->type) { 1423 case MEM_AREA_TEE_RAM: 1424 case MEM_AREA_TEE_RAM_RX: 1425 case MEM_AREA_TEE_RAM_RO: 1426 case MEM_AREA_TEE_RAM_RW: 1427 case MEM_AREA_INIT_RAM_RX: 1428 case MEM_AREA_INIT_RAM_RO: 1429 case MEM_AREA_NEX_RAM_RW: 1430 case MEM_AREA_NEX_RAM_RO: 1431 case MEM_AREA_IDENTITY_MAP_RX: 1432 if (!pbuf_is_inside(secure_only, m->pa, m->size)) 1433 panic("TEE_RAM can't fit in secure_only"); 1434 break; 1435 case MEM_AREA_TA_RAM: 1436 if (!pbuf_is_inside(secure_only, m->pa, m->size)) 1437 panic("TA_RAM can't fit in secure_only"); 1438 break; 1439 case MEM_AREA_NSEC_SHM: 1440 if (!pbuf_is_inside(nsec_shared, m->pa, m->size)) 1441 panic("NS_SHM can't fit in nsec_shared"); 1442 break; 1443 case MEM_AREA_SEC_RAM_OVERALL: 1444 case MEM_AREA_TEE_COHERENT: 1445 case MEM_AREA_TEE_ASAN: 1446 case MEM_AREA_IO_SEC: 1447 case MEM_AREA_IO_NSEC: 1448 case MEM_AREA_EXT_DT: 1449 case MEM_AREA_MANIFEST_DT: 1450 case MEM_AREA_TRANSFER_LIST: 1451 case MEM_AREA_RAM_SEC: 1452 case MEM_AREA_RAM_NSEC: 1453 case MEM_AREA_ROM_SEC: 1454 case MEM_AREA_RES_VASPACE: 1455 case MEM_AREA_SHM_VASPACE: 1456 case MEM_AREA_PAGER_VASPACE: 1457 break; 1458 default: 1459 EMSG("Uhandled memtype %d", m->type); 1460 panic(); 1461 } 1462 } 1463 } 1464 1465 /* 1466 * core_init_mmu_map() - init tee core default memory mapping 1467 * 1468 * This routine sets the static default TEE core mapping. If @seed is > 0 1469 * and configured with CFG_CORE_ASLR it will map tee core at a location 1470 * based on the seed and return the offset from the link address. 1471 * 1472 * If an error happened: core_init_mmu_map is expected to panic. 1473 * 1474 * Note: this function is weak just to make it possible to exclude it from 1475 * the unpaged area. 1476 */ 1477 void __weak core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg) 1478 { 1479 #ifndef CFG_NS_VIRTUALIZATION 1480 vaddr_t start = ROUNDDOWN((vaddr_t)__nozi_start, SMALL_PAGE_SIZE); 1481 #else 1482 vaddr_t start = ROUNDDOWN((vaddr_t)__vcore_nex_rw_start, 1483 SMALL_PAGE_SIZE); 1484 #endif 1485 vaddr_t len = ROUNDUP((vaddr_t)__nozi_end, SMALL_PAGE_SIZE) - start; 1486 struct tee_mmap_region tmp_mmap_region = { }; 1487 struct memory_map mem_map = { }; 1488 unsigned long offs = 0; 1489 1490 if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE) && 1491 (core_mmu_tee_load_pa & SMALL_PAGE_MASK)) 1492 panic("OP-TEE load address is not page aligned"); 1493 1494 check_sec_nsec_mem_config(); 1495 1496 mem_map = static_memory_map; 1497 static_memory_map = (struct memory_map){ 1498 .map = &tmp_mmap_region, 1499 .alloc_count = 1, 1500 .count = 1, 1501 }; 1502 /* 1503 * Add a entry covering the translation tables which will be 1504 * involved in some virt_to_phys() and phys_to_virt() conversions. 1505 */ 1506 static_memory_map.map[0] = (struct tee_mmap_region){ 1507 .type = MEM_AREA_TEE_RAM, 1508 .region_size = SMALL_PAGE_SIZE, 1509 .pa = start, 1510 .va = start, 1511 .size = len, 1512 .attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX), 1513 }; 1514 1515 init_mem_map(&mem_map, seed, &offs); 1516 1517 check_mem_map(&mem_map); 1518 core_init_mmu(&mem_map); 1519 dump_xlat_table(0x0, CORE_MMU_BASE_TABLE_LEVEL); 1520 core_init_mmu_regs(cfg); 1521 cfg->map_offset = offs; 1522 static_memory_map = mem_map; 1523 } 1524 1525 bool core_mmu_mattr_is_ok(uint32_t mattr) 1526 { 1527 /* 1528 * Keep in sync with core_mmu_lpae.c:mattr_to_desc and 1529 * core_mmu_v7.c:mattr_to_texcb 1530 */ 1531 1532 switch ((mattr >> TEE_MATTR_MEM_TYPE_SHIFT) & TEE_MATTR_MEM_TYPE_MASK) { 1533 case TEE_MATTR_MEM_TYPE_DEV: 1534 case TEE_MATTR_MEM_TYPE_STRONGLY_O: 1535 case TEE_MATTR_MEM_TYPE_CACHED: 1536 case TEE_MATTR_MEM_TYPE_TAGGED: 1537 return true; 1538 default: 1539 return false; 1540 } 1541 } 1542 1543 /* 1544 * test attributes of target physical buffer 1545 * 1546 * Flags: pbuf_is(SECURE, NOT_SECURE, RAM, IOMEM, KEYVAULT). 1547 * 1548 */ 1549 bool core_pbuf_is(uint32_t attr, paddr_t pbuf, size_t len) 1550 { 1551 paddr_t ta_base = 0; 1552 size_t ta_size = 0; 1553 struct tee_mmap_region *map; 1554 1555 /* Empty buffers complies with anything */ 1556 if (len == 0) 1557 return true; 1558 1559 switch (attr) { 1560 case CORE_MEM_SEC: 1561 return pbuf_is_inside(secure_only, pbuf, len); 1562 case CORE_MEM_NON_SEC: 1563 return pbuf_is_inside(nsec_shared, pbuf, len) || 1564 pbuf_is_nsec_ddr(pbuf, len); 1565 case CORE_MEM_TEE_RAM: 1566 return core_is_buffer_inside(pbuf, len, TEE_RAM_START, 1567 TEE_RAM_PH_SIZE); 1568 case CORE_MEM_TA_RAM: 1569 core_mmu_get_ta_range(&ta_base, &ta_size); 1570 return core_is_buffer_inside(pbuf, len, ta_base, ta_size); 1571 #ifdef CFG_CORE_RESERVED_SHM 1572 case CORE_MEM_NSEC_SHM: 1573 return core_is_buffer_inside(pbuf, len, TEE_SHMEM_START, 1574 TEE_SHMEM_SIZE); 1575 #endif 1576 case CORE_MEM_SDP_MEM: 1577 return pbuf_is_sdp_mem(pbuf, len); 1578 case CORE_MEM_CACHED: 1579 map = find_map_by_pa(pbuf); 1580 if (!map || !pbuf_inside_map_area(pbuf, len, map)) 1581 return false; 1582 return mattr_is_cached(map->attr); 1583 default: 1584 return false; 1585 } 1586 } 1587 1588 /* test attributes of target virtual buffer (in core mapping) */ 1589 bool core_vbuf_is(uint32_t attr, const void *vbuf, size_t len) 1590 { 1591 paddr_t p; 1592 1593 /* Empty buffers complies with anything */ 1594 if (len == 0) 1595 return true; 1596 1597 p = virt_to_phys((void *)vbuf); 1598 if (!p) 1599 return false; 1600 1601 return core_pbuf_is(attr, p, len); 1602 } 1603 1604 /* core_va2pa - teecore exported service */ 1605 static int __maybe_unused core_va2pa_helper(void *va, paddr_t *pa) 1606 { 1607 struct tee_mmap_region *map; 1608 1609 map = find_map_by_va(va); 1610 if (!va_is_in_map(map, (vaddr_t)va)) 1611 return -1; 1612 1613 /* 1614 * We can calculate PA for static map. Virtual address ranges 1615 * reserved to core dynamic mapping return a 'match' (return 0;) 1616 * together with an invalid null physical address. 1617 */ 1618 if (map->pa) 1619 *pa = map->pa + (vaddr_t)va - map->va; 1620 else 1621 *pa = 0; 1622 1623 return 0; 1624 } 1625 1626 static void *map_pa2va(struct tee_mmap_region *map, paddr_t pa, size_t len) 1627 { 1628 if (!pa_is_in_map(map, pa, len)) 1629 return NULL; 1630 1631 return (void *)(vaddr_t)(map->va + pa - map->pa); 1632 } 1633 1634 /* 1635 * teecore gets some memory area definitions 1636 */ 1637 void core_mmu_get_mem_by_type(enum teecore_memtypes type, vaddr_t *s, 1638 vaddr_t *e) 1639 { 1640 struct tee_mmap_region *map = find_map_by_type(type); 1641 1642 if (map) { 1643 *s = map->va; 1644 *e = map->va + map->size; 1645 } else { 1646 *s = 0; 1647 *e = 0; 1648 } 1649 } 1650 1651 enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa) 1652 { 1653 struct tee_mmap_region *map = find_map_by_pa(pa); 1654 1655 if (!map) 1656 return MEM_AREA_MAXTYPE; 1657 return map->type; 1658 } 1659 1660 void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned int idx, 1661 paddr_t pa, uint32_t attr) 1662 { 1663 assert(idx < tbl_info->num_entries); 1664 core_mmu_set_entry_primitive(tbl_info->table, tbl_info->level, 1665 idx, pa, attr); 1666 } 1667 1668 void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned int idx, 1669 paddr_t *pa, uint32_t *attr) 1670 { 1671 assert(idx < tbl_info->num_entries); 1672 core_mmu_get_entry_primitive(tbl_info->table, tbl_info->level, 1673 idx, pa, attr); 1674 } 1675 1676 static void clear_region(struct core_mmu_table_info *tbl_info, 1677 struct tee_mmap_region *region) 1678 { 1679 unsigned int end = 0; 1680 unsigned int idx = 0; 1681 1682 /* va, len and pa should be block aligned */ 1683 assert(!core_mmu_get_block_offset(tbl_info, region->va)); 1684 assert(!core_mmu_get_block_offset(tbl_info, region->size)); 1685 assert(!core_mmu_get_block_offset(tbl_info, region->pa)); 1686 1687 idx = core_mmu_va2idx(tbl_info, region->va); 1688 end = core_mmu_va2idx(tbl_info, region->va + region->size); 1689 1690 while (idx < end) { 1691 core_mmu_set_entry(tbl_info, idx, 0, 0); 1692 idx++; 1693 } 1694 } 1695 1696 static void set_region(struct core_mmu_table_info *tbl_info, 1697 struct tee_mmap_region *region) 1698 { 1699 unsigned int end; 1700 unsigned int idx; 1701 paddr_t pa; 1702 1703 /* va, len and pa should be block aligned */ 1704 assert(!core_mmu_get_block_offset(tbl_info, region->va)); 1705 assert(!core_mmu_get_block_offset(tbl_info, region->size)); 1706 assert(!core_mmu_get_block_offset(tbl_info, region->pa)); 1707 1708 idx = core_mmu_va2idx(tbl_info, region->va); 1709 end = core_mmu_va2idx(tbl_info, region->va + region->size); 1710 pa = region->pa; 1711 1712 while (idx < end) { 1713 core_mmu_set_entry(tbl_info, idx, pa, region->attr); 1714 idx++; 1715 pa += BIT64(tbl_info->shift); 1716 } 1717 } 1718 1719 static void set_pg_region(struct core_mmu_table_info *dir_info, 1720 struct vm_region *region, struct pgt **pgt, 1721 struct core_mmu_table_info *pg_info) 1722 { 1723 struct tee_mmap_region r = { 1724 .va = region->va, 1725 .size = region->size, 1726 .attr = region->attr, 1727 }; 1728 vaddr_t end = r.va + r.size; 1729 uint32_t pgt_attr = (r.attr & TEE_MATTR_SECURE) | TEE_MATTR_TABLE; 1730 1731 while (r.va < end) { 1732 if (!pg_info->table || 1733 r.va >= (pg_info->va_base + CORE_MMU_PGDIR_SIZE)) { 1734 /* 1735 * We're assigning a new translation table. 1736 */ 1737 unsigned int idx; 1738 1739 /* Virtual addresses must grow */ 1740 assert(r.va > pg_info->va_base); 1741 1742 idx = core_mmu_va2idx(dir_info, r.va); 1743 pg_info->va_base = core_mmu_idx2va(dir_info, idx); 1744 1745 /* 1746 * Advance pgt to va_base, note that we may need to 1747 * skip multiple page tables if there are large 1748 * holes in the vm map. 1749 */ 1750 while ((*pgt)->vabase < pg_info->va_base) { 1751 *pgt = SLIST_NEXT(*pgt, link); 1752 /* We should have allocated enough */ 1753 assert(*pgt); 1754 } 1755 assert((*pgt)->vabase == pg_info->va_base); 1756 pg_info->table = (*pgt)->tbl; 1757 1758 core_mmu_set_entry(dir_info, idx, 1759 virt_to_phys(pg_info->table), 1760 pgt_attr); 1761 } 1762 1763 r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base), 1764 end - r.va); 1765 1766 if (!(*pgt)->populated && !mobj_is_paged(region->mobj)) { 1767 size_t granule = BIT(pg_info->shift); 1768 size_t offset = r.va - region->va + region->offset; 1769 1770 r.size = MIN(r.size, 1771 mobj_get_phys_granule(region->mobj)); 1772 r.size = ROUNDUP(r.size, SMALL_PAGE_SIZE); 1773 1774 if (mobj_get_pa(region->mobj, offset, granule, 1775 &r.pa) != TEE_SUCCESS) 1776 panic("Failed to get PA of unpaged mobj"); 1777 set_region(pg_info, &r); 1778 } 1779 r.va += r.size; 1780 } 1781 } 1782 1783 static bool can_map_at_level(paddr_t paddr, vaddr_t vaddr, 1784 size_t size_left, paddr_t block_size, 1785 struct tee_mmap_region *mm __maybe_unused) 1786 { 1787 /* VA and PA are aligned to block size at current level */ 1788 if ((vaddr | paddr) & (block_size - 1)) 1789 return false; 1790 1791 /* Remainder fits into block at current level */ 1792 if (size_left < block_size) 1793 return false; 1794 1795 #ifdef CFG_WITH_PAGER 1796 /* 1797 * If pager is enabled, we need to map TEE RAM and the whole pager 1798 * regions with small pages only 1799 */ 1800 if ((map_is_tee_ram(mm) || mm->type == MEM_AREA_PAGER_VASPACE) && 1801 block_size != SMALL_PAGE_SIZE) 1802 return false; 1803 #endif 1804 1805 return true; 1806 } 1807 1808 void core_mmu_map_region(struct mmu_partition *prtn, struct tee_mmap_region *mm) 1809 { 1810 struct core_mmu_table_info tbl_info; 1811 unsigned int idx; 1812 vaddr_t vaddr = mm->va; 1813 paddr_t paddr = mm->pa; 1814 ssize_t size_left = mm->size; 1815 unsigned int level; 1816 bool table_found; 1817 uint32_t old_attr; 1818 1819 assert(!((vaddr | paddr) & SMALL_PAGE_MASK)); 1820 1821 while (size_left > 0) { 1822 level = CORE_MMU_BASE_TABLE_LEVEL; 1823 1824 while (true) { 1825 paddr_t block_size = 0; 1826 1827 assert(core_mmu_level_in_range(level)); 1828 1829 table_found = core_mmu_find_table(prtn, vaddr, level, 1830 &tbl_info); 1831 if (!table_found) 1832 panic("can't find table for mapping"); 1833 1834 block_size = BIT64(tbl_info.shift); 1835 1836 idx = core_mmu_va2idx(&tbl_info, vaddr); 1837 if (!can_map_at_level(paddr, vaddr, size_left, 1838 block_size, mm)) { 1839 bool secure = mm->attr & TEE_MATTR_SECURE; 1840 1841 /* 1842 * This part of the region can't be mapped at 1843 * this level. Need to go deeper. 1844 */ 1845 if (!core_mmu_entry_to_finer_grained(&tbl_info, 1846 idx, 1847 secure)) 1848 panic("Can't divide MMU entry"); 1849 level = tbl_info.next_level; 1850 continue; 1851 } 1852 1853 /* We can map part of the region at current level */ 1854 core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr); 1855 if (old_attr) 1856 panic("Page is already mapped"); 1857 1858 core_mmu_set_entry(&tbl_info, idx, paddr, mm->attr); 1859 paddr += block_size; 1860 vaddr += block_size; 1861 size_left -= block_size; 1862 1863 break; 1864 } 1865 } 1866 } 1867 1868 TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages, 1869 enum teecore_memtypes memtype) 1870 { 1871 TEE_Result ret; 1872 struct core_mmu_table_info tbl_info; 1873 struct tee_mmap_region *mm; 1874 unsigned int idx; 1875 uint32_t old_attr; 1876 uint32_t exceptions; 1877 vaddr_t vaddr = vstart; 1878 size_t i; 1879 bool secure; 1880 1881 assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX)); 1882 1883 secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE; 1884 1885 if (vaddr & SMALL_PAGE_MASK) 1886 return TEE_ERROR_BAD_PARAMETERS; 1887 1888 exceptions = mmu_lock(); 1889 1890 mm = find_map_by_va((void *)vaddr); 1891 if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1)) 1892 panic("VA does not belong to any known mm region"); 1893 1894 if (!core_mmu_is_dynamic_vaspace(mm)) 1895 panic("Trying to map into static region"); 1896 1897 for (i = 0; i < num_pages; i++) { 1898 if (pages[i] & SMALL_PAGE_MASK) { 1899 ret = TEE_ERROR_BAD_PARAMETERS; 1900 goto err; 1901 } 1902 1903 while (true) { 1904 if (!core_mmu_find_table(NULL, vaddr, UINT_MAX, 1905 &tbl_info)) 1906 panic("Can't find pagetable for vaddr "); 1907 1908 idx = core_mmu_va2idx(&tbl_info, vaddr); 1909 if (tbl_info.shift == SMALL_PAGE_SHIFT) 1910 break; 1911 1912 /* This is supertable. Need to divide it. */ 1913 if (!core_mmu_entry_to_finer_grained(&tbl_info, idx, 1914 secure)) 1915 panic("Failed to spread pgdir on small tables"); 1916 } 1917 1918 core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr); 1919 if (old_attr) 1920 panic("Page is already mapped"); 1921 1922 core_mmu_set_entry(&tbl_info, idx, pages[i], 1923 core_mmu_type_to_attr(memtype)); 1924 vaddr += SMALL_PAGE_SIZE; 1925 } 1926 1927 /* 1928 * Make sure all the changes to translation tables are visible 1929 * before returning. TLB doesn't need to be invalidated as we are 1930 * guaranteed that there's no valid mapping in this range. 1931 */ 1932 core_mmu_table_write_barrier(); 1933 mmu_unlock(exceptions); 1934 1935 return TEE_SUCCESS; 1936 err: 1937 mmu_unlock(exceptions); 1938 1939 if (i) 1940 core_mmu_unmap_pages(vstart, i); 1941 1942 return ret; 1943 } 1944 1945 TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart, 1946 size_t num_pages, 1947 enum teecore_memtypes memtype) 1948 { 1949 struct core_mmu_table_info tbl_info = { }; 1950 struct tee_mmap_region *mm = NULL; 1951 unsigned int idx = 0; 1952 uint32_t old_attr = 0; 1953 uint32_t exceptions = 0; 1954 vaddr_t vaddr = vstart; 1955 paddr_t paddr = pstart; 1956 size_t i = 0; 1957 bool secure = false; 1958 1959 assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX)); 1960 1961 secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE; 1962 1963 if ((vaddr | paddr) & SMALL_PAGE_MASK) 1964 return TEE_ERROR_BAD_PARAMETERS; 1965 1966 exceptions = mmu_lock(); 1967 1968 mm = find_map_by_va((void *)vaddr); 1969 if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1)) 1970 panic("VA does not belong to any known mm region"); 1971 1972 if (!core_mmu_is_dynamic_vaspace(mm)) 1973 panic("Trying to map into static region"); 1974 1975 for (i = 0; i < num_pages; i++) { 1976 while (true) { 1977 if (!core_mmu_find_table(NULL, vaddr, UINT_MAX, 1978 &tbl_info)) 1979 panic("Can't find pagetable for vaddr "); 1980 1981 idx = core_mmu_va2idx(&tbl_info, vaddr); 1982 if (tbl_info.shift == SMALL_PAGE_SHIFT) 1983 break; 1984 1985 /* This is supertable. Need to divide it. */ 1986 if (!core_mmu_entry_to_finer_grained(&tbl_info, idx, 1987 secure)) 1988 panic("Failed to spread pgdir on small tables"); 1989 } 1990 1991 core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr); 1992 if (old_attr) 1993 panic("Page is already mapped"); 1994 1995 core_mmu_set_entry(&tbl_info, idx, paddr, 1996 core_mmu_type_to_attr(memtype)); 1997 paddr += SMALL_PAGE_SIZE; 1998 vaddr += SMALL_PAGE_SIZE; 1999 } 2000 2001 /* 2002 * Make sure all the changes to translation tables are visible 2003 * before returning. TLB doesn't need to be invalidated as we are 2004 * guaranteed that there's no valid mapping in this range. 2005 */ 2006 core_mmu_table_write_barrier(); 2007 mmu_unlock(exceptions); 2008 2009 return TEE_SUCCESS; 2010 } 2011 2012 void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages) 2013 { 2014 struct core_mmu_table_info tbl_info; 2015 struct tee_mmap_region *mm; 2016 size_t i; 2017 unsigned int idx; 2018 uint32_t exceptions; 2019 2020 exceptions = mmu_lock(); 2021 2022 mm = find_map_by_va((void *)vstart); 2023 if (!mm || !va_is_in_map(mm, vstart + num_pages * SMALL_PAGE_SIZE - 1)) 2024 panic("VA does not belong to any known mm region"); 2025 2026 if (!core_mmu_is_dynamic_vaspace(mm)) 2027 panic("Trying to unmap static region"); 2028 2029 for (i = 0; i < num_pages; i++, vstart += SMALL_PAGE_SIZE) { 2030 if (!core_mmu_find_table(NULL, vstart, UINT_MAX, &tbl_info)) 2031 panic("Can't find pagetable"); 2032 2033 if (tbl_info.shift != SMALL_PAGE_SHIFT) 2034 panic("Invalid pagetable level"); 2035 2036 idx = core_mmu_va2idx(&tbl_info, vstart); 2037 core_mmu_set_entry(&tbl_info, idx, 0, 0); 2038 } 2039 tlbi_all(); 2040 2041 mmu_unlock(exceptions); 2042 } 2043 2044 void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info, 2045 struct user_mode_ctx *uctx) 2046 { 2047 struct core_mmu_table_info pg_info = { }; 2048 struct pgt_cache *pgt_cache = &uctx->pgt_cache; 2049 struct pgt *pgt = NULL; 2050 struct pgt *p = NULL; 2051 struct vm_region *r = NULL; 2052 2053 if (TAILQ_EMPTY(&uctx->vm_info.regions)) 2054 return; /* Nothing to map */ 2055 2056 /* 2057 * Allocate all page tables in advance. 2058 */ 2059 pgt_get_all(uctx); 2060 pgt = SLIST_FIRST(pgt_cache); 2061 2062 core_mmu_set_info_table(&pg_info, dir_info->next_level, 0, NULL); 2063 2064 TAILQ_FOREACH(r, &uctx->vm_info.regions, link) 2065 set_pg_region(dir_info, r, &pgt, &pg_info); 2066 /* Record that the translation tables now are populated. */ 2067 SLIST_FOREACH(p, pgt_cache, link) { 2068 p->populated = true; 2069 if (p == pgt) 2070 break; 2071 } 2072 assert(p == pgt); 2073 } 2074 2075 TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr, 2076 size_t len) 2077 { 2078 struct core_mmu_table_info tbl_info = { }; 2079 struct tee_mmap_region *res_map = NULL; 2080 struct tee_mmap_region *map = NULL; 2081 paddr_t pa = virt_to_phys(addr); 2082 size_t granule = 0; 2083 ptrdiff_t i = 0; 2084 paddr_t p = 0; 2085 size_t l = 0; 2086 2087 map = find_map_by_type_and_pa(type, pa, len); 2088 if (!map) 2089 return TEE_ERROR_GENERIC; 2090 2091 res_map = find_map_by_type(MEM_AREA_RES_VASPACE); 2092 if (!res_map) 2093 return TEE_ERROR_GENERIC; 2094 if (!core_mmu_find_table(NULL, res_map->va, UINT_MAX, &tbl_info)) 2095 return TEE_ERROR_GENERIC; 2096 granule = BIT(tbl_info.shift); 2097 2098 if (map < static_memory_map.map || 2099 map >= static_memory_map.map + static_memory_map.count) 2100 return TEE_ERROR_GENERIC; 2101 i = map - static_memory_map.map; 2102 2103 /* Check that we have a full match */ 2104 p = ROUNDDOWN(pa, granule); 2105 l = ROUNDUP(len + pa - p, granule); 2106 if (map->pa != p || map->size != l) 2107 return TEE_ERROR_GENERIC; 2108 2109 clear_region(&tbl_info, map); 2110 tlbi_all(); 2111 2112 /* If possible remove the va range from res_map */ 2113 if (res_map->va - map->size == map->va) { 2114 res_map->va -= map->size; 2115 res_map->size += map->size; 2116 } 2117 2118 /* Remove the entry. */ 2119 rem_array_elem(static_memory_map.map, static_memory_map.count, 2120 sizeof(*static_memory_map.map), i); 2121 static_memory_map.count--; 2122 2123 return TEE_SUCCESS; 2124 } 2125 2126 struct tee_mmap_region * 2127 core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len) 2128 { 2129 struct memory_map *mem_map = get_memory_map(); 2130 struct tee_mmap_region *map_found = NULL; 2131 size_t n = 0; 2132 2133 if (!len) 2134 return NULL; 2135 2136 for (n = 0; n < mem_map->count; n++) { 2137 if (mem_map->map[n].type != type) 2138 continue; 2139 2140 if (map_found) 2141 return NULL; 2142 2143 map_found = mem_map->map + n; 2144 } 2145 2146 if (!map_found || map_found->size < len) 2147 return NULL; 2148 2149 return map_found; 2150 } 2151 2152 void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len) 2153 { 2154 struct memory_map *mem_map = &static_memory_map; 2155 struct core_mmu_table_info tbl_info = { }; 2156 struct tee_mmap_region *map = NULL; 2157 size_t granule = 0; 2158 paddr_t p = 0; 2159 size_t l = 0; 2160 2161 if (!len) 2162 return NULL; 2163 2164 if (!core_mmu_check_end_pa(addr, len)) 2165 return NULL; 2166 2167 /* Check if the memory is already mapped */ 2168 map = find_map_by_type_and_pa(type, addr, len); 2169 if (map && pbuf_inside_map_area(addr, len, map)) 2170 return (void *)(vaddr_t)(map->va + addr - map->pa); 2171 2172 /* Find the reserved va space used for late mappings */ 2173 map = find_map_by_type(MEM_AREA_RES_VASPACE); 2174 if (!map) 2175 return NULL; 2176 2177 if (!core_mmu_find_table(NULL, map->va, UINT_MAX, &tbl_info)) 2178 return NULL; 2179 2180 granule = BIT64(tbl_info.shift); 2181 p = ROUNDDOWN(addr, granule); 2182 l = ROUNDUP(len + addr - p, granule); 2183 2184 /* Ban overflowing virtual addresses */ 2185 if (map->size < l) 2186 return NULL; 2187 2188 /* 2189 * Something is wrong, we can't fit the va range into the selected 2190 * table. The reserved va range is possibly missaligned with 2191 * granule. 2192 */ 2193 if (core_mmu_va2idx(&tbl_info, map->va + len) >= tbl_info.num_entries) 2194 return NULL; 2195 2196 if (static_memory_map.count >= static_memory_map.alloc_count) 2197 return NULL; 2198 2199 mem_map->map[mem_map->count] = (struct tee_mmap_region){ 2200 .va = map->va, 2201 .size = l, 2202 .type = type, 2203 .region_size = granule, 2204 .attr = core_mmu_type_to_attr(type), 2205 .pa = p, 2206 }; 2207 map->va += l; 2208 map->size -= l; 2209 map = mem_map->map + mem_map->count; 2210 mem_map->count++; 2211 2212 set_region(&tbl_info, map); 2213 2214 /* Make sure the new entry is visible before continuing. */ 2215 core_mmu_table_write_barrier(); 2216 2217 return (void *)(vaddr_t)(map->va + addr - map->pa); 2218 } 2219 2220 #ifdef CFG_WITH_PAGER 2221 static vaddr_t get_linear_map_end_va(void) 2222 { 2223 /* this is synced with the generic linker file kern.ld.S */ 2224 return (vaddr_t)__heap2_end; 2225 } 2226 2227 static paddr_t get_linear_map_end_pa(void) 2228 { 2229 return get_linear_map_end_va() - boot_mmu_config.map_offset; 2230 } 2231 #endif 2232 2233 #if defined(CFG_TEE_CORE_DEBUG) 2234 static void check_pa_matches_va(void *va, paddr_t pa) 2235 { 2236 TEE_Result res = TEE_ERROR_GENERIC; 2237 vaddr_t v = (vaddr_t)va; 2238 paddr_t p = 0; 2239 struct core_mmu_table_info ti __maybe_unused = { }; 2240 2241 if (core_mmu_user_va_range_is_defined()) { 2242 vaddr_t user_va_base = 0; 2243 size_t user_va_size = 0; 2244 2245 core_mmu_get_user_va_range(&user_va_base, &user_va_size); 2246 if (v >= user_va_base && 2247 v <= (user_va_base - 1 + user_va_size)) { 2248 if (!core_mmu_user_mapping_is_active()) { 2249 if (pa) 2250 panic("issue in linear address space"); 2251 return; 2252 } 2253 2254 res = vm_va2pa(to_user_mode_ctx(thread_get_tsd()->ctx), 2255 va, &p); 2256 if (res == TEE_ERROR_NOT_SUPPORTED) 2257 return; 2258 if (res == TEE_SUCCESS && pa != p) 2259 panic("bad pa"); 2260 if (res != TEE_SUCCESS && pa) 2261 panic("false pa"); 2262 return; 2263 } 2264 } 2265 #ifdef CFG_WITH_PAGER 2266 if (is_unpaged(va)) { 2267 if (v - boot_mmu_config.map_offset != pa) 2268 panic("issue in linear address space"); 2269 return; 2270 } 2271 2272 if (tee_pager_get_table_info(v, &ti)) { 2273 uint32_t a; 2274 2275 /* 2276 * Lookups in the page table managed by the pager is 2277 * dangerous for addresses in the paged area as those pages 2278 * changes all the time. But some ranges are safe, 2279 * rw-locked areas when the page is populated for instance. 2280 */ 2281 core_mmu_get_entry(&ti, core_mmu_va2idx(&ti, v), &p, &a); 2282 if (a & TEE_MATTR_VALID_BLOCK) { 2283 paddr_t mask = BIT64(ti.shift) - 1; 2284 2285 p |= v & mask; 2286 if (pa != p) 2287 panic(); 2288 } else { 2289 if (pa) 2290 panic(); 2291 } 2292 return; 2293 } 2294 #endif 2295 2296 if (!core_va2pa_helper(va, &p)) { 2297 /* Verfiy only the static mapping (case non null phys addr) */ 2298 if (p && pa != p) { 2299 DMSG("va %p maps 0x%" PRIxPA ", expect 0x%" PRIxPA, 2300 va, p, pa); 2301 panic(); 2302 } 2303 } else { 2304 if (pa) { 2305 DMSG("va %p unmapped, expect 0x%" PRIxPA, va, pa); 2306 panic(); 2307 } 2308 } 2309 } 2310 #else 2311 static void check_pa_matches_va(void *va __unused, paddr_t pa __unused) 2312 { 2313 } 2314 #endif 2315 2316 paddr_t virt_to_phys(void *va) 2317 { 2318 paddr_t pa = 0; 2319 2320 if (!arch_va2pa_helper(va, &pa)) 2321 pa = 0; 2322 check_pa_matches_va(memtag_strip_tag(va), pa); 2323 return pa; 2324 } 2325 2326 #if defined(CFG_TEE_CORE_DEBUG) 2327 static void check_va_matches_pa(paddr_t pa, void *va) 2328 { 2329 paddr_t p = 0; 2330 2331 if (!va) 2332 return; 2333 2334 p = virt_to_phys(va); 2335 if (p != pa) { 2336 DMSG("va %p maps 0x%" PRIxPA " expect 0x%" PRIxPA, va, p, pa); 2337 panic(); 2338 } 2339 } 2340 #else 2341 static void check_va_matches_pa(paddr_t pa __unused, void *va __unused) 2342 { 2343 } 2344 #endif 2345 2346 static void *phys_to_virt_ts_vaspace(paddr_t pa, size_t len) 2347 { 2348 if (!core_mmu_user_mapping_is_active()) 2349 return NULL; 2350 2351 return vm_pa2va(to_user_mode_ctx(thread_get_tsd()->ctx), pa, len); 2352 } 2353 2354 #ifdef CFG_WITH_PAGER 2355 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len) 2356 { 2357 paddr_t end_pa = 0; 2358 2359 if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa)) 2360 return NULL; 2361 2362 if (pa >= TEE_LOAD_ADDR && pa < get_linear_map_end_pa()) { 2363 if (end_pa > get_linear_map_end_pa()) 2364 return NULL; 2365 return (void *)(vaddr_t)(pa + boot_mmu_config.map_offset); 2366 } 2367 2368 return tee_pager_phys_to_virt(pa, len); 2369 } 2370 #else 2371 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len) 2372 { 2373 struct tee_mmap_region *mmap = NULL; 2374 2375 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM, pa, len); 2376 if (!mmap) 2377 mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RW, pa, len); 2378 if (!mmap) 2379 mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RO, pa, len); 2380 if (!mmap) 2381 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RW, pa, len); 2382 if (!mmap) 2383 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RO, pa, len); 2384 if (!mmap) 2385 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RX, pa, len); 2386 /* 2387 * Note that MEM_AREA_INIT_RAM_RO and MEM_AREA_INIT_RAM_RX are only 2388 * used with pager and not needed here. 2389 */ 2390 return map_pa2va(mmap, pa, len); 2391 } 2392 #endif 2393 2394 void *phys_to_virt(paddr_t pa, enum teecore_memtypes m, size_t len) 2395 { 2396 void *va = NULL; 2397 2398 switch (m) { 2399 case MEM_AREA_TS_VASPACE: 2400 va = phys_to_virt_ts_vaspace(pa, len); 2401 break; 2402 case MEM_AREA_TEE_RAM: 2403 case MEM_AREA_TEE_RAM_RX: 2404 case MEM_AREA_TEE_RAM_RO: 2405 case MEM_AREA_TEE_RAM_RW: 2406 case MEM_AREA_NEX_RAM_RO: 2407 case MEM_AREA_NEX_RAM_RW: 2408 va = phys_to_virt_tee_ram(pa, len); 2409 break; 2410 case MEM_AREA_SHM_VASPACE: 2411 /* Find VA from PA in dynamic SHM is not yet supported */ 2412 va = NULL; 2413 break; 2414 default: 2415 va = map_pa2va(find_map_by_type_and_pa(m, pa, len), pa, len); 2416 } 2417 if (m != MEM_AREA_SEC_RAM_OVERALL) 2418 check_va_matches_pa(pa, va); 2419 return va; 2420 } 2421 2422 void *phys_to_virt_io(paddr_t pa, size_t len) 2423 { 2424 struct tee_mmap_region *map = NULL; 2425 void *va = NULL; 2426 2427 map = find_map_by_type_and_pa(MEM_AREA_IO_SEC, pa, len); 2428 if (!map) 2429 map = find_map_by_type_and_pa(MEM_AREA_IO_NSEC, pa, len); 2430 if (!map) 2431 return NULL; 2432 va = map_pa2va(map, pa, len); 2433 check_va_matches_pa(pa, va); 2434 return va; 2435 } 2436 2437 vaddr_t core_mmu_get_va(paddr_t pa, enum teecore_memtypes type, size_t len) 2438 { 2439 if (cpu_mmu_enabled()) 2440 return (vaddr_t)phys_to_virt(pa, type, len); 2441 2442 return (vaddr_t)pa; 2443 } 2444 2445 #ifdef CFG_WITH_PAGER 2446 bool is_unpaged(const void *va) 2447 { 2448 vaddr_t v = (vaddr_t)va; 2449 2450 return v >= VCORE_START_VA && v < get_linear_map_end_va(); 2451 } 2452 #endif 2453 2454 #ifdef CFG_NS_VIRTUALIZATION 2455 bool is_nexus(const void *va) 2456 { 2457 vaddr_t v = (vaddr_t)va; 2458 2459 return v >= VCORE_START_VA && v < VCORE_NEX_RW_PA + VCORE_NEX_RW_SZ; 2460 } 2461 #endif 2462 2463 void core_mmu_init_virtualization(void) 2464 { 2465 paddr_t b1 = 0; 2466 paddr_size_t s1 = 0; 2467 2468 static_assert(ARRAY_SIZE(secure_only) <= 2); 2469 if (ARRAY_SIZE(secure_only) == 2) { 2470 b1 = secure_only[1].paddr; 2471 s1 = secure_only[1].size; 2472 } 2473 virt_init_memory(&static_memory_map, secure_only[0].paddr, 2474 secure_only[0].size, b1, s1); 2475 } 2476 2477 vaddr_t io_pa_or_va(struct io_pa_va *p, size_t len) 2478 { 2479 assert(p->pa); 2480 if (cpu_mmu_enabled()) { 2481 if (!p->va) 2482 p->va = (vaddr_t)phys_to_virt_io(p->pa, len); 2483 assert(p->va); 2484 return p->va; 2485 } 2486 return p->pa; 2487 } 2488 2489 vaddr_t io_pa_or_va_secure(struct io_pa_va *p, size_t len) 2490 { 2491 assert(p->pa); 2492 if (cpu_mmu_enabled()) { 2493 if (!p->va) 2494 p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_SEC, 2495 len); 2496 assert(p->va); 2497 return p->va; 2498 } 2499 return p->pa; 2500 } 2501 2502 vaddr_t io_pa_or_va_nsec(struct io_pa_va *p, size_t len) 2503 { 2504 assert(p->pa); 2505 if (cpu_mmu_enabled()) { 2506 if (!p->va) 2507 p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_NSEC, 2508 len); 2509 assert(p->va); 2510 return p->va; 2511 } 2512 return p->pa; 2513 } 2514 2515 #ifdef CFG_CORE_RESERVED_SHM 2516 static TEE_Result teecore_init_pub_ram(void) 2517 { 2518 vaddr_t s = 0; 2519 vaddr_t e = 0; 2520 2521 /* get virtual addr/size of NSec shared mem allocated from teecore */ 2522 core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e); 2523 2524 if (s >= e || s & SMALL_PAGE_MASK || e & SMALL_PAGE_MASK) 2525 panic("invalid PUB RAM"); 2526 2527 /* extra check: we could rely on core_mmu_get_mem_by_type() */ 2528 if (!tee_vbuf_is_non_sec(s, e - s)) 2529 panic("PUB RAM is not non-secure"); 2530 2531 #ifdef CFG_PL310 2532 /* Allocate statically the l2cc mutex */ 2533 tee_l2cc_store_mutex_boot_pa(virt_to_phys((void *)s)); 2534 s += sizeof(uint32_t); /* size of a pl310 mutex */ 2535 s = ROUNDUP(s, SMALL_PAGE_SIZE); /* keep required alignment */ 2536 #endif 2537 2538 default_nsec_shm_paddr = virt_to_phys((void *)s); 2539 default_nsec_shm_size = e - s; 2540 2541 return TEE_SUCCESS; 2542 } 2543 early_init(teecore_init_pub_ram); 2544 #endif /*CFG_CORE_RESERVED_SHM*/ 2545 2546 void core_mmu_init_ta_ram(void) 2547 { 2548 vaddr_t s = 0; 2549 vaddr_t e = 0; 2550 paddr_t ps = 0; 2551 size_t size = 0; 2552 2553 /* 2554 * Get virtual addr/size of RAM where TA are loaded/executedNSec 2555 * shared mem allocated from teecore. 2556 */ 2557 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) 2558 virt_get_ta_ram(&s, &e); 2559 else 2560 core_mmu_get_mem_by_type(MEM_AREA_TA_RAM, &s, &e); 2561 2562 ps = virt_to_phys((void *)s); 2563 size = e - s; 2564 2565 if (!ps || (ps & CORE_MMU_USER_CODE_MASK) || 2566 !size || (size & CORE_MMU_USER_CODE_MASK)) 2567 panic("invalid TA RAM"); 2568 2569 /* extra check: we could rely on core_mmu_get_mem_by_type() */ 2570 if (!tee_pbuf_is_sec(ps, size)) 2571 panic("TA RAM is not secure"); 2572 2573 if (!tee_mm_is_empty(&tee_mm_sec_ddr)) 2574 panic("TA RAM pool is not empty"); 2575 2576 /* remove previous config and init TA ddr memory pool */ 2577 tee_mm_final(&tee_mm_sec_ddr); 2578 tee_mm_init(&tee_mm_sec_ddr, ps, size, CORE_MMU_USER_CODE_SHIFT, 2579 TEE_MM_POOL_NO_FLAGS); 2580 } 2581