1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2016, 2022 Linaro Limited 4 * Copyright (c) 2014, STMicroelectronics International N.V. 5 * Copyright (c) 2022, Arm Limited and Contributors. All rights reserved. 6 */ 7 8 #include <assert.h> 9 #include <config.h> 10 #include <kernel/boot.h> 11 #include <kernel/dt.h> 12 #include <kernel/linker.h> 13 #include <kernel/panic.h> 14 #include <kernel/spinlock.h> 15 #include <kernel/tee_l2cc_mutex.h> 16 #include <kernel/tee_misc.h> 17 #include <kernel/tlb_helpers.h> 18 #include <kernel/user_mode_ctx.h> 19 #include <kernel/virtualization.h> 20 #include <libfdt.h> 21 #include <memtag.h> 22 #include <mm/core_memprot.h> 23 #include <mm/core_mmu.h> 24 #include <mm/mobj.h> 25 #include <mm/pgt_cache.h> 26 #include <mm/phys_mem.h> 27 #include <mm/tee_pager.h> 28 #include <mm/vm.h> 29 #include <platform_config.h> 30 #include <stdalign.h> 31 #include <string.h> 32 #include <trace.h> 33 #include <util.h> 34 35 #ifndef DEBUG_XLAT_TABLE 36 #define DEBUG_XLAT_TABLE 0 37 #endif 38 39 #define SHM_VASPACE_SIZE (1024 * 1024 * 32) 40 41 /* Virtual memory pool for core mappings */ 42 tee_mm_pool_t core_virt_mem_pool; 43 44 /* Virtual memory pool for shared memory mappings */ 45 tee_mm_pool_t core_virt_shm_pool; 46 47 #ifdef CFG_CORE_PHYS_RELOCATABLE 48 unsigned long core_mmu_tee_load_pa __nex_bss; 49 #else 50 const unsigned long core_mmu_tee_load_pa = TEE_LOAD_ADDR; 51 #endif 52 53 /* 54 * These variables are initialized before .bss is cleared. To avoid 55 * resetting them when .bss is cleared we're storing them in .data instead, 56 * even if they initially are zero. 57 */ 58 59 #ifdef CFG_CORE_RESERVED_SHM 60 /* Default NSec shared memory allocated from NSec world */ 61 unsigned long default_nsec_shm_size __nex_bss; 62 unsigned long default_nsec_shm_paddr __nex_bss; 63 #endif 64 65 static struct tee_mmap_region static_mmap_regions[CFG_MMAP_REGIONS 66 #if defined(CFG_CORE_ASLR) || defined(CFG_CORE_PHYS_RELOCATABLE) 67 + 1 68 #endif 69 + 1] __nex_bss; 70 static struct memory_map static_memory_map __nex_data = { 71 .map = static_mmap_regions, 72 .alloc_count = ARRAY_SIZE(static_mmap_regions), 73 }; 74 75 /* Define the platform's memory layout. 
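 *
 * secure_only[] lists the physical ranges that only the secure world may
 * access, while nsec_shared[] lists the ranges shared with the normal
 * world (the optional reserved SHM). Both arrays are consumed by the
 * pbuf_*() helpers further down in this file.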
*/ 76 struct memaccess_area { 77 paddr_t paddr; 78 size_t size; 79 }; 80 81 #define MEMACCESS_AREA(a, s) { .paddr = a, .size = s } 82 83 static struct memaccess_area secure_only[] __nex_data = { 84 #ifdef CFG_CORE_PHYS_RELOCATABLE 85 MEMACCESS_AREA(0, 0), 86 #else 87 #ifdef TRUSTED_SRAM_BASE 88 MEMACCESS_AREA(TRUSTED_SRAM_BASE, TRUSTED_SRAM_SIZE), 89 #endif 90 MEMACCESS_AREA(TRUSTED_DRAM_BASE, TRUSTED_DRAM_SIZE), 91 #endif 92 }; 93 94 static struct memaccess_area nsec_shared[] __nex_data = { 95 #ifdef CFG_CORE_RESERVED_SHM 96 MEMACCESS_AREA(TEE_SHMEM_START, TEE_SHMEM_SIZE), 97 #endif 98 }; 99 100 #if defined(CFG_SECURE_DATA_PATH) 101 static const char *tz_sdp_match = "linaro,secure-heap"; 102 static struct memaccess_area sec_sdp; 103 #ifdef CFG_TEE_SDP_MEM_BASE 104 register_sdp_mem(CFG_TEE_SDP_MEM_BASE, CFG_TEE_SDP_MEM_SIZE); 105 #endif 106 #ifdef TEE_SDP_TEST_MEM_BASE 107 register_sdp_mem(TEE_SDP_TEST_MEM_BASE, TEE_SDP_TEST_MEM_SIZE); 108 #endif 109 #endif 110 111 #ifdef CFG_CORE_RESERVED_SHM 112 register_phys_mem(MEM_AREA_NSEC_SHM, TEE_SHMEM_START, TEE_SHMEM_SIZE); 113 #endif 114 static unsigned int mmu_spinlock; 115 116 static uint32_t mmu_lock(void) 117 { 118 return cpu_spin_lock_xsave(&mmu_spinlock); 119 } 120 121 static void mmu_unlock(uint32_t exceptions) 122 { 123 cpu_spin_unlock_xrestore(&mmu_spinlock, exceptions); 124 } 125 126 static void grow_mem_map(struct memory_map *mem_map) 127 { 128 if (mem_map->count == mem_map->alloc_count) { 129 EMSG("Out of entries (%zu) in mem_map", mem_map->alloc_count); 130 panic(); 131 } 132 mem_map->count++; 133 } 134 135 void core_mmu_get_secure_memory(paddr_t *base, paddr_size_t *size) 136 { 137 /* 138 * The first range is always used to cover OP-TEE core memory, but 139 * depending on configuration it may cover more than that. 
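	 *
	 * For example, with a single TRUSTED_DRAM_BASE/TRUSTED_DRAM_SIZE
	 * entry the range returned here covers both the core image and the
	 * TA RAM that core_mmu_get_ta_range() later derives from it.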
140 */ 141 *base = secure_only[0].paddr; 142 *size = secure_only[0].size; 143 } 144 145 void core_mmu_set_secure_memory(paddr_t base, size_t size) 146 { 147 #ifdef CFG_CORE_PHYS_RELOCATABLE 148 static_assert(ARRAY_SIZE(secure_only) == 1); 149 #endif 150 runtime_assert(IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE)); 151 assert(!secure_only[0].size); 152 assert(base && size); 153 154 DMSG("Physical secure memory base %#"PRIxPA" size %#zx", base, size); 155 secure_only[0].paddr = base; 156 secure_only[0].size = size; 157 } 158 159 void core_mmu_get_ta_range(paddr_t *base, size_t *size) 160 { 161 paddr_t b = 0; 162 size_t s = 0; 163 164 static_assert(!(TEE_RAM_VA_SIZE % SMALL_PAGE_SIZE)); 165 #ifdef TA_RAM_START 166 b = TA_RAM_START; 167 s = TA_RAM_SIZE; 168 #else 169 static_assert(ARRAY_SIZE(secure_only) <= 2); 170 if (ARRAY_SIZE(secure_only) == 1) { 171 vaddr_t load_offs = 0; 172 173 assert(core_mmu_tee_load_pa >= secure_only[0].paddr); 174 load_offs = core_mmu_tee_load_pa - secure_only[0].paddr; 175 176 assert(secure_only[0].size > 177 load_offs + TEE_RAM_VA_SIZE + TEE_SDP_TEST_MEM_SIZE); 178 b = secure_only[0].paddr + load_offs + TEE_RAM_VA_SIZE; 179 s = secure_only[0].size - load_offs - TEE_RAM_VA_SIZE - 180 TEE_SDP_TEST_MEM_SIZE; 181 } else { 182 assert(secure_only[1].size > TEE_SDP_TEST_MEM_SIZE); 183 b = secure_only[1].paddr; 184 s = secure_only[1].size - TEE_SDP_TEST_MEM_SIZE; 185 } 186 #endif 187 if (base) 188 *base = b; 189 if (size) 190 *size = s; 191 } 192 193 static struct memory_map *get_memory_map(void) 194 { 195 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 196 struct memory_map *map = virt_get_memory_map(); 197 198 if (map) 199 return map; 200 } 201 202 return &static_memory_map; 203 } 204 205 static bool _pbuf_intersects(struct memaccess_area *a, size_t alen, 206 paddr_t pa, size_t size) 207 { 208 size_t n; 209 210 for (n = 0; n < alen; n++) 211 if (core_is_buffer_intersect(pa, size, a[n].paddr, a[n].size)) 212 return true; 213 return false; 214 } 215 216 #define pbuf_intersects(a, pa, size) \ 217 _pbuf_intersects((a), ARRAY_SIZE(a), (pa), (size)) 218 219 static bool _pbuf_is_inside(struct memaccess_area *a, size_t alen, 220 paddr_t pa, size_t size) 221 { 222 size_t n; 223 224 for (n = 0; n < alen; n++) 225 if (core_is_buffer_inside(pa, size, a[n].paddr, a[n].size)) 226 return true; 227 return false; 228 } 229 230 #define pbuf_is_inside(a, pa, size) \ 231 _pbuf_is_inside((a), ARRAY_SIZE(a), (pa), (size)) 232 233 static bool pa_is_in_map(struct tee_mmap_region *map, paddr_t pa, size_t len) 234 { 235 paddr_t end_pa = 0; 236 237 if (!map) 238 return false; 239 240 if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa)) 241 return false; 242 243 return (pa >= map->pa && end_pa <= map->pa + map->size - 1); 244 } 245 246 static bool va_is_in_map(struct tee_mmap_region *map, vaddr_t va) 247 { 248 if (!map) 249 return false; 250 return (va >= map->va && va <= (map->va + map->size - 1)); 251 } 252 253 /* check if target buffer fits in a core default map area */ 254 static bool pbuf_inside_map_area(unsigned long p, size_t l, 255 struct tee_mmap_region *map) 256 { 257 return core_is_buffer_inside(p, l, map->pa, map->size); 258 } 259 260 TEE_Result core_mmu_for_each_map(void *ptr, 261 TEE_Result (*fn)(struct tee_mmap_region *map, 262 void *ptr)) 263 { 264 struct memory_map *mem_map = get_memory_map(); 265 TEE_Result res = TEE_SUCCESS; 266 size_t n = 0; 267 268 for (n = 0; n < mem_map->count; n++) { 269 res = fn(mem_map->map + n, ptr); 270 if (res) 271 return res; 272 } 273 274 return 
TEE_SUCCESS; 275 } 276 277 static struct tee_mmap_region *find_map_by_type(enum teecore_memtypes type) 278 { 279 struct memory_map *mem_map = get_memory_map(); 280 size_t n = 0; 281 282 for (n = 0; n < mem_map->count; n++) { 283 if (mem_map->map[n].type == type) 284 return mem_map->map + n; 285 } 286 return NULL; 287 } 288 289 static struct tee_mmap_region * 290 find_map_by_type_and_pa(enum teecore_memtypes type, paddr_t pa, size_t len) 291 { 292 struct memory_map *mem_map = get_memory_map(); 293 size_t n = 0; 294 295 for (n = 0; n < mem_map->count; n++) { 296 if (mem_map->map[n].type != type) 297 continue; 298 if (pa_is_in_map(mem_map->map + n, pa, len)) 299 return mem_map->map + n; 300 } 301 return NULL; 302 } 303 304 static struct tee_mmap_region *find_map_by_va(void *va) 305 { 306 struct memory_map *mem_map = get_memory_map(); 307 vaddr_t a = (vaddr_t)va; 308 size_t n = 0; 309 310 for (n = 0; n < mem_map->count; n++) { 311 if (a >= mem_map->map[n].va && 312 a <= (mem_map->map[n].va - 1 + mem_map->map[n].size)) 313 return mem_map->map + n; 314 } 315 316 return NULL; 317 } 318 319 static struct tee_mmap_region *find_map_by_pa(unsigned long pa) 320 { 321 struct memory_map *mem_map = get_memory_map(); 322 size_t n = 0; 323 324 for (n = 0; n < mem_map->count; n++) { 325 /* Skip unmapped regions */ 326 if ((mem_map->map[n].attr & TEE_MATTR_VALID_BLOCK) && 327 pa >= mem_map->map[n].pa && 328 pa <= (mem_map->map[n].pa - 1 + mem_map->map[n].size)) 329 return mem_map->map + n; 330 } 331 332 return NULL; 333 } 334 335 #if defined(CFG_SECURE_DATA_PATH) 336 static bool dtb_get_sdp_region(void) 337 { 338 void *fdt = NULL; 339 int node = 0; 340 int tmp_node = 0; 341 paddr_t tmp_addr = 0; 342 size_t tmp_size = 0; 343 344 if (!IS_ENABLED(CFG_EMBED_DTB)) 345 return false; 346 347 fdt = get_embedded_dt(); 348 if (!fdt) 349 panic("No DTB found"); 350 351 node = fdt_node_offset_by_compatible(fdt, -1, tz_sdp_match); 352 if (node < 0) { 353 DMSG("No %s compatible node found", tz_sdp_match); 354 return false; 355 } 356 tmp_node = node; 357 while (tmp_node >= 0) { 358 tmp_node = fdt_node_offset_by_compatible(fdt, tmp_node, 359 tz_sdp_match); 360 if (tmp_node >= 0) 361 DMSG("Ignore SDP pool node %s, supports only 1 node", 362 fdt_get_name(fdt, tmp_node, NULL)); 363 } 364 365 tmp_addr = fdt_reg_base_address(fdt, node); 366 if (tmp_addr == DT_INFO_INVALID_REG) { 367 EMSG("%s: Unable to get base addr from DT", tz_sdp_match); 368 return false; 369 } 370 371 tmp_size = fdt_reg_size(fdt, node); 372 if (tmp_size == DT_INFO_INVALID_REG_SIZE) { 373 EMSG("%s: Unable to get size of base addr from DT", 374 tz_sdp_match); 375 return false; 376 } 377 378 sec_sdp.paddr = tmp_addr; 379 sec_sdp.size = tmp_size; 380 381 return true; 382 } 383 #endif 384 385 #if defined(CFG_CORE_DYN_SHM) || defined(CFG_SECURE_DATA_PATH) 386 static bool pbuf_is_special_mem(paddr_t pbuf, size_t len, 387 const struct core_mmu_phys_mem *start, 388 const struct core_mmu_phys_mem *end) 389 { 390 const struct core_mmu_phys_mem *mem; 391 392 for (mem = start; mem < end; mem++) { 393 if (core_is_buffer_inside(pbuf, len, mem->addr, mem->size)) 394 return true; 395 } 396 397 return false; 398 } 399 #endif 400 401 #ifdef CFG_CORE_DYN_SHM 402 static void carve_out_phys_mem(struct core_mmu_phys_mem **mem, size_t *nelems, 403 paddr_t pa, size_t size) 404 { 405 struct core_mmu_phys_mem *m = *mem; 406 size_t n = 0; 407 408 while (true) { 409 if (n >= *nelems) { 410 DMSG("No need to carve out %#" PRIxPA " size %#zx", 411 pa, size); 412 return; 413 } 414 if 
(core_is_buffer_inside(pa, size, m[n].addr, m[n].size)) 415 break; 416 if (!core_is_buffer_outside(pa, size, m[n].addr, m[n].size)) 417 panic(); 418 n++; 419 } 420 421 if (pa == m[n].addr && size == m[n].size) { 422 /* Remove this entry */ 423 (*nelems)--; 424 memmove(m + n, m + n + 1, sizeof(*m) * (*nelems - n)); 425 m = nex_realloc(m, sizeof(*m) * *nelems); 426 if (!m) 427 panic(); 428 *mem = m; 429 } else if (pa == m[n].addr) { 430 m[n].addr += size; 431 m[n].size -= size; 432 } else if ((pa + size) == (m[n].addr + m[n].size)) { 433 m[n].size -= size; 434 } else { 435 /* Need to split the memory entry */ 436 m = nex_realloc(m, sizeof(*m) * (*nelems + 1)); 437 if (!m) 438 panic(); 439 *mem = m; 440 memmove(m + n + 1, m + n, sizeof(*m) * (*nelems - n)); 441 (*nelems)++; 442 m[n].size = pa - m[n].addr; 443 m[n + 1].size -= size + m[n].size; 444 m[n + 1].addr = pa + size; 445 } 446 } 447 448 static void check_phys_mem_is_outside(struct core_mmu_phys_mem *start, 449 size_t nelems, 450 struct tee_mmap_region *map) 451 { 452 size_t n; 453 454 for (n = 0; n < nelems; n++) { 455 if (!core_is_buffer_outside(start[n].addr, start[n].size, 456 map->pa, map->size)) { 457 EMSG("Non-sec mem (%#" PRIxPA ":%#" PRIxPASZ 458 ") overlaps map (type %d %#" PRIxPA ":%#zx)", 459 start[n].addr, start[n].size, 460 map->type, map->pa, map->size); 461 panic(); 462 } 463 } 464 } 465 466 static const struct core_mmu_phys_mem *discovered_nsec_ddr_start __nex_bss; 467 static size_t discovered_nsec_ddr_nelems __nex_bss; 468 469 static int cmp_pmem_by_addr(const void *a, const void *b) 470 { 471 const struct core_mmu_phys_mem *pmem_a = a; 472 const struct core_mmu_phys_mem *pmem_b = b; 473 474 return CMP_TRILEAN(pmem_a->addr, pmem_b->addr); 475 } 476 477 void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start, 478 size_t nelems) 479 { 480 struct core_mmu_phys_mem *m = start; 481 size_t num_elems = nelems; 482 struct memory_map *mem_map = &static_memory_map; 483 const struct core_mmu_phys_mem __maybe_unused *pmem; 484 size_t n = 0; 485 486 assert(!discovered_nsec_ddr_start); 487 assert(m && num_elems); 488 489 qsort(m, num_elems, sizeof(*m), cmp_pmem_by_addr); 490 491 /* 492 * Non-secure shared memory and also secure data 493 * path memory are supposed to reside inside 494 * non-secure memory. Since NSEC_SHM and SDP_MEM 495 * are used for a specific purpose make holes for 496 * those memory in the normal non-secure memory. 497 * 498 * This has to be done since for instance QEMU 499 * isn't aware of which memory range in the 500 * non-secure memory is used for NSEC_SHM. 
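	 *
	 * As an illustration (addresses are hypothetical): if the
	 * discovered non-secure range is 0x80000000..0x8fffffff and the
	 * reserved SHM is 0x88000000..0x881fffff, carve_out_phys_mem()
	 * splits that single entry into 0x80000000..0x87ffffff and
	 * 0x88200000..0x8fffffff.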
501 */ 502 503 #ifdef CFG_SECURE_DATA_PATH 504 if (dtb_get_sdp_region()) 505 carve_out_phys_mem(&m, &num_elems, sec_sdp.paddr, sec_sdp.size); 506 507 for (pmem = phys_sdp_mem_begin; pmem < phys_sdp_mem_end; pmem++) 508 carve_out_phys_mem(&m, &num_elems, pmem->addr, pmem->size); 509 #endif 510 511 for (n = 0; n < ARRAY_SIZE(secure_only); n++) 512 carve_out_phys_mem(&m, &num_elems, secure_only[n].paddr, 513 secure_only[n].size); 514 515 for (n = 0; n < mem_map->count; n++) { 516 switch (mem_map->map[n].type) { 517 case MEM_AREA_NSEC_SHM: 518 carve_out_phys_mem(&m, &num_elems, mem_map->map[n].pa, 519 mem_map->map[n].size); 520 break; 521 case MEM_AREA_EXT_DT: 522 case MEM_AREA_MANIFEST_DT: 523 case MEM_AREA_RAM_NSEC: 524 case MEM_AREA_RES_VASPACE: 525 case MEM_AREA_SHM_VASPACE: 526 case MEM_AREA_TS_VASPACE: 527 case MEM_AREA_PAGER_VASPACE: 528 break; 529 default: 530 check_phys_mem_is_outside(m, num_elems, 531 mem_map->map + n); 532 } 533 } 534 535 discovered_nsec_ddr_start = m; 536 discovered_nsec_ddr_nelems = num_elems; 537 538 if (!core_mmu_check_end_pa(m[num_elems - 1].addr, 539 m[num_elems - 1].size)) 540 panic(); 541 } 542 543 static bool get_discovered_nsec_ddr(const struct core_mmu_phys_mem **start, 544 const struct core_mmu_phys_mem **end) 545 { 546 if (!discovered_nsec_ddr_start) 547 return false; 548 549 *start = discovered_nsec_ddr_start; 550 *end = discovered_nsec_ddr_start + discovered_nsec_ddr_nelems; 551 552 return true; 553 } 554 555 static bool pbuf_is_nsec_ddr(paddr_t pbuf, size_t len) 556 { 557 const struct core_mmu_phys_mem *start; 558 const struct core_mmu_phys_mem *end; 559 560 if (!get_discovered_nsec_ddr(&start, &end)) 561 return false; 562 563 return pbuf_is_special_mem(pbuf, len, start, end); 564 } 565 566 bool core_mmu_nsec_ddr_is_defined(void) 567 { 568 const struct core_mmu_phys_mem *start; 569 const struct core_mmu_phys_mem *end; 570 571 if (!get_discovered_nsec_ddr(&start, &end)) 572 return false; 573 574 return start != end; 575 } 576 #else 577 static bool pbuf_is_nsec_ddr(paddr_t pbuf __unused, size_t len __unused) 578 { 579 return false; 580 } 581 #endif /*CFG_CORE_DYN_SHM*/ 582 583 #define MSG_MEM_INSTERSECT(pa1, sz1, pa2, sz2) \ 584 EMSG("[%" PRIxPA " %" PRIx64 "] intersects [%" PRIxPA " %" PRIx64 "]", \ 585 pa1, (uint64_t)pa1 + (sz1), pa2, (uint64_t)pa2 + (sz2)) 586 587 #ifdef CFG_SECURE_DATA_PATH 588 static bool pbuf_is_sdp_mem(paddr_t pbuf, size_t len) 589 { 590 bool is_sdp_mem = false; 591 592 if (sec_sdp.size) 593 is_sdp_mem = core_is_buffer_inside(pbuf, len, sec_sdp.paddr, 594 sec_sdp.size); 595 596 if (!is_sdp_mem) 597 is_sdp_mem = pbuf_is_special_mem(pbuf, len, phys_sdp_mem_begin, 598 phys_sdp_mem_end); 599 600 return is_sdp_mem; 601 } 602 603 static struct mobj *core_sdp_mem_alloc_mobj(paddr_t pa, size_t size) 604 { 605 struct mobj *mobj = mobj_phys_alloc(pa, size, TEE_MATTR_MEM_TYPE_CACHED, 606 CORE_MEM_SDP_MEM); 607 608 if (!mobj) 609 panic("can't create SDP physical memory object"); 610 611 return mobj; 612 } 613 614 struct mobj **core_sdp_mem_create_mobjs(void) 615 { 616 const struct core_mmu_phys_mem *mem = NULL; 617 struct mobj **mobj_base = NULL; 618 struct mobj **mobj = NULL; 619 int cnt = phys_sdp_mem_end - phys_sdp_mem_begin; 620 621 if (sec_sdp.size) 622 cnt++; 623 624 /* SDP mobjs table must end with a NULL entry */ 625 mobj_base = calloc(cnt + 1, sizeof(struct mobj *)); 626 if (!mobj_base) 627 panic("Out of memory"); 628 629 mobj = mobj_base; 630 631 for (mem = phys_sdp_mem_begin; mem < phys_sdp_mem_end; mem++, mobj++) 632 *mobj = 
core_sdp_mem_alloc_mobj(mem->addr, mem->size); 633 634 if (sec_sdp.size) 635 *mobj = core_sdp_mem_alloc_mobj(sec_sdp.paddr, sec_sdp.size); 636 637 return mobj_base; 638 } 639 640 #else /* CFG_SECURE_DATA_PATH */ 641 static bool pbuf_is_sdp_mem(paddr_t pbuf __unused, size_t len __unused) 642 { 643 return false; 644 } 645 646 #endif /* CFG_SECURE_DATA_PATH */ 647 648 /* Check special memories comply with registered memories */ 649 static void verify_special_mem_areas(struct memory_map *mem_map, 650 const struct core_mmu_phys_mem *start, 651 const struct core_mmu_phys_mem *end, 652 const char *area_name __maybe_unused) 653 { 654 const struct core_mmu_phys_mem *mem = NULL; 655 const struct core_mmu_phys_mem *mem2 = NULL; 656 size_t n = 0; 657 658 if (start == end) { 659 DMSG("No %s memory area defined", area_name); 660 return; 661 } 662 663 for (mem = start; mem < end; mem++) 664 DMSG("%s memory [%" PRIxPA " %" PRIx64 "]", 665 area_name, mem->addr, (uint64_t)mem->addr + mem->size); 666 667 /* Check memories do not intersect each other */ 668 for (mem = start; mem + 1 < end; mem++) { 669 for (mem2 = mem + 1; mem2 < end; mem2++) { 670 if (core_is_buffer_intersect(mem2->addr, mem2->size, 671 mem->addr, mem->size)) { 672 MSG_MEM_INSTERSECT(mem2->addr, mem2->size, 673 mem->addr, mem->size); 674 panic("Special memory intersection"); 675 } 676 } 677 } 678 679 /* 680 * Check memories do not intersect any mapped memory. 681 * This is called before reserved VA space is loaded in mem_map. 682 */ 683 for (mem = start; mem < end; mem++) { 684 for (n = 0; n < mem_map->count; n++) { 685 if (core_is_buffer_intersect(mem->addr, mem->size, 686 mem_map->map[n].pa, 687 mem_map->map[n].size)) { 688 MSG_MEM_INSTERSECT(mem->addr, mem->size, 689 mem_map->map[n].pa, 690 mem_map->map[n].size); 691 panic("Special memory intersection"); 692 } 693 } 694 } 695 } 696 697 static void merge_mmaps(struct tee_mmap_region *dst, 698 const struct tee_mmap_region *src) 699 { 700 paddr_t end_pa = MAX(dst->pa + dst->size - 1, src->pa + src->size - 1); 701 paddr_t pa = MIN(dst->pa, src->pa); 702 703 DMSG("Merging %#"PRIxPA"..%#"PRIxPA" and %#"PRIxPA"..%#"PRIxPA, 704 dst->pa, dst->pa + dst->size - 1, src->pa, 705 src->pa + src->size - 1); 706 dst->pa = pa; 707 dst->size = end_pa - pa + 1; 708 } 709 710 static bool mmaps_are_mergeable(const struct tee_mmap_region *r1, 711 const struct tee_mmap_region *r2) 712 { 713 if (r1->type != r2->type) 714 return false; 715 716 if (r1->pa == r2->pa) 717 return true; 718 719 if (r1->pa < r2->pa) 720 return r1->pa + r1->size >= r2->pa; 721 else 722 return r2->pa + r2->size >= r1->pa; 723 } 724 725 static void add_phys_mem(struct memory_map *mem_map, 726 const char *mem_name __maybe_unused, 727 enum teecore_memtypes mem_type, 728 paddr_t mem_addr, paddr_size_t mem_size) 729 { 730 size_t n = 0; 731 const struct tee_mmap_region m0 = { 732 .type = mem_type, 733 .pa = mem_addr, 734 .size = mem_size, 735 }; 736 737 if (!mem_size) /* Discard null size entries */ 738 return; 739 740 /* 741 * If some ranges of memory of the same type do overlap 742 * each others they are coalesced into one entry. To help this 743 * added entries are sorted by increasing physical. 744 * 745 * Note that it's valid to have the same physical memory as several 746 * different memory types, for instance the same device memory 747 * mapped as both secure and non-secure. This will probably not 748 * happen often in practice. 
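	 *
	 * As an illustration (addresses are hypothetical): adding a
	 * MEM_AREA_RAM_SEC entry 0x40000000..0x401fffff next to an existing
	 * MEM_AREA_RAM_SEC entry 0x40200000..0x403fffff results in a single
	 * entry covering 0x40000000..0x403fffff, since mmaps_are_mergeable()
	 * also treats adjacent ranges of the same type as mergeable.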
749 */ 750 DMSG("%s type %s 0x%08" PRIxPA " size 0x%08" PRIxPASZ, 751 mem_name, teecore_memtype_name(mem_type), mem_addr, mem_size); 752 for (n = 0; n < mem_map->count; n++) { 753 if (mmaps_are_mergeable(mem_map->map + n, &m0)) { 754 merge_mmaps(mem_map->map + n, &m0); 755 /* 756 * The merged result might be mergeable with the 757 * next or previous entry. 758 */ 759 if (n + 1 < mem_map->count && 760 mmaps_are_mergeable(mem_map->map + n, 761 mem_map->map + n + 1)) { 762 merge_mmaps(mem_map->map + n, 763 mem_map->map + n + 1); 764 rem_array_elem(mem_map->map, mem_map->count, 765 sizeof(*mem_map->map), n + 1); 766 mem_map->count--; 767 } 768 if (n > 0 && mmaps_are_mergeable(mem_map->map + n - 1, 769 mem_map->map + n)) { 770 merge_mmaps(mem_map->map + n - 1, 771 mem_map->map + n); 772 rem_array_elem(mem_map->map, mem_map->count, 773 sizeof(*mem_map->map), n); 774 mem_map->count--; 775 } 776 return; 777 } 778 if (mem_type < mem_map->map[n].type || 779 (mem_type == mem_map->map[n].type && 780 mem_addr < mem_map->map[n].pa)) 781 break; /* found the spot where to insert this memory */ 782 } 783 784 grow_mem_map(mem_map); 785 ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map), 786 n, &m0); 787 } 788 789 static void add_va_space(struct memory_map *mem_map, 790 enum teecore_memtypes type, size_t size) 791 { 792 size_t n = 0; 793 794 DMSG("type %s size 0x%08zx", teecore_memtype_name(type), size); 795 for (n = 0; n < mem_map->count; n++) { 796 if (type < mem_map->map[n].type) 797 break; 798 } 799 800 grow_mem_map(mem_map); 801 ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map), 802 n, NULL); 803 mem_map->map[n] = (struct tee_mmap_region){ 804 .type = type, 805 .size = size, 806 }; 807 } 808 809 uint32_t core_mmu_type_to_attr(enum teecore_memtypes t) 810 { 811 const uint32_t attr = TEE_MATTR_VALID_BLOCK; 812 const uint32_t tagged = TEE_MATTR_MEM_TYPE_TAGGED << 813 TEE_MATTR_MEM_TYPE_SHIFT; 814 const uint32_t cached = TEE_MATTR_MEM_TYPE_CACHED << 815 TEE_MATTR_MEM_TYPE_SHIFT; 816 const uint32_t noncache = TEE_MATTR_MEM_TYPE_DEV << 817 TEE_MATTR_MEM_TYPE_SHIFT; 818 819 switch (t) { 820 case MEM_AREA_TEE_RAM: 821 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | tagged; 822 case MEM_AREA_TEE_RAM_RX: 823 case MEM_AREA_INIT_RAM_RX: 824 case MEM_AREA_IDENTITY_MAP_RX: 825 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRX | tagged; 826 case MEM_AREA_TEE_RAM_RO: 827 case MEM_AREA_INIT_RAM_RO: 828 return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | tagged; 829 case MEM_AREA_TEE_RAM_RW: 830 case MEM_AREA_NEX_RAM_RO: /* This has to be r/w during init runtime */ 831 case MEM_AREA_NEX_RAM_RW: 832 case MEM_AREA_TEE_ASAN: 833 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged; 834 case MEM_AREA_TEE_COHERENT: 835 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | noncache; 836 case MEM_AREA_TA_RAM: 837 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged; 838 case MEM_AREA_NSEC_SHM: 839 case MEM_AREA_NEX_NSEC_SHM: 840 return attr | TEE_MATTR_PRW | cached; 841 case MEM_AREA_MANIFEST_DT: 842 return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | cached; 843 case MEM_AREA_TRANSFER_LIST: 844 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached; 845 case MEM_AREA_EXT_DT: 846 /* 847 * If CFG_MAP_EXT_DT_SECURE is enabled map the external device 848 * tree as secure non-cached memory, otherwise, fall back to 849 * non-secure mapping. 
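		 * (The non-secure case falls through to MEM_AREA_IO_NSEC
		 * below, i.e. a non-secure, non-cached read/write mapping.)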
850 */ 851 if (IS_ENABLED(CFG_MAP_EXT_DT_SECURE)) 852 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | 853 noncache; 854 fallthrough; 855 case MEM_AREA_IO_NSEC: 856 return attr | TEE_MATTR_PRW | noncache; 857 case MEM_AREA_IO_SEC: 858 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | noncache; 859 case MEM_AREA_RAM_NSEC: 860 return attr | TEE_MATTR_PRW | cached; 861 case MEM_AREA_RAM_SEC: 862 case MEM_AREA_SEC_RAM_OVERALL: 863 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached; 864 case MEM_AREA_ROM_SEC: 865 return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | cached; 866 case MEM_AREA_RES_VASPACE: 867 case MEM_AREA_SHM_VASPACE: 868 return 0; 869 case MEM_AREA_PAGER_VASPACE: 870 return TEE_MATTR_SECURE; 871 default: 872 panic("invalid type"); 873 } 874 } 875 876 static bool __maybe_unused map_is_tee_ram(const struct tee_mmap_region *mm) 877 { 878 switch (mm->type) { 879 case MEM_AREA_TEE_RAM: 880 case MEM_AREA_TEE_RAM_RX: 881 case MEM_AREA_TEE_RAM_RO: 882 case MEM_AREA_TEE_RAM_RW: 883 case MEM_AREA_INIT_RAM_RX: 884 case MEM_AREA_INIT_RAM_RO: 885 case MEM_AREA_NEX_RAM_RW: 886 case MEM_AREA_NEX_RAM_RO: 887 case MEM_AREA_TEE_ASAN: 888 return true; 889 default: 890 return false; 891 } 892 } 893 894 static bool __maybe_unused map_is_secure(const struct tee_mmap_region *mm) 895 { 896 return !!(core_mmu_type_to_attr(mm->type) & TEE_MATTR_SECURE); 897 } 898 899 static bool __maybe_unused map_is_pgdir(const struct tee_mmap_region *mm) 900 { 901 return mm->region_size == CORE_MMU_PGDIR_SIZE; 902 } 903 904 static int cmp_mmap_by_lower_va(const void *a, const void *b) 905 { 906 const struct tee_mmap_region *mm_a = a; 907 const struct tee_mmap_region *mm_b = b; 908 909 return CMP_TRILEAN(mm_a->va, mm_b->va); 910 } 911 912 static void dump_mmap_table(struct memory_map *mem_map) 913 { 914 size_t n = 0; 915 916 for (n = 0; n < mem_map->count; n++) { 917 struct tee_mmap_region *map = mem_map->map + n; 918 vaddr_t __maybe_unused vstart; 919 920 vstart = map->va + ((vaddr_t)map->pa & (map->region_size - 1)); 921 DMSG("type %-12s va 0x%08" PRIxVA "..0x%08" PRIxVA 922 " pa 0x%08" PRIxPA "..0x%08" PRIxPA " size 0x%08zx (%s)", 923 teecore_memtype_name(map->type), vstart, 924 vstart + map->size - 1, map->pa, 925 (paddr_t)(map->pa + map->size - 1), map->size, 926 map->region_size == SMALL_PAGE_SIZE ? "smallpg" : "pgdir"); 927 } 928 } 929 930 #if DEBUG_XLAT_TABLE 931 932 static void dump_xlat_table(vaddr_t va, unsigned int level) 933 { 934 struct core_mmu_table_info tbl_info; 935 unsigned int idx = 0; 936 paddr_t pa; 937 uint32_t attr; 938 939 core_mmu_find_table(NULL, va, level, &tbl_info); 940 va = tbl_info.va_base; 941 for (idx = 0; idx < tbl_info.num_entries; idx++) { 942 core_mmu_get_entry(&tbl_info, idx, &pa, &attr); 943 if (attr || level > CORE_MMU_BASE_TABLE_LEVEL) { 944 const char *security_bit = ""; 945 946 if (core_mmu_entry_have_security_bit(attr)) { 947 if (attr & TEE_MATTR_SECURE) 948 security_bit = "S"; 949 else 950 security_bit = "NS"; 951 } 952 953 if (attr & TEE_MATTR_TABLE) { 954 DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA 955 " TBL:0x%010" PRIxPA " %s", 956 level * 2, "", level, va, pa, 957 security_bit); 958 dump_xlat_table(va, level + 1); 959 } else if (attr) { 960 DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA 961 " PA:0x%010" PRIxPA " %s-%s-%s-%s", 962 level * 2, "", level, va, pa, 963 mattr_is_cached(attr) ? "MEM" : 964 "DEV", 965 attr & TEE_MATTR_PW ? "RW" : "RO", 966 attr & TEE_MATTR_PX ? 
"X " : "XN", 967 security_bit); 968 } else { 969 DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA 970 " INVALID\n", 971 level * 2, "", level, va); 972 } 973 } 974 va += BIT64(tbl_info.shift); 975 } 976 } 977 978 #else 979 980 static void dump_xlat_table(vaddr_t va __unused, int level __unused) 981 { 982 } 983 984 #endif 985 986 /* 987 * Reserves virtual memory space for pager usage. 988 * 989 * From the start of the first memory used by the link script + 990 * TEE_RAM_VA_SIZE should be covered, either with a direct mapping or empty 991 * mapping for pager usage. This adds translation tables as needed for the 992 * pager to operate. 993 */ 994 static void add_pager_vaspace(struct memory_map *mem_map) 995 { 996 paddr_t begin = 0; 997 paddr_t end = 0; 998 size_t size = 0; 999 size_t pos = 0; 1000 size_t n = 0; 1001 1002 1003 for (n = 0; n < mem_map->count; n++) { 1004 if (map_is_tee_ram(mem_map->map + n)) { 1005 if (!begin) 1006 begin = mem_map->map[n].pa; 1007 pos = n + 1; 1008 } 1009 } 1010 1011 end = mem_map->map[pos - 1].pa + mem_map->map[pos - 1].size; 1012 assert(end - begin < TEE_RAM_VA_SIZE); 1013 size = TEE_RAM_VA_SIZE - (end - begin); 1014 1015 grow_mem_map(mem_map); 1016 ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map), 1017 n, NULL); 1018 mem_map->map[n] = (struct tee_mmap_region){ 1019 .type = MEM_AREA_PAGER_VASPACE, 1020 .size = size, 1021 .region_size = SMALL_PAGE_SIZE, 1022 .attr = core_mmu_type_to_attr(MEM_AREA_PAGER_VASPACE), 1023 }; 1024 } 1025 1026 static void check_sec_nsec_mem_config(void) 1027 { 1028 size_t n = 0; 1029 1030 for (n = 0; n < ARRAY_SIZE(secure_only); n++) { 1031 if (pbuf_intersects(nsec_shared, secure_only[n].paddr, 1032 secure_only[n].size)) 1033 panic("Invalid memory access config: sec/nsec"); 1034 } 1035 } 1036 1037 static void collect_device_mem_ranges(struct memory_map *mem_map) 1038 { 1039 const char *compatible = "arm,ffa-manifest-device-regions"; 1040 void *fdt = get_manifest_dt(); 1041 const char *name = NULL; 1042 uint64_t page_count = 0; 1043 uint64_t base = 0; 1044 int subnode = 0; 1045 int node = 0; 1046 1047 assert(fdt); 1048 1049 node = fdt_node_offset_by_compatible(fdt, 0, compatible); 1050 if (node < 0) 1051 return; 1052 1053 fdt_for_each_subnode(subnode, fdt, node) { 1054 name = fdt_get_name(fdt, subnode, NULL); 1055 if (!name) 1056 continue; 1057 1058 if (dt_getprop_as_number(fdt, subnode, "base-address", 1059 &base)) { 1060 EMSG("Mandatory field is missing: base-address"); 1061 continue; 1062 } 1063 1064 if (base & SMALL_PAGE_MASK) { 1065 EMSG("base-address is not page aligned"); 1066 continue; 1067 } 1068 1069 if (dt_getprop_as_number(fdt, subnode, "pages-count", 1070 &page_count)) { 1071 EMSG("Mandatory field is missing: pages-count"); 1072 continue; 1073 } 1074 1075 add_phys_mem(mem_map, name, MEM_AREA_IO_SEC, 1076 base, base + page_count * SMALL_PAGE_SIZE); 1077 } 1078 } 1079 1080 static void collect_mem_ranges(struct memory_map *mem_map) 1081 { 1082 const struct core_mmu_phys_mem *mem = NULL; 1083 vaddr_t ram_start = secure_only[0].paddr; 1084 1085 #define ADD_PHYS_MEM(_type, _addr, _size) \ 1086 add_phys_mem(mem_map, #_addr, (_type), (_addr), (_size)) 1087 1088 if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) { 1089 ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, ram_start, 1090 VCORE_UNPG_RX_PA - ram_start); 1091 ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RX, VCORE_UNPG_RX_PA, 1092 VCORE_UNPG_RX_SZ); 1093 ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, VCORE_UNPG_RO_PA, 1094 VCORE_UNPG_RO_SZ); 1095 1096 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 1097 
			ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RO, VCORE_UNPG_RW_PA,
				     VCORE_UNPG_RW_SZ);
			ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RW, VCORE_NEX_RW_PA,
				     VCORE_NEX_RW_SZ);
		} else {
			ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RW, VCORE_UNPG_RW_PA,
				     VCORE_UNPG_RW_SZ);
		}

		if (IS_ENABLED(CFG_WITH_PAGER)) {
			ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RX, VCORE_INIT_RX_PA,
				     VCORE_INIT_RX_SZ);
			ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RO, VCORE_INIT_RO_PA,
				     VCORE_INIT_RO_SZ);
		}
	} else {
		ADD_PHYS_MEM(MEM_AREA_TEE_RAM, TEE_RAM_START, TEE_RAM_PH_SIZE);
	}

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, TRUSTED_DRAM_BASE,
			     TRUSTED_DRAM_SIZE);
	} else {
		/*
		 * Every guest has its own TA RAM when virtualization
		 * support is enabled, so TA RAM is only added here when it
		 * is disabled.
		 */
		paddr_t ta_base = 0;
		size_t ta_size = 0;

		core_mmu_get_ta_range(&ta_base, &ta_size);
		ADD_PHYS_MEM(MEM_AREA_TA_RAM, ta_base, ta_size);
	}

	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS) &&
	    IS_ENABLED(CFG_WITH_PAGER)) {
		/*
		 * ASan RAM is part of MEM_AREA_TEE_RAM_RW when the pager is
		 * disabled.
		 */
		ADD_PHYS_MEM(MEM_AREA_TEE_ASAN, ASAN_MAP_PA, ASAN_MAP_SZ);
	}

#undef ADD_PHYS_MEM

	/* Collect device memory info from SP manifest */
	if (IS_ENABLED(CFG_CORE_SEL2_SPMC))
		collect_device_mem_ranges(mem_map);

	for (mem = phys_mem_map_begin; mem < phys_mem_map_end; mem++) {
		/* Only an unmapped virtual range may have a null phys addr */
		assert(mem->addr || !core_mmu_type_to_attr(mem->type));

		add_phys_mem(mem_map, mem->name, mem->type,
			     mem->addr, mem->size);
	}

	if (IS_ENABLED(CFG_SECURE_DATA_PATH))
		verify_special_mem_areas(mem_map, phys_sdp_mem_begin,
					 phys_sdp_mem_end, "SDP");

	add_va_space(mem_map, MEM_AREA_RES_VASPACE, CFG_RESERVED_VASPACE_SIZE);
	add_va_space(mem_map, MEM_AREA_SHM_VASPACE, SHM_VASPACE_SIZE);
}

static void assign_mem_granularity(struct memory_map *mem_map)
{
	size_t n = 0;

	/*
	 * Assign region sizes, note that MEM_AREA_TEE_RAM always uses
	 * SMALL_PAGE_SIZE.
	 */
	for (n = 0; n < mem_map->count; n++) {
		paddr_t mask = mem_map->map[n].pa | mem_map->map[n].size;

		if (!(mask & CORE_MMU_PGDIR_MASK))
			mem_map->map[n].region_size = CORE_MMU_PGDIR_SIZE;
		else if (!(mask & SMALL_PAGE_MASK))
			mem_map->map[n].region_size = SMALL_PAGE_SIZE;
		else
			panic("Impossible memory alignment");

		if (map_is_tee_ram(mem_map->map + n))
			mem_map->map[n].region_size = SMALL_PAGE_SIZE;
	}
}

static bool place_tee_ram_at_top(paddr_t paddr)
{
	return paddr > BIT64(core_mmu_get_va_width()) / 2;
}

/*
 * MMU arch driver shall override this function if it helps
 * optimizing the memory footprint of the address translation tables.
 */
bool __weak core_mmu_prefer_tee_ram_at_top(paddr_t paddr)
{
	return place_tee_ram_at_top(paddr);
}

static bool assign_mem_va_dir(vaddr_t tee_ram_va, struct memory_map *mem_map,
			      bool tee_ram_at_top)
{
	struct tee_mmap_region *map = NULL;
	vaddr_t va = 0;
	bool va_is_secure = true;
	size_t n = 0;

	/*
	 * tee_ram_va might equal 0 when CFG_CORE_ASLR=y.
	 * 0 is by design an invalid va, so return false directly.
1210 */ 1211 if (!tee_ram_va) 1212 return false; 1213 1214 /* Clear eventual previous assignments */ 1215 for (n = 0; n < mem_map->count; n++) 1216 mem_map->map[n].va = 0; 1217 1218 /* 1219 * TEE RAM regions are always aligned with region_size. 1220 * 1221 * Note that MEM_AREA_PAGER_VASPACE also counts as TEE RAM here 1222 * since it handles virtual memory which covers the part of the ELF 1223 * that cannot fit directly into memory. 1224 */ 1225 va = tee_ram_va; 1226 for (n = 0; n < mem_map->count; n++) { 1227 map = mem_map->map + n; 1228 if (map_is_tee_ram(map) || 1229 map->type == MEM_AREA_PAGER_VASPACE) { 1230 assert(!(va & (map->region_size - 1))); 1231 assert(!(map->size & (map->region_size - 1))); 1232 map->va = va; 1233 if (ADD_OVERFLOW(va, map->size, &va)) 1234 return false; 1235 if (va >= BIT64(core_mmu_get_va_width())) 1236 return false; 1237 } 1238 } 1239 1240 if (tee_ram_at_top) { 1241 /* 1242 * Map non-tee ram regions at addresses lower than the tee 1243 * ram region. 1244 */ 1245 va = tee_ram_va; 1246 for (n = 0; n < mem_map->count; n++) { 1247 map = mem_map->map + n; 1248 map->attr = core_mmu_type_to_attr(map->type); 1249 if (map->va) 1250 continue; 1251 1252 if (!IS_ENABLED(CFG_WITH_LPAE) && 1253 va_is_secure != map_is_secure(map)) { 1254 va_is_secure = !va_is_secure; 1255 va = ROUNDDOWN(va, CORE_MMU_PGDIR_SIZE); 1256 } 1257 1258 if (SUB_OVERFLOW(va, map->size, &va)) 1259 return false; 1260 va = ROUNDDOWN(va, map->region_size); 1261 /* 1262 * Make sure that va is aligned with pa for 1263 * efficient pgdir mapping. Basically pa & 1264 * pgdir_mask should be == va & pgdir_mask 1265 */ 1266 if (map->size > 2 * CORE_MMU_PGDIR_SIZE) { 1267 if (SUB_OVERFLOW(va, CORE_MMU_PGDIR_SIZE, &va)) 1268 return false; 1269 va += (map->pa - va) & CORE_MMU_PGDIR_MASK; 1270 } 1271 map->va = va; 1272 } 1273 } else { 1274 /* 1275 * Map non-tee ram regions at addresses higher than the tee 1276 * ram region. 1277 */ 1278 for (n = 0; n < mem_map->count; n++) { 1279 map = mem_map->map + n; 1280 map->attr = core_mmu_type_to_attr(map->type); 1281 if (map->va) 1282 continue; 1283 1284 if (!IS_ENABLED(CFG_WITH_LPAE) && 1285 va_is_secure != map_is_secure(map)) { 1286 va_is_secure = !va_is_secure; 1287 if (ROUNDUP_OVERFLOW(va, CORE_MMU_PGDIR_SIZE, 1288 &va)) 1289 return false; 1290 } 1291 1292 if (ROUNDUP_OVERFLOW(va, map->region_size, &va)) 1293 return false; 1294 /* 1295 * Make sure that va is aligned with pa for 1296 * efficient pgdir mapping. Basically pa & 1297 * pgdir_mask should be == va & pgdir_mask 1298 */ 1299 if (map->size > 2 * CORE_MMU_PGDIR_SIZE) { 1300 vaddr_t offs = (map->pa - va) & 1301 CORE_MMU_PGDIR_MASK; 1302 1303 if (ADD_OVERFLOW(va, offs, &va)) 1304 return false; 1305 } 1306 1307 map->va = va; 1308 if (ADD_OVERFLOW(va, map->size, &va)) 1309 return false; 1310 if (va >= BIT64(core_mmu_get_va_width())) 1311 return false; 1312 } 1313 } 1314 1315 return true; 1316 } 1317 1318 static bool assign_mem_va(vaddr_t tee_ram_va, struct memory_map *mem_map) 1319 { 1320 bool tee_ram_at_top = place_tee_ram_at_top(tee_ram_va); 1321 1322 /* 1323 * Check that we're not overlapping with the user VA range. 1324 */ 1325 if (IS_ENABLED(CFG_WITH_LPAE)) { 1326 /* 1327 * User VA range is supposed to be defined after these 1328 * mappings have been established. 
1329 */ 1330 assert(!core_mmu_user_va_range_is_defined()); 1331 } else { 1332 vaddr_t user_va_base = 0; 1333 size_t user_va_size = 0; 1334 1335 assert(core_mmu_user_va_range_is_defined()); 1336 core_mmu_get_user_va_range(&user_va_base, &user_va_size); 1337 if (tee_ram_va < (user_va_base + user_va_size)) 1338 return false; 1339 } 1340 1341 if (IS_ENABLED(CFG_WITH_PAGER)) { 1342 bool prefered_dir = core_mmu_prefer_tee_ram_at_top(tee_ram_va); 1343 1344 /* Try whole mapping covered by a single base xlat entry */ 1345 if (prefered_dir != tee_ram_at_top && 1346 assign_mem_va_dir(tee_ram_va, mem_map, prefered_dir)) 1347 return true; 1348 } 1349 1350 return assign_mem_va_dir(tee_ram_va, mem_map, tee_ram_at_top); 1351 } 1352 1353 static int cmp_init_mem_map(const void *a, const void *b) 1354 { 1355 const struct tee_mmap_region *mm_a = a; 1356 const struct tee_mmap_region *mm_b = b; 1357 int rc = 0; 1358 1359 rc = CMP_TRILEAN(mm_a->region_size, mm_b->region_size); 1360 if (!rc) 1361 rc = CMP_TRILEAN(mm_a->pa, mm_b->pa); 1362 /* 1363 * 32bit MMU descriptors cannot mix secure and non-secure mapping in 1364 * the same level2 table. Hence sort secure mapping from non-secure 1365 * mapping. 1366 */ 1367 if (!rc && !IS_ENABLED(CFG_WITH_LPAE)) 1368 rc = CMP_TRILEAN(map_is_secure(mm_a), map_is_secure(mm_b)); 1369 1370 return rc; 1371 } 1372 1373 static bool mem_map_add_id_map(struct memory_map *mem_map, 1374 vaddr_t id_map_start, vaddr_t id_map_end) 1375 { 1376 vaddr_t start = ROUNDDOWN(id_map_start, SMALL_PAGE_SIZE); 1377 vaddr_t end = ROUNDUP(id_map_end, SMALL_PAGE_SIZE); 1378 size_t len = end - start; 1379 size_t n = 0; 1380 1381 1382 for (n = 0; n < mem_map->count; n++) 1383 if (core_is_buffer_intersect(mem_map->map[n].va, 1384 mem_map->map[n].size, start, len)) 1385 return false; 1386 1387 grow_mem_map(mem_map); 1388 mem_map->map[mem_map->count - 1] = (struct tee_mmap_region){ 1389 .type = MEM_AREA_IDENTITY_MAP_RX, 1390 /* 1391 * Could use CORE_MMU_PGDIR_SIZE to potentially save a 1392 * translation table, at the increased risk of clashes with 1393 * the rest of the memory map. 1394 */ 1395 .region_size = SMALL_PAGE_SIZE, 1396 .pa = start, 1397 .va = start, 1398 .size = len, 1399 .attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX), 1400 }; 1401 1402 return true; 1403 } 1404 1405 static struct memory_map *init_mem_map(struct memory_map *mem_map, 1406 unsigned long seed, 1407 unsigned long *ret_offs) 1408 { 1409 /* 1410 * @id_map_start and @id_map_end describes a physical memory range 1411 * that must be mapped Read-Only eXecutable at identical virtual 1412 * addresses. 1413 */ 1414 vaddr_t id_map_start = (vaddr_t)__identity_map_init_start; 1415 vaddr_t id_map_end = (vaddr_t)__identity_map_init_end; 1416 vaddr_t start_addr = secure_only[0].paddr; 1417 unsigned long offs = 0; 1418 1419 collect_mem_ranges(mem_map); 1420 assign_mem_granularity(mem_map); 1421 1422 /* 1423 * To ease mapping and lower use of xlat tables, sort mapping 1424 * description moving small-page regions after the pgdir regions. 
 */
	qsort(mem_map->map, mem_map->count, sizeof(struct tee_mmap_region),
	      cmp_init_mem_map);

	if (IS_ENABLED(CFG_WITH_PAGER))
		add_pager_vaspace(mem_map);

	if (IS_ENABLED(CFG_CORE_ASLR) && seed) {
		vaddr_t base_addr = start_addr + seed;
		const unsigned int va_width = core_mmu_get_va_width();
		const vaddr_t va_mask = GENMASK_64(va_width - 1,
						   SMALL_PAGE_SHIFT);
		vaddr_t ba = base_addr;
		size_t n = 0;

		for (n = 0; n < 3; n++) {
			if (n)
				ba = base_addr ^ BIT64(va_width - n);
			ba &= va_mask;
			if (assign_mem_va(ba, mem_map) &&
			    mem_map_add_id_map(mem_map, id_map_start,
					       id_map_end)) {
				offs = ba - start_addr;
				DMSG("Mapping core at %#"PRIxVA" offs %#lx",
				     ba, offs);
				goto out;
			} else {
				DMSG("Failed to map core at %#"PRIxVA, ba);
			}
		}
		EMSG("Failed to map core with seed %#lx", seed);
	}

	if (!assign_mem_va(start_addr, mem_map))
		panic();

out:
	qsort(mem_map->map, mem_map->count, sizeof(struct tee_mmap_region),
	      cmp_mmap_by_lower_va);

	dump_mmap_table(mem_map);

	*ret_offs = offs;
	return mem_map;
}

static void check_mem_map(struct memory_map *mem_map)
{
	struct tee_mmap_region *m = NULL;
	size_t n = 0;

	for (n = 0; n < mem_map->count; n++) {
		m = mem_map->map + n;
		switch (m->type) {
		case MEM_AREA_TEE_RAM:
		case MEM_AREA_TEE_RAM_RX:
		case MEM_AREA_TEE_RAM_RO:
		case MEM_AREA_TEE_RAM_RW:
		case MEM_AREA_INIT_RAM_RX:
		case MEM_AREA_INIT_RAM_RO:
		case MEM_AREA_NEX_RAM_RW:
		case MEM_AREA_NEX_RAM_RO:
		case MEM_AREA_IDENTITY_MAP_RX:
			if (!pbuf_is_inside(secure_only, m->pa, m->size))
				panic("TEE_RAM can't fit in secure_only");
			break;
		case MEM_AREA_TA_RAM:
			if (!pbuf_is_inside(secure_only, m->pa, m->size))
				panic("TA_RAM can't fit in secure_only");
			break;
		case MEM_AREA_NSEC_SHM:
			if (!pbuf_is_inside(nsec_shared, m->pa, m->size))
				panic("NS_SHM can't fit in nsec_shared");
			break;
		case MEM_AREA_SEC_RAM_OVERALL:
		case MEM_AREA_TEE_COHERENT:
		case MEM_AREA_TEE_ASAN:
		case MEM_AREA_IO_SEC:
		case MEM_AREA_IO_NSEC:
		case MEM_AREA_EXT_DT:
		case MEM_AREA_MANIFEST_DT:
		case MEM_AREA_TRANSFER_LIST:
		case MEM_AREA_RAM_SEC:
		case MEM_AREA_RAM_NSEC:
		case MEM_AREA_ROM_SEC:
		case MEM_AREA_RES_VASPACE:
		case MEM_AREA_SHM_VASPACE:
		case MEM_AREA_PAGER_VASPACE:
			break;
		default:
			EMSG("Unhandled memtype %d", m->type);
			panic();
		}
	}
}

/*
 * core_init_mmu_map() - init tee core default memory mapping
 *
 * This routine sets the static default TEE core mapping. If @seed is
 * non-zero and CFG_CORE_ASLR is enabled, it maps the core at a location
 * based on the seed and returns the offset from the link address.
 *
 * If an error occurs, core_init_mmu_map() is expected to panic.
 *
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
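 *
 * With CFG_CORE_ASLR the candidate base address is derived from the secure
 * memory base plus the seed, masked to the supported VA width;
 * init_mem_map() tries up to three such candidates, flipping a high
 * address bit on each retry, before falling back to the default,
 * unrandomized mapping.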
1532 */ 1533 void __weak core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg) 1534 { 1535 #ifndef CFG_NS_VIRTUALIZATION 1536 vaddr_t start = ROUNDDOWN((vaddr_t)__nozi_start, SMALL_PAGE_SIZE); 1537 #else 1538 vaddr_t start = ROUNDDOWN((vaddr_t)__vcore_nex_rw_start, 1539 SMALL_PAGE_SIZE); 1540 #endif 1541 vaddr_t len = ROUNDUP((vaddr_t)__nozi_end, SMALL_PAGE_SIZE) - start; 1542 struct tee_mmap_region tmp_mmap_region = { }; 1543 struct memory_map mem_map = { }; 1544 unsigned long offs = 0; 1545 1546 if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE) && 1547 (core_mmu_tee_load_pa & SMALL_PAGE_MASK)) 1548 panic("OP-TEE load address is not page aligned"); 1549 1550 check_sec_nsec_mem_config(); 1551 1552 mem_map = static_memory_map; 1553 static_memory_map = (struct memory_map){ 1554 .map = &tmp_mmap_region, 1555 .alloc_count = 1, 1556 .count = 1, 1557 }; 1558 /* 1559 * Add a entry covering the translation tables which will be 1560 * involved in some virt_to_phys() and phys_to_virt() conversions. 1561 */ 1562 static_memory_map.map[0] = (struct tee_mmap_region){ 1563 .type = MEM_AREA_TEE_RAM, 1564 .region_size = SMALL_PAGE_SIZE, 1565 .pa = start, 1566 .va = start, 1567 .size = len, 1568 .attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX), 1569 }; 1570 1571 init_mem_map(&mem_map, seed, &offs); 1572 1573 check_mem_map(&mem_map); 1574 core_init_mmu(&mem_map); 1575 dump_xlat_table(0x0, CORE_MMU_BASE_TABLE_LEVEL); 1576 core_init_mmu_regs(cfg); 1577 cfg->map_offset = offs; 1578 static_memory_map = mem_map; 1579 } 1580 1581 bool core_mmu_mattr_is_ok(uint32_t mattr) 1582 { 1583 /* 1584 * Keep in sync with core_mmu_lpae.c:mattr_to_desc and 1585 * core_mmu_v7.c:mattr_to_texcb 1586 */ 1587 1588 switch ((mattr >> TEE_MATTR_MEM_TYPE_SHIFT) & TEE_MATTR_MEM_TYPE_MASK) { 1589 case TEE_MATTR_MEM_TYPE_DEV: 1590 case TEE_MATTR_MEM_TYPE_STRONGLY_O: 1591 case TEE_MATTR_MEM_TYPE_CACHED: 1592 case TEE_MATTR_MEM_TYPE_TAGGED: 1593 return true; 1594 default: 1595 return false; 1596 } 1597 } 1598 1599 /* 1600 * test attributes of target physical buffer 1601 * 1602 * Flags: pbuf_is(SECURE, NOT_SECURE, RAM, IOMEM, KEYVAULT). 
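 *
 * A typical (illustrative) use is to verify that a buffer handed over by
 * the normal world lies entirely in non-secure memory before touching it:
 *
 *   if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, len))
 *           return TEE_ERROR_SECURITY;
 *
 * (the error code above is only an example; callers pick their own)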
 *
 */
bool core_pbuf_is(uint32_t attr, paddr_t pbuf, size_t len)
{
	paddr_t ta_base = 0;
	size_t ta_size = 0;
	struct tee_mmap_region *map;

	/* Empty buffers comply with anything */
	if (len == 0)
		return true;

	switch (attr) {
	case CORE_MEM_SEC:
		return pbuf_is_inside(secure_only, pbuf, len);
	case CORE_MEM_NON_SEC:
		return pbuf_is_inside(nsec_shared, pbuf, len) ||
			pbuf_is_nsec_ddr(pbuf, len);
	case CORE_MEM_TEE_RAM:
		return core_is_buffer_inside(pbuf, len, TEE_RAM_START,
					     TEE_RAM_PH_SIZE);
	case CORE_MEM_TA_RAM:
		core_mmu_get_ta_range(&ta_base, &ta_size);
		return core_is_buffer_inside(pbuf, len, ta_base, ta_size);
#ifdef CFG_CORE_RESERVED_SHM
	case CORE_MEM_NSEC_SHM:
		return core_is_buffer_inside(pbuf, len, TEE_SHMEM_START,
					     TEE_SHMEM_SIZE);
#endif
	case CORE_MEM_SDP_MEM:
		return pbuf_is_sdp_mem(pbuf, len);
	case CORE_MEM_CACHED:
		map = find_map_by_pa(pbuf);
		if (!map || !pbuf_inside_map_area(pbuf, len, map))
			return false;
		return mattr_is_cached(map->attr);
	default:
		return false;
	}
}

/* test attributes of target virtual buffer (in core mapping) */
bool core_vbuf_is(uint32_t attr, const void *vbuf, size_t len)
{
	paddr_t p;

	/* Empty buffers comply with anything */
	if (len == 0)
		return true;

	p = virt_to_phys((void *)vbuf);
	if (!p)
		return false;

	return core_pbuf_is(attr, p, len);
}

/* core_va2pa - teecore exported service */
static int __maybe_unused core_va2pa_helper(void *va, paddr_t *pa)
{
	struct tee_mmap_region *map;

	map = find_map_by_va(va);
	if (!va_is_in_map(map, (vaddr_t)va))
		return -1;

	/*
	 * We can calculate the PA for a static map. Virtual address ranges
	 * reserved for core dynamic mapping return a 'match' (return 0;)
	 * together with an invalid (null) physical address.
1673 */ 1674 if (map->pa) 1675 *pa = map->pa + (vaddr_t)va - map->va; 1676 else 1677 *pa = 0; 1678 1679 return 0; 1680 } 1681 1682 static void *map_pa2va(struct tee_mmap_region *map, paddr_t pa, size_t len) 1683 { 1684 if (!pa_is_in_map(map, pa, len)) 1685 return NULL; 1686 1687 return (void *)(vaddr_t)(map->va + pa - map->pa); 1688 } 1689 1690 /* 1691 * teecore gets some memory area definitions 1692 */ 1693 void core_mmu_get_mem_by_type(enum teecore_memtypes type, vaddr_t *s, 1694 vaddr_t *e) 1695 { 1696 struct tee_mmap_region *map = find_map_by_type(type); 1697 1698 if (map) { 1699 *s = map->va; 1700 *e = map->va + map->size; 1701 } else { 1702 *s = 0; 1703 *e = 0; 1704 } 1705 } 1706 1707 enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa) 1708 { 1709 struct tee_mmap_region *map = find_map_by_pa(pa); 1710 1711 if (!map) 1712 return MEM_AREA_MAXTYPE; 1713 return map->type; 1714 } 1715 1716 void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned int idx, 1717 paddr_t pa, uint32_t attr) 1718 { 1719 assert(idx < tbl_info->num_entries); 1720 core_mmu_set_entry_primitive(tbl_info->table, tbl_info->level, 1721 idx, pa, attr); 1722 } 1723 1724 void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned int idx, 1725 paddr_t *pa, uint32_t *attr) 1726 { 1727 assert(idx < tbl_info->num_entries); 1728 core_mmu_get_entry_primitive(tbl_info->table, tbl_info->level, 1729 idx, pa, attr); 1730 } 1731 1732 static void clear_region(struct core_mmu_table_info *tbl_info, 1733 struct tee_mmap_region *region) 1734 { 1735 unsigned int end = 0; 1736 unsigned int idx = 0; 1737 1738 /* va, len and pa should be block aligned */ 1739 assert(!core_mmu_get_block_offset(tbl_info, region->va)); 1740 assert(!core_mmu_get_block_offset(tbl_info, region->size)); 1741 assert(!core_mmu_get_block_offset(tbl_info, region->pa)); 1742 1743 idx = core_mmu_va2idx(tbl_info, region->va); 1744 end = core_mmu_va2idx(tbl_info, region->va + region->size); 1745 1746 while (idx < end) { 1747 core_mmu_set_entry(tbl_info, idx, 0, 0); 1748 idx++; 1749 } 1750 } 1751 1752 static void set_region(struct core_mmu_table_info *tbl_info, 1753 struct tee_mmap_region *region) 1754 { 1755 unsigned int end; 1756 unsigned int idx; 1757 paddr_t pa; 1758 1759 /* va, len and pa should be block aligned */ 1760 assert(!core_mmu_get_block_offset(tbl_info, region->va)); 1761 assert(!core_mmu_get_block_offset(tbl_info, region->size)); 1762 assert(!core_mmu_get_block_offset(tbl_info, region->pa)); 1763 1764 idx = core_mmu_va2idx(tbl_info, region->va); 1765 end = core_mmu_va2idx(tbl_info, region->va + region->size); 1766 pa = region->pa; 1767 1768 while (idx < end) { 1769 core_mmu_set_entry(tbl_info, idx, pa, region->attr); 1770 idx++; 1771 pa += BIT64(tbl_info->shift); 1772 } 1773 } 1774 1775 static void set_pg_region(struct core_mmu_table_info *dir_info, 1776 struct vm_region *region, struct pgt **pgt, 1777 struct core_mmu_table_info *pg_info) 1778 { 1779 struct tee_mmap_region r = { 1780 .va = region->va, 1781 .size = region->size, 1782 .attr = region->attr, 1783 }; 1784 vaddr_t end = r.va + r.size; 1785 uint32_t pgt_attr = (r.attr & TEE_MATTR_SECURE) | TEE_MATTR_TABLE; 1786 1787 while (r.va < end) { 1788 if (!pg_info->table || 1789 r.va >= (pg_info->va_base + CORE_MMU_PGDIR_SIZE)) { 1790 /* 1791 * We're assigning a new translation table. 
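			 * That is, r.va has crossed into the next
			 * CORE_MMU_PGDIR_SIZE aligned window, so the
			 * matching page table must be looked up in the pgt
			 * list and hooked into the directory entry below.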
1792 */ 1793 unsigned int idx; 1794 1795 /* Virtual addresses must grow */ 1796 assert(r.va > pg_info->va_base); 1797 1798 idx = core_mmu_va2idx(dir_info, r.va); 1799 pg_info->va_base = core_mmu_idx2va(dir_info, idx); 1800 1801 /* 1802 * Advance pgt to va_base, note that we may need to 1803 * skip multiple page tables if there are large 1804 * holes in the vm map. 1805 */ 1806 while ((*pgt)->vabase < pg_info->va_base) { 1807 *pgt = SLIST_NEXT(*pgt, link); 1808 /* We should have allocated enough */ 1809 assert(*pgt); 1810 } 1811 assert((*pgt)->vabase == pg_info->va_base); 1812 pg_info->table = (*pgt)->tbl; 1813 1814 core_mmu_set_entry(dir_info, idx, 1815 virt_to_phys(pg_info->table), 1816 pgt_attr); 1817 } 1818 1819 r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base), 1820 end - r.va); 1821 1822 if (!(*pgt)->populated && !mobj_is_paged(region->mobj)) { 1823 size_t granule = BIT(pg_info->shift); 1824 size_t offset = r.va - region->va + region->offset; 1825 1826 r.size = MIN(r.size, 1827 mobj_get_phys_granule(region->mobj)); 1828 r.size = ROUNDUP(r.size, SMALL_PAGE_SIZE); 1829 1830 if (mobj_get_pa(region->mobj, offset, granule, 1831 &r.pa) != TEE_SUCCESS) 1832 panic("Failed to get PA of unpaged mobj"); 1833 set_region(pg_info, &r); 1834 } 1835 r.va += r.size; 1836 } 1837 } 1838 1839 static bool can_map_at_level(paddr_t paddr, vaddr_t vaddr, 1840 size_t size_left, paddr_t block_size, 1841 struct tee_mmap_region *mm __maybe_unused) 1842 { 1843 /* VA and PA are aligned to block size at current level */ 1844 if ((vaddr | paddr) & (block_size - 1)) 1845 return false; 1846 1847 /* Remainder fits into block at current level */ 1848 if (size_left < block_size) 1849 return false; 1850 1851 #ifdef CFG_WITH_PAGER 1852 /* 1853 * If pager is enabled, we need to map TEE RAM and the whole pager 1854 * regions with small pages only 1855 */ 1856 if ((map_is_tee_ram(mm) || mm->type == MEM_AREA_PAGER_VASPACE) && 1857 block_size != SMALL_PAGE_SIZE) 1858 return false; 1859 #endif 1860 1861 return true; 1862 } 1863 1864 void core_mmu_map_region(struct mmu_partition *prtn, struct tee_mmap_region *mm) 1865 { 1866 struct core_mmu_table_info tbl_info; 1867 unsigned int idx; 1868 vaddr_t vaddr = mm->va; 1869 paddr_t paddr = mm->pa; 1870 ssize_t size_left = mm->size; 1871 unsigned int level; 1872 bool table_found; 1873 uint32_t old_attr; 1874 1875 assert(!((vaddr | paddr) & SMALL_PAGE_MASK)); 1876 1877 while (size_left > 0) { 1878 level = CORE_MMU_BASE_TABLE_LEVEL; 1879 1880 while (true) { 1881 paddr_t block_size = 0; 1882 1883 assert(core_mmu_level_in_range(level)); 1884 1885 table_found = core_mmu_find_table(prtn, vaddr, level, 1886 &tbl_info); 1887 if (!table_found) 1888 panic("can't find table for mapping"); 1889 1890 block_size = BIT64(tbl_info.shift); 1891 1892 idx = core_mmu_va2idx(&tbl_info, vaddr); 1893 if (!can_map_at_level(paddr, vaddr, size_left, 1894 block_size, mm)) { 1895 bool secure = mm->attr & TEE_MATTR_SECURE; 1896 1897 /* 1898 * This part of the region can't be mapped at 1899 * this level. Need to go deeper. 
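				 * core_mmu_entry_to_finer_grained() replaces
				 * the current entry with a table at the next
				 * level, and the loop retries with that
				 * level's smaller block size.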
1900 */ 1901 if (!core_mmu_entry_to_finer_grained(&tbl_info, 1902 idx, 1903 secure)) 1904 panic("Can't divide MMU entry"); 1905 level = tbl_info.next_level; 1906 continue; 1907 } 1908 1909 /* We can map part of the region at current level */ 1910 core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr); 1911 if (old_attr) 1912 panic("Page is already mapped"); 1913 1914 core_mmu_set_entry(&tbl_info, idx, paddr, mm->attr); 1915 paddr += block_size; 1916 vaddr += block_size; 1917 size_left -= block_size; 1918 1919 break; 1920 } 1921 } 1922 } 1923 1924 TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages, 1925 enum teecore_memtypes memtype) 1926 { 1927 TEE_Result ret; 1928 struct core_mmu_table_info tbl_info; 1929 struct tee_mmap_region *mm; 1930 unsigned int idx; 1931 uint32_t old_attr; 1932 uint32_t exceptions; 1933 vaddr_t vaddr = vstart; 1934 size_t i; 1935 bool secure; 1936 1937 assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX)); 1938 1939 secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE; 1940 1941 if (vaddr & SMALL_PAGE_MASK) 1942 return TEE_ERROR_BAD_PARAMETERS; 1943 1944 exceptions = mmu_lock(); 1945 1946 mm = find_map_by_va((void *)vaddr); 1947 if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1)) 1948 panic("VA does not belong to any known mm region"); 1949 1950 if (!core_mmu_is_dynamic_vaspace(mm)) 1951 panic("Trying to map into static region"); 1952 1953 for (i = 0; i < num_pages; i++) { 1954 if (pages[i] & SMALL_PAGE_MASK) { 1955 ret = TEE_ERROR_BAD_PARAMETERS; 1956 goto err; 1957 } 1958 1959 while (true) { 1960 if (!core_mmu_find_table(NULL, vaddr, UINT_MAX, 1961 &tbl_info)) 1962 panic("Can't find pagetable for vaddr "); 1963 1964 idx = core_mmu_va2idx(&tbl_info, vaddr); 1965 if (tbl_info.shift == SMALL_PAGE_SHIFT) 1966 break; 1967 1968 /* This is supertable. Need to divide it. */ 1969 if (!core_mmu_entry_to_finer_grained(&tbl_info, idx, 1970 secure)) 1971 panic("Failed to spread pgdir on small tables"); 1972 } 1973 1974 core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr); 1975 if (old_attr) 1976 panic("Page is already mapped"); 1977 1978 core_mmu_set_entry(&tbl_info, idx, pages[i], 1979 core_mmu_type_to_attr(memtype)); 1980 vaddr += SMALL_PAGE_SIZE; 1981 } 1982 1983 /* 1984 * Make sure all the changes to translation tables are visible 1985 * before returning. TLB doesn't need to be invalidated as we are 1986 * guaranteed that there's no valid mapping in this range. 
1987 */ 1988 core_mmu_table_write_barrier(); 1989 mmu_unlock(exceptions); 1990 1991 return TEE_SUCCESS; 1992 err: 1993 mmu_unlock(exceptions); 1994 1995 if (i) 1996 core_mmu_unmap_pages(vstart, i); 1997 1998 return ret; 1999 } 2000 2001 TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart, 2002 size_t num_pages, 2003 enum teecore_memtypes memtype) 2004 { 2005 struct core_mmu_table_info tbl_info = { }; 2006 struct tee_mmap_region *mm = NULL; 2007 unsigned int idx = 0; 2008 uint32_t old_attr = 0; 2009 uint32_t exceptions = 0; 2010 vaddr_t vaddr = vstart; 2011 paddr_t paddr = pstart; 2012 size_t i = 0; 2013 bool secure = false; 2014 2015 assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX)); 2016 2017 secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE; 2018 2019 if ((vaddr | paddr) & SMALL_PAGE_MASK) 2020 return TEE_ERROR_BAD_PARAMETERS; 2021 2022 exceptions = mmu_lock(); 2023 2024 mm = find_map_by_va((void *)vaddr); 2025 if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1)) 2026 panic("VA does not belong to any known mm region"); 2027 2028 if (!core_mmu_is_dynamic_vaspace(mm)) 2029 panic("Trying to map into static region"); 2030 2031 for (i = 0; i < num_pages; i++) { 2032 while (true) { 2033 if (!core_mmu_find_table(NULL, vaddr, UINT_MAX, 2034 &tbl_info)) 2035 panic("Can't find pagetable for vaddr "); 2036 2037 idx = core_mmu_va2idx(&tbl_info, vaddr); 2038 if (tbl_info.shift == SMALL_PAGE_SHIFT) 2039 break; 2040 2041 /* This is supertable. Need to divide it. */ 2042 if (!core_mmu_entry_to_finer_grained(&tbl_info, idx, 2043 secure)) 2044 panic("Failed to spread pgdir on small tables"); 2045 } 2046 2047 core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr); 2048 if (old_attr) 2049 panic("Page is already mapped"); 2050 2051 core_mmu_set_entry(&tbl_info, idx, paddr, 2052 core_mmu_type_to_attr(memtype)); 2053 paddr += SMALL_PAGE_SIZE; 2054 vaddr += SMALL_PAGE_SIZE; 2055 } 2056 2057 /* 2058 * Make sure all the changes to translation tables are visible 2059 * before returning. TLB doesn't need to be invalidated as we are 2060 * guaranteed that there's no valid mapping in this range. 
2061 */ 2062 core_mmu_table_write_barrier(); 2063 mmu_unlock(exceptions); 2064 2065 return TEE_SUCCESS; 2066 } 2067 2068 void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages) 2069 { 2070 struct core_mmu_table_info tbl_info; 2071 struct tee_mmap_region *mm; 2072 size_t i; 2073 unsigned int idx; 2074 uint32_t exceptions; 2075 2076 exceptions = mmu_lock(); 2077 2078 mm = find_map_by_va((void *)vstart); 2079 if (!mm || !va_is_in_map(mm, vstart + num_pages * SMALL_PAGE_SIZE - 1)) 2080 panic("VA does not belong to any known mm region"); 2081 2082 if (!core_mmu_is_dynamic_vaspace(mm)) 2083 panic("Trying to unmap static region"); 2084 2085 for (i = 0; i < num_pages; i++, vstart += SMALL_PAGE_SIZE) { 2086 if (!core_mmu_find_table(NULL, vstart, UINT_MAX, &tbl_info)) 2087 panic("Can't find pagetable"); 2088 2089 if (tbl_info.shift != SMALL_PAGE_SHIFT) 2090 panic("Invalid pagetable level"); 2091 2092 idx = core_mmu_va2idx(&tbl_info, vstart); 2093 core_mmu_set_entry(&tbl_info, idx, 0, 0); 2094 } 2095 tlbi_all(); 2096 2097 mmu_unlock(exceptions); 2098 } 2099 2100 void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info, 2101 struct user_mode_ctx *uctx) 2102 { 2103 struct core_mmu_table_info pg_info = { }; 2104 struct pgt_cache *pgt_cache = &uctx->pgt_cache; 2105 struct pgt *pgt = NULL; 2106 struct pgt *p = NULL; 2107 struct vm_region *r = NULL; 2108 2109 if (TAILQ_EMPTY(&uctx->vm_info.regions)) 2110 return; /* Nothing to map */ 2111 2112 /* 2113 * Allocate all page tables in advance. 2114 */ 2115 pgt_get_all(uctx); 2116 pgt = SLIST_FIRST(pgt_cache); 2117 2118 core_mmu_set_info_table(&pg_info, dir_info->next_level, 0, NULL); 2119 2120 TAILQ_FOREACH(r, &uctx->vm_info.regions, link) 2121 set_pg_region(dir_info, r, &pgt, &pg_info); 2122 /* Record that the translation tables now are populated. */ 2123 SLIST_FOREACH(p, pgt_cache, link) { 2124 p->populated = true; 2125 if (p == pgt) 2126 break; 2127 } 2128 assert(p == pgt); 2129 } 2130 2131 TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr, 2132 size_t len) 2133 { 2134 struct core_mmu_table_info tbl_info = { }; 2135 struct tee_mmap_region *res_map = NULL; 2136 struct tee_mmap_region *map = NULL; 2137 paddr_t pa = virt_to_phys(addr); 2138 size_t granule = 0; 2139 ptrdiff_t i = 0; 2140 paddr_t p = 0; 2141 size_t l = 0; 2142 2143 map = find_map_by_type_and_pa(type, pa, len); 2144 if (!map) 2145 return TEE_ERROR_GENERIC; 2146 2147 res_map = find_map_by_type(MEM_AREA_RES_VASPACE); 2148 if (!res_map) 2149 return TEE_ERROR_GENERIC; 2150 if (!core_mmu_find_table(NULL, res_map->va, UINT_MAX, &tbl_info)) 2151 return TEE_ERROR_GENERIC; 2152 granule = BIT(tbl_info.shift); 2153 2154 if (map < static_memory_map.map || 2155 map >= static_memory_map.map + static_memory_map.count) 2156 return TEE_ERROR_GENERIC; 2157 i = map - static_memory_map.map; 2158 2159 /* Check that we have a full match */ 2160 p = ROUNDDOWN(pa, granule); 2161 l = ROUNDUP(len + pa - p, granule); 2162 if (map->pa != p || map->size != l) 2163 return TEE_ERROR_GENERIC; 2164 2165 clear_region(&tbl_info, map); 2166 tlbi_all(); 2167 2168 /* If possible remove the va range from res_map */ 2169 if (res_map->va - map->size == map->va) { 2170 res_map->va -= map->size; 2171 res_map->size += map->size; 2172 } 2173 2174 /* Remove the entry. 
*/ 2175 rem_array_elem(static_memory_map.map, static_memory_map.count, 2176 sizeof(*static_memory_map.map), i); 2177 static_memory_map.count--; 2178 2179 return TEE_SUCCESS; 2180 } 2181 2182 struct tee_mmap_region * 2183 core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len) 2184 { 2185 struct memory_map *mem_map = get_memory_map(); 2186 struct tee_mmap_region *map_found = NULL; 2187 size_t n = 0; 2188 2189 if (!len) 2190 return NULL; 2191 2192 for (n = 0; n < mem_map->count; n++) { 2193 if (mem_map->map[n].type != type) 2194 continue; 2195 2196 if (map_found) 2197 return NULL; 2198 2199 map_found = mem_map->map + n; 2200 } 2201 2202 if (!map_found || map_found->size < len) 2203 return NULL; 2204 2205 return map_found; 2206 } 2207 2208 void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len) 2209 { 2210 struct memory_map *mem_map = &static_memory_map; 2211 struct core_mmu_table_info tbl_info = { }; 2212 struct tee_mmap_region *map = NULL; 2213 size_t granule = 0; 2214 paddr_t p = 0; 2215 size_t l = 0; 2216 2217 if (!len) 2218 return NULL; 2219 2220 if (!core_mmu_check_end_pa(addr, len)) 2221 return NULL; 2222 2223 /* Check if the memory is already mapped */ 2224 map = find_map_by_type_and_pa(type, addr, len); 2225 if (map && pbuf_inside_map_area(addr, len, map)) 2226 return (void *)(vaddr_t)(map->va + addr - map->pa); 2227 2228 /* Find the reserved va space used for late mappings */ 2229 map = find_map_by_type(MEM_AREA_RES_VASPACE); 2230 if (!map) 2231 return NULL; 2232 2233 if (!core_mmu_find_table(NULL, map->va, UINT_MAX, &tbl_info)) 2234 return NULL; 2235 2236 granule = BIT64(tbl_info.shift); 2237 p = ROUNDDOWN(addr, granule); 2238 l = ROUNDUP(len + addr - p, granule); 2239 2240 /* Ban overflowing virtual addresses */ 2241 if (map->size < l) 2242 return NULL; 2243 2244 /* 2245 * Something is wrong, we can't fit the va range into the selected 2246 * table. The reserved va range is possibly misaligned with the 2247 * granule. 2248 */ 2249 if (core_mmu_va2idx(&tbl_info, map->va + len) >= tbl_info.num_entries) 2250 return NULL; 2251 2252 if (static_memory_map.count >= static_memory_map.alloc_count) 2253 return NULL; 2254 2255 mem_map->map[mem_map->count] = (struct tee_mmap_region){ 2256 .va = map->va, 2257 .size = l, 2258 .type = type, 2259 .region_size = granule, 2260 .attr = core_mmu_type_to_attr(type), 2261 .pa = p, 2262 }; 2263 map->va += l; 2264 map->size -= l; 2265 map = mem_map->map + mem_map->count; 2266 mem_map->count++; 2267 2268 set_region(&tbl_info, map); 2269 2270 /* Make sure the new entry is visible before continuing. 
*/ 2271 core_mmu_table_write_barrier(); 2272 2273 return (void *)(vaddr_t)(map->va + addr - map->pa); 2274 } 2275 2276 #ifdef CFG_WITH_PAGER 2277 static vaddr_t get_linear_map_end_va(void) 2278 { 2279 /* This is synced with the generic linker script kern.ld.S */ 2280 return (vaddr_t)__heap2_end; 2281 } 2282 2283 static paddr_t get_linear_map_end_pa(void) 2284 { 2285 return get_linear_map_end_va() - boot_mmu_config.map_offset; 2286 } 2287 #endif 2288 2289 #if defined(CFG_TEE_CORE_DEBUG) 2290 static void check_pa_matches_va(void *va, paddr_t pa) 2291 { 2292 TEE_Result res = TEE_ERROR_GENERIC; 2293 vaddr_t v = (vaddr_t)va; 2294 paddr_t p = 0; 2295 struct core_mmu_table_info ti __maybe_unused = { }; 2296 2297 if (core_mmu_user_va_range_is_defined()) { 2298 vaddr_t user_va_base = 0; 2299 size_t user_va_size = 0; 2300 2301 core_mmu_get_user_va_range(&user_va_base, &user_va_size); 2302 if (v >= user_va_base && 2303 v <= (user_va_base - 1 + user_va_size)) { 2304 if (!core_mmu_user_mapping_is_active()) { 2305 if (pa) 2306 panic("issue in linear address space"); 2307 return; 2308 } 2309 2310 res = vm_va2pa(to_user_mode_ctx(thread_get_tsd()->ctx), 2311 va, &p); 2312 if (res == TEE_ERROR_NOT_SUPPORTED) 2313 return; 2314 if (res == TEE_SUCCESS && pa != p) 2315 panic("bad pa"); 2316 if (res != TEE_SUCCESS && pa) 2317 panic("false pa"); 2318 return; 2319 } 2320 } 2321 #ifdef CFG_WITH_PAGER 2322 if (is_unpaged(va)) { 2323 if (v - boot_mmu_config.map_offset != pa) 2324 panic("issue in linear address space"); 2325 return; 2326 } 2327 2328 if (tee_pager_get_table_info(v, &ti)) { 2329 uint32_t a; 2330 2331 /* 2332 * Lookups in the page table managed by the pager are 2333 * dangerous for addresses in the paged area since those 2334 * pages change all the time. Some ranges are safe though, 2335 * for instance rw-locked areas while the page is populated. 
2336 */ 2337 core_mmu_get_entry(&ti, core_mmu_va2idx(&ti, v), &p, &a); 2338 if (a & TEE_MATTR_VALID_BLOCK) { 2339 paddr_t mask = BIT64(ti.shift) - 1; 2340 2341 p |= v & mask; 2342 if (pa != p) 2343 panic(); 2344 } else { 2345 if (pa) 2346 panic(); 2347 } 2348 return; 2349 } 2350 #endif 2351 2352 if (!core_va2pa_helper(va, &p)) { 2353 /* Verify only the static mapping (the non-null phys addr case) */ 2354 if (p && pa != p) { 2355 DMSG("va %p maps 0x%" PRIxPA ", expect 0x%" PRIxPA, 2356 va, p, pa); 2357 panic(); 2358 } 2359 } else { 2360 if (pa) { 2361 DMSG("va %p unmapped, expect 0x%" PRIxPA, va, pa); 2362 panic(); 2363 } 2364 } 2365 } 2366 #else 2367 static void check_pa_matches_va(void *va __unused, paddr_t pa __unused) 2368 { 2369 } 2370 #endif 2371 2372 paddr_t virt_to_phys(void *va) 2373 { 2374 paddr_t pa = 0; 2375 2376 if (!arch_va2pa_helper(va, &pa)) 2377 pa = 0; 2378 check_pa_matches_va(memtag_strip_tag(va), pa); 2379 return pa; 2380 } 2381 2382 #if defined(CFG_TEE_CORE_DEBUG) 2383 static void check_va_matches_pa(paddr_t pa, void *va) 2384 { 2385 paddr_t p = 0; 2386 2387 if (!va) 2388 return; 2389 2390 p = virt_to_phys(va); 2391 if (p != pa) { 2392 DMSG("va %p maps 0x%" PRIxPA " expect 0x%" PRIxPA, va, p, pa); 2393 panic(); 2394 } 2395 } 2396 #else 2397 static void check_va_matches_pa(paddr_t pa __unused, void *va __unused) 2398 { 2399 } 2400 #endif 2401 2402 static void *phys_to_virt_ts_vaspace(paddr_t pa, size_t len) 2403 { 2404 if (!core_mmu_user_mapping_is_active()) 2405 return NULL; 2406 2407 return vm_pa2va(to_user_mode_ctx(thread_get_tsd()->ctx), pa, len); 2408 } 2409 2410 #ifdef CFG_WITH_PAGER 2411 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len) 2412 { 2413 paddr_t end_pa = 0; 2414 2415 if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa)) 2416 return NULL; 2417 2418 if (pa >= TEE_LOAD_ADDR && pa < get_linear_map_end_pa()) { 2419 if (end_pa > get_linear_map_end_pa()) 2420 return NULL; 2421 return (void *)(vaddr_t)(pa + boot_mmu_config.map_offset); 2422 } 2423 2424 return tee_pager_phys_to_virt(pa, len); 2425 } 2426 #else 2427 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len) 2428 { 2429 struct tee_mmap_region *mmap = NULL; 2430 2431 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM, pa, len); 2432 if (!mmap) 2433 mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RW, pa, len); 2434 if (!mmap) 2435 mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RO, pa, len); 2436 if (!mmap) 2437 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RW, pa, len); 2438 if (!mmap) 2439 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RO, pa, len); 2440 if (!mmap) 2441 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RX, pa, len); 2442 /* 2443 * Note that MEM_AREA_INIT_RAM_RO and MEM_AREA_INIT_RAM_RX are only 2444 * used with pager and not needed here. 
2445 */ 2446 return map_pa2va(mmap, pa, len); 2447 } 2448 #endif 2449 2450 void *phys_to_virt(paddr_t pa, enum teecore_memtypes m, size_t len) 2451 { 2452 void *va = NULL; 2453 2454 switch (m) { 2455 case MEM_AREA_TS_VASPACE: 2456 va = phys_to_virt_ts_vaspace(pa, len); 2457 break; 2458 case MEM_AREA_TEE_RAM: 2459 case MEM_AREA_TEE_RAM_RX: 2460 case MEM_AREA_TEE_RAM_RO: 2461 case MEM_AREA_TEE_RAM_RW: 2462 case MEM_AREA_NEX_RAM_RO: 2463 case MEM_AREA_NEX_RAM_RW: 2464 va = phys_to_virt_tee_ram(pa, len); 2465 break; 2466 case MEM_AREA_SHM_VASPACE: 2467 /* Find VA from PA in dynamic SHM is not yet supported */ 2468 va = NULL; 2469 break; 2470 default: 2471 va = map_pa2va(find_map_by_type_and_pa(m, pa, len), pa, len); 2472 } 2473 if (m != MEM_AREA_SEC_RAM_OVERALL) 2474 check_va_matches_pa(pa, va); 2475 return va; 2476 } 2477 2478 void *phys_to_virt_io(paddr_t pa, size_t len) 2479 { 2480 struct tee_mmap_region *map = NULL; 2481 void *va = NULL; 2482 2483 map = find_map_by_type_and_pa(MEM_AREA_IO_SEC, pa, len); 2484 if (!map) 2485 map = find_map_by_type_and_pa(MEM_AREA_IO_NSEC, pa, len); 2486 if (!map) 2487 return NULL; 2488 va = map_pa2va(map, pa, len); 2489 check_va_matches_pa(pa, va); 2490 return va; 2491 } 2492 2493 vaddr_t core_mmu_get_va(paddr_t pa, enum teecore_memtypes type, size_t len) 2494 { 2495 if (cpu_mmu_enabled()) 2496 return (vaddr_t)phys_to_virt(pa, type, len); 2497 2498 return (vaddr_t)pa; 2499 } 2500 2501 #ifdef CFG_WITH_PAGER 2502 bool is_unpaged(const void *va) 2503 { 2504 vaddr_t v = (vaddr_t)va; 2505 2506 return v >= VCORE_START_VA && v < get_linear_map_end_va(); 2507 } 2508 #endif 2509 2510 #ifdef CFG_NS_VIRTUALIZATION 2511 bool is_nexus(const void *va) 2512 { 2513 vaddr_t v = (vaddr_t)va; 2514 2515 return v >= VCORE_START_VA && v < VCORE_NEX_RW_PA + VCORE_NEX_RW_SZ; 2516 } 2517 #endif 2518 2519 void core_mmu_init_virtualization(void) 2520 { 2521 paddr_t b1 = 0; 2522 paddr_size_t s1 = 0; 2523 2524 static_assert(ARRAY_SIZE(secure_only) <= 2); 2525 if (ARRAY_SIZE(secure_only) == 2) { 2526 b1 = secure_only[1].paddr; 2527 s1 = secure_only[1].size; 2528 } 2529 virt_init_memory(&static_memory_map, secure_only[0].paddr, 2530 secure_only[0].size, b1, s1); 2531 } 2532 2533 vaddr_t io_pa_or_va(struct io_pa_va *p, size_t len) 2534 { 2535 assert(p->pa); 2536 if (cpu_mmu_enabled()) { 2537 if (!p->va) 2538 p->va = (vaddr_t)phys_to_virt_io(p->pa, len); 2539 assert(p->va); 2540 return p->va; 2541 } 2542 return p->pa; 2543 } 2544 2545 vaddr_t io_pa_or_va_secure(struct io_pa_va *p, size_t len) 2546 { 2547 assert(p->pa); 2548 if (cpu_mmu_enabled()) { 2549 if (!p->va) 2550 p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_SEC, 2551 len); 2552 assert(p->va); 2553 return p->va; 2554 } 2555 return p->pa; 2556 } 2557 2558 vaddr_t io_pa_or_va_nsec(struct io_pa_va *p, size_t len) 2559 { 2560 assert(p->pa); 2561 if (cpu_mmu_enabled()) { 2562 if (!p->va) 2563 p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_NSEC, 2564 len); 2565 assert(p->va); 2566 return p->va; 2567 } 2568 return p->pa; 2569 } 2570 2571 #ifdef CFG_CORE_RESERVED_SHM 2572 static TEE_Result teecore_init_pub_ram(void) 2573 { 2574 vaddr_t s = 0; 2575 vaddr_t e = 0; 2576 2577 /* get virtual addr/size of NSec shared mem allocated from teecore */ 2578 core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e); 2579 2580 if (s >= e || s & SMALL_PAGE_MASK || e & SMALL_PAGE_MASK) 2581 panic("invalid PUB RAM"); 2582 2583 /* extra check: we could rely on core_mmu_get_mem_by_type() */ 2584 if (!tee_vbuf_is_non_sec(s, e - s)) 2585 panic("PUB RAM is not 
non-secure"); 2586 2587 #ifdef CFG_PL310 2588 /* Allocate statically the l2cc mutex */ 2589 tee_l2cc_store_mutex_boot_pa(virt_to_phys((void *)s)); 2590 s += sizeof(uint32_t); /* size of a pl310 mutex */ 2591 s = ROUNDUP(s, SMALL_PAGE_SIZE); /* keep required alignment */ 2592 #endif 2593 2594 default_nsec_shm_paddr = virt_to_phys((void *)s); 2595 default_nsec_shm_size = e - s; 2596 2597 return TEE_SUCCESS; 2598 } 2599 early_init(teecore_init_pub_ram); 2600 #endif /*CFG_CORE_RESERVED_SHM*/ 2601 2602 void core_mmu_init_phys_mem(void) 2603 { 2604 vaddr_t s = 0; 2605 vaddr_t e = 0; 2606 paddr_t ps = 0; 2607 size_t size = 0; 2608 2609 /* 2610 * Get virtual addr/size of RAM where TA are loaded/executedNSec 2611 * shared mem allocated from teecore. 2612 */ 2613 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) 2614 virt_get_ta_ram(&s, &e); 2615 else 2616 core_mmu_get_mem_by_type(MEM_AREA_TA_RAM, &s, &e); 2617 2618 ps = virt_to_phys((void *)s); 2619 size = e - s; 2620 2621 phys_mem_init(0, 0, ps, size); 2622 } 2623