1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2016-2025 Linaro Limited 4 * Copyright (c) 2014, STMicroelectronics International N.V. 5 * Copyright (c) 2022, Arm Limited and Contributors. All rights reserved. 6 */ 7 8 #include <assert.h> 9 #include <config.h> 10 #include <kernel/boot.h> 11 #include <kernel/dt.h> 12 #include <kernel/linker.h> 13 #include <kernel/panic.h> 14 #include <kernel/spinlock.h> 15 #include <kernel/tee_l2cc_mutex.h> 16 #include <kernel/tee_misc.h> 17 #include <kernel/tlb_helpers.h> 18 #include <kernel/user_mode_ctx.h> 19 #include <kernel/virtualization.h> 20 #include <libfdt.h> 21 #include <memtag.h> 22 #include <mm/core_memprot.h> 23 #include <mm/core_mmu.h> 24 #include <mm/mobj.h> 25 #include <mm/pgt_cache.h> 26 #include <mm/phys_mem.h> 27 #include <mm/tee_pager.h> 28 #include <mm/vm.h> 29 #include <platform_config.h> 30 #include <stdalign.h> 31 #include <string.h> 32 #include <trace.h> 33 #include <util.h> 34 35 #ifndef DEBUG_XLAT_TABLE 36 #define DEBUG_XLAT_TABLE 0 37 #endif 38 39 #define SHM_VASPACE_SIZE (1024 * 1024 * 32) 40 41 /* Virtual memory pool for core mappings */ 42 tee_mm_pool_t core_virt_mem_pool; 43 44 /* Virtual memory pool for shared memory mappings */ 45 tee_mm_pool_t core_virt_shm_pool; 46 47 #ifdef CFG_CORE_PHYS_RELOCATABLE 48 unsigned long core_mmu_tee_load_pa __nex_bss; 49 #else 50 const unsigned long core_mmu_tee_load_pa = TEE_LOAD_ADDR; 51 #endif 52 53 /* 54 * These variables are initialized before .bss is cleared. To avoid 55 * resetting them when .bss is cleared we're storing them in .data instead, 56 * even if they initially are zero. 57 */ 58 59 #ifdef CFG_CORE_RESERVED_SHM 60 /* Default NSec shared memory allocated from NSec world */ 61 unsigned long default_nsec_shm_size __nex_bss; 62 unsigned long default_nsec_shm_paddr __nex_bss; 63 #endif 64 65 static struct memory_map static_memory_map __nex_bss; 66 void (*memory_map_realloc_func)(struct memory_map *mem_map) __nex_bss; 67 68 /* Offset of the first TEE RAM mapping from start of secure RAM */ 69 static size_t tee_ram_initial_offs __nex_bss; 70 71 /* Define the platform's memory layout. 
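 *
 * The two arrays below describe, respectively, the physically secure ranges
 * and the reserved non-secure shared memory range. As an illustration
 * (platform specific, values purely hypothetical), a platform_config.h could
 * provide:
 *
 *	#define TRUSTED_DRAM_BASE	0x0e000000
 *	#define TRUSTED_DRAM_SIZE	0x01000000
 *	#define TEE_SHMEM_START		0x42000000
 *	#define TEE_SHMEM_SIZE		0x00200000
 *
 * which then end up in secure_only[] and, with CFG_CORE_RESERVED_SHM, in
 * nsec_shared[].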
*/ 72 struct memaccess_area { 73 paddr_t paddr; 74 size_t size; 75 }; 76 77 #define MEMACCESS_AREA(a, s) { .paddr = a, .size = s } 78 79 static struct memaccess_area secure_only[] __nex_data = { 80 #ifdef CFG_CORE_PHYS_RELOCATABLE 81 MEMACCESS_AREA(0, 0), 82 #else 83 #ifdef TRUSTED_SRAM_BASE 84 MEMACCESS_AREA(TRUSTED_SRAM_BASE, TRUSTED_SRAM_SIZE), 85 #endif 86 MEMACCESS_AREA(TRUSTED_DRAM_BASE, TRUSTED_DRAM_SIZE), 87 #endif 88 }; 89 90 static struct memaccess_area nsec_shared[] __nex_data = { 91 #ifdef CFG_CORE_RESERVED_SHM 92 MEMACCESS_AREA(TEE_SHMEM_START, TEE_SHMEM_SIZE), 93 #endif 94 }; 95 96 #if defined(CFG_SECURE_DATA_PATH) 97 static const char *tz_sdp_match = "linaro,secure-heap"; 98 static struct memaccess_area sec_sdp; 99 #ifdef CFG_TEE_SDP_MEM_BASE 100 register_sdp_mem(CFG_TEE_SDP_MEM_BASE, CFG_TEE_SDP_MEM_SIZE); 101 #endif 102 #ifdef TEE_SDP_TEST_MEM_BASE 103 register_sdp_mem(TEE_SDP_TEST_MEM_BASE, TEE_SDP_TEST_MEM_SIZE); 104 #endif 105 #endif 106 107 #ifdef CFG_CORE_RESERVED_SHM 108 register_phys_mem(MEM_AREA_NSEC_SHM, TEE_SHMEM_START, TEE_SHMEM_SIZE); 109 #endif 110 static unsigned int mmu_spinlock; 111 112 static uint32_t mmu_lock(void) 113 { 114 return cpu_spin_lock_xsave(&mmu_spinlock); 115 } 116 117 static void mmu_unlock(uint32_t exceptions) 118 { 119 cpu_spin_unlock_xrestore(&mmu_spinlock, exceptions); 120 } 121 122 static void heap_realloc_memory_map(struct memory_map *mem_map) 123 { 124 struct tee_mmap_region *m = NULL; 125 struct tee_mmap_region *old = mem_map->map; 126 size_t old_sz = sizeof(*old) * mem_map->alloc_count; 127 size_t sz = old_sz + sizeof(*m); 128 129 assert(nex_malloc_buffer_is_within_alloced(old, old_sz)); 130 m = nex_realloc(old, sz); 131 if (!m) 132 panic(); 133 mem_map->map = m; 134 mem_map->alloc_count++; 135 } 136 137 static void boot_mem_realloc_memory_map(struct memory_map *mem_map) 138 { 139 struct tee_mmap_region *m = NULL; 140 struct tee_mmap_region *old = mem_map->map; 141 size_t old_sz = sizeof(*old) * mem_map->alloc_count; 142 size_t sz = old_sz * 2; 143 144 m = boot_mem_alloc_tmp(sz, alignof(*m)); 145 memcpy(m, old, old_sz); 146 mem_map->map = m; 147 mem_map->alloc_count *= 2; 148 } 149 150 static void grow_mem_map(struct memory_map *mem_map) 151 { 152 if (mem_map->count == mem_map->alloc_count) { 153 if (!memory_map_realloc_func) { 154 EMSG("Out of entries (%zu) in mem_map", 155 mem_map->alloc_count); 156 panic(); 157 } 158 memory_map_realloc_func(mem_map); 159 } 160 mem_map->count++; 161 } 162 163 void core_mmu_get_secure_memory(paddr_t *base, paddr_size_t *size) 164 { 165 /* 166 * The first range is always used to cover OP-TEE core memory, but 167 * depending on configuration it may cover more than that. 
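	 *
	 * Illustrative (hypothetical) call order: on a CFG_CORE_PHYS_RELOCATABLE
	 * platform the early boot code is expected to have run
	 *
	 *	core_mmu_set_secure_memory(tee_base_pa, tee_size);
	 *
	 * before anything reads the range back through this function, so
	 * that secure_only[0] is already populated here.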
168 */ 169 *base = secure_only[0].paddr; 170 *size = secure_only[0].size; 171 } 172 173 void core_mmu_set_secure_memory(paddr_t base, size_t size) 174 { 175 #ifdef CFG_CORE_PHYS_RELOCATABLE 176 static_assert(ARRAY_SIZE(secure_only) == 1); 177 #endif 178 runtime_assert(IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE)); 179 assert(!secure_only[0].size); 180 assert(base && size); 181 182 DMSG("Physical secure memory base %#"PRIxPA" size %#zx", base, size); 183 secure_only[0].paddr = base; 184 secure_only[0].size = size; 185 } 186 187 static struct memory_map *get_memory_map(void) 188 { 189 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 190 struct memory_map *map = virt_get_memory_map(); 191 192 if (map) 193 return map; 194 } 195 196 return &static_memory_map; 197 } 198 199 static bool _pbuf_intersects(struct memaccess_area *a, size_t alen, 200 paddr_t pa, size_t size) 201 { 202 size_t n; 203 204 for (n = 0; n < alen; n++) 205 if (core_is_buffer_intersect(pa, size, a[n].paddr, a[n].size)) 206 return true; 207 return false; 208 } 209 210 #define pbuf_intersects(a, pa, size) \ 211 _pbuf_intersects((a), ARRAY_SIZE(a), (pa), (size)) 212 213 static bool _pbuf_is_inside(struct memaccess_area *a, size_t alen, 214 paddr_t pa, size_t size) 215 { 216 size_t n; 217 218 for (n = 0; n < alen; n++) 219 if (core_is_buffer_inside(pa, size, a[n].paddr, a[n].size)) 220 return true; 221 return false; 222 } 223 224 #define pbuf_is_inside(a, pa, size) \ 225 _pbuf_is_inside((a), ARRAY_SIZE(a), (pa), (size)) 226 227 static bool pa_is_in_map(struct tee_mmap_region *map, paddr_t pa, size_t len) 228 { 229 paddr_t end_pa = 0; 230 231 if (!map) 232 return false; 233 234 if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa)) 235 return false; 236 237 return (pa >= map->pa && end_pa <= map->pa + map->size - 1); 238 } 239 240 static bool va_is_in_map(struct tee_mmap_region *map, vaddr_t va) 241 { 242 if (!map) 243 return false; 244 return (va >= map->va && va <= (map->va + map->size - 1)); 245 } 246 247 /* check if target buffer fits in a core default map area */ 248 static bool pbuf_inside_map_area(unsigned long p, size_t l, 249 struct tee_mmap_region *map) 250 { 251 return core_is_buffer_inside(p, l, map->pa, map->size); 252 } 253 254 TEE_Result core_mmu_for_each_map(void *ptr, 255 TEE_Result (*fn)(struct tee_mmap_region *map, 256 void *ptr)) 257 { 258 struct memory_map *mem_map = get_memory_map(); 259 TEE_Result res = TEE_SUCCESS; 260 size_t n = 0; 261 262 for (n = 0; n < mem_map->count; n++) { 263 res = fn(mem_map->map + n, ptr); 264 if (res) 265 return res; 266 } 267 268 return TEE_SUCCESS; 269 } 270 271 static struct tee_mmap_region *find_map_by_type(enum teecore_memtypes type) 272 { 273 struct memory_map *mem_map = get_memory_map(); 274 size_t n = 0; 275 276 for (n = 0; n < mem_map->count; n++) { 277 if (mem_map->map[n].type == type) 278 return mem_map->map + n; 279 } 280 return NULL; 281 } 282 283 static struct tee_mmap_region * 284 find_map_by_type_and_pa(enum teecore_memtypes type, paddr_t pa, size_t len) 285 { 286 struct memory_map *mem_map = get_memory_map(); 287 size_t n = 0; 288 289 for (n = 0; n < mem_map->count; n++) { 290 if (mem_map->map[n].type != type) 291 continue; 292 if (pa_is_in_map(mem_map->map + n, pa, len)) 293 return mem_map->map + n; 294 } 295 return NULL; 296 } 297 298 static struct tee_mmap_region *find_map_by_va(void *va) 299 { 300 struct memory_map *mem_map = get_memory_map(); 301 vaddr_t a = (vaddr_t)va; 302 size_t n = 0; 303 304 for (n = 0; n < mem_map->count; n++) { 305 if (a >= 
mem_map->map[n].va && 306 a <= (mem_map->map[n].va - 1 + mem_map->map[n].size)) 307 return mem_map->map + n; 308 } 309 310 return NULL; 311 } 312 313 static struct tee_mmap_region *find_map_by_pa(unsigned long pa) 314 { 315 struct memory_map *mem_map = get_memory_map(); 316 size_t n = 0; 317 318 for (n = 0; n < mem_map->count; n++) { 319 /* Skip unmapped regions */ 320 if ((mem_map->map[n].attr & TEE_MATTR_VALID_BLOCK) && 321 pa >= mem_map->map[n].pa && 322 pa <= (mem_map->map[n].pa - 1 + mem_map->map[n].size)) 323 return mem_map->map + n; 324 } 325 326 return NULL; 327 } 328 329 #if defined(CFG_SECURE_DATA_PATH) 330 static bool dtb_get_sdp_region(void) 331 { 332 void *fdt = NULL; 333 int node = 0; 334 int tmp_node = 0; 335 paddr_t tmp_addr = 0; 336 size_t tmp_size = 0; 337 338 if (!IS_ENABLED(CFG_EMBED_DTB)) 339 return false; 340 341 fdt = get_embedded_dt(); 342 if (!fdt) 343 panic("No DTB found"); 344 345 node = fdt_node_offset_by_compatible(fdt, -1, tz_sdp_match); 346 if (node < 0) { 347 DMSG("No %s compatible node found", tz_sdp_match); 348 return false; 349 } 350 tmp_node = node; 351 while (tmp_node >= 0) { 352 tmp_node = fdt_node_offset_by_compatible(fdt, tmp_node, 353 tz_sdp_match); 354 if (tmp_node >= 0) 355 DMSG("Ignore SDP pool node %s, supports only 1 node", 356 fdt_get_name(fdt, tmp_node, NULL)); 357 } 358 359 if (fdt_reg_info(fdt, node, &tmp_addr, &tmp_size)) { 360 EMSG("%s: Unable to get base addr or size from DT", 361 tz_sdp_match); 362 return false; 363 } 364 365 sec_sdp.paddr = tmp_addr; 366 sec_sdp.size = tmp_size; 367 368 return true; 369 } 370 #endif 371 372 #if defined(CFG_CORE_DYN_SHM) || defined(CFG_SECURE_DATA_PATH) 373 static bool pbuf_is_special_mem(paddr_t pbuf, size_t len, 374 const struct core_mmu_phys_mem *start, 375 const struct core_mmu_phys_mem *end) 376 { 377 const struct core_mmu_phys_mem *mem; 378 379 for (mem = start; mem < end; mem++) { 380 if (core_is_buffer_inside(pbuf, len, mem->addr, mem->size)) 381 return true; 382 } 383 384 return false; 385 } 386 #endif 387 388 #ifdef CFG_CORE_DYN_SHM 389 static void carve_out_phys_mem(struct core_mmu_phys_mem **mem, size_t *nelems, 390 paddr_t pa, size_t size) 391 { 392 struct core_mmu_phys_mem *m = *mem; 393 size_t n = 0; 394 395 while (n < *nelems) { 396 if (!core_is_buffer_intersect(pa, size, m[n].addr, m[n].size)) { 397 n++; 398 continue; 399 } 400 401 if (core_is_buffer_inside(m[n].addr, m[n].size, pa, size)) { 402 /* m[n] is completely covered by pa:size */ 403 rem_array_elem(m, *nelems, sizeof(*m), n); 404 (*nelems)--; 405 m = nex_realloc(m, sizeof(*m) * *nelems); 406 if (!m) 407 panic(); 408 *mem = m; 409 continue; 410 } 411 412 if (pa > m[n].addr && 413 pa + size - 1 < m[n].addr + m[n].size - 1) { 414 /* 415 * pa:size is strictly inside m[n] range so split 416 * m[n] entry. 417 */ 418 m = nex_realloc(m, sizeof(*m) * (*nelems + 1)); 419 if (!m) 420 panic(); 421 *mem = m; 422 (*nelems)++; 423 ins_array_elem(m, *nelems, sizeof(*m), n + 1, NULL); 424 m[n + 1].addr = pa + size; 425 m[n + 1].size = m[n].addr + m[n].size - pa - size; 426 m[n].size = pa - m[n].addr; 427 n++; 428 } else if (pa <= m[n].addr) { 429 /* 430 * pa:size is overlapping (possibly partially) at the 431 * beginning of m[n]. 432 */ 433 m[n].size = m[n].addr + m[n].size - pa - size; 434 m[n].addr = pa + size; 435 } else { 436 /* 437 * pa:size is overlapping (possibly partially) at 438 * the end of m[n]. 
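			 *
			 * Illustrative example (hypothetical numbers): with
			 * m[n] = 0x80000000:0x10000000 and pa:size =
			 * 0x8c000000:0x08000000 the carved-out range reaches
			 * past the end of m[n], so only the part below pa is
			 * kept and m[n].size becomes 0x0c000000.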
439 */ 440 m[n].size = pa - m[n].addr; 441 } 442 n++; 443 } 444 } 445 446 static void check_phys_mem_is_outside(struct core_mmu_phys_mem *start, 447 size_t nelems, 448 struct tee_mmap_region *map) 449 { 450 size_t n; 451 452 for (n = 0; n < nelems; n++) { 453 if (!core_is_buffer_outside(start[n].addr, start[n].size, 454 map->pa, map->size)) { 455 EMSG("Non-sec mem (%#" PRIxPA ":%#" PRIxPASZ 456 ") overlaps map (type %d %#" PRIxPA ":%#zx)", 457 start[n].addr, start[n].size, 458 map->type, map->pa, map->size); 459 panic(); 460 } 461 } 462 } 463 464 static const struct core_mmu_phys_mem *discovered_nsec_ddr_start __nex_bss; 465 static size_t discovered_nsec_ddr_nelems __nex_bss; 466 467 static int cmp_pmem_by_addr(const void *a, const void *b) 468 { 469 const struct core_mmu_phys_mem *pmem_a = a; 470 const struct core_mmu_phys_mem *pmem_b = b; 471 472 return CMP_TRILEAN(pmem_a->addr, pmem_b->addr); 473 } 474 475 void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start, 476 size_t nelems) 477 { 478 struct core_mmu_phys_mem *m = start; 479 size_t num_elems = nelems; 480 struct memory_map *mem_map = &static_memory_map; 481 const struct core_mmu_phys_mem __maybe_unused *pmem; 482 size_t n = 0; 483 484 assert(!discovered_nsec_ddr_start); 485 assert(m && num_elems); 486 487 qsort(m, num_elems, sizeof(*m), cmp_pmem_by_addr); 488 489 /* 490 * Non-secure shared memory and also secure data 491 * path memory are supposed to reside inside 492 * non-secure memory. Since NSEC_SHM and SDP_MEM 493 * are used for a specific purpose make holes for 494 * those memory in the normal non-secure memory. 495 * 496 * This has to be done since for instance QEMU 497 * isn't aware of which memory range in the 498 * non-secure memory is used for NSEC_SHM. 499 */ 500 501 #ifdef CFG_SECURE_DATA_PATH 502 if (dtb_get_sdp_region()) 503 carve_out_phys_mem(&m, &num_elems, sec_sdp.paddr, sec_sdp.size); 504 505 for (pmem = phys_sdp_mem_begin; pmem < phys_sdp_mem_end; pmem++) 506 carve_out_phys_mem(&m, &num_elems, pmem->addr, pmem->size); 507 #endif 508 509 for (n = 0; n < ARRAY_SIZE(secure_only); n++) 510 carve_out_phys_mem(&m, &num_elems, secure_only[n].paddr, 511 secure_only[n].size); 512 513 for (n = 0; n < mem_map->count; n++) { 514 switch (mem_map->map[n].type) { 515 case MEM_AREA_NSEC_SHM: 516 carve_out_phys_mem(&m, &num_elems, mem_map->map[n].pa, 517 mem_map->map[n].size); 518 break; 519 case MEM_AREA_EXT_DT: 520 case MEM_AREA_MANIFEST_DT: 521 case MEM_AREA_RAM_NSEC: 522 case MEM_AREA_RES_VASPACE: 523 case MEM_AREA_SHM_VASPACE: 524 case MEM_AREA_TS_VASPACE: 525 case MEM_AREA_PAGER_VASPACE: 526 case MEM_AREA_NEX_DYN_VASPACE: 527 case MEM_AREA_TEE_DYN_VASPACE: 528 break; 529 default: 530 check_phys_mem_is_outside(m, num_elems, 531 mem_map->map + n); 532 } 533 } 534 535 discovered_nsec_ddr_start = m; 536 discovered_nsec_ddr_nelems = num_elems; 537 538 DMSG("Non-secure RAM:"); 539 for (n = 0; n < num_elems; n++) 540 DMSG("%zu: pa %#"PRIxPA"..%#"PRIxPA" sz %#"PRIxPASZ, 541 n, m[n].addr, m[n].addr + m[n].size - 1, m[n].size); 542 543 if (!core_mmu_check_end_pa(m[num_elems - 1].addr, 544 m[num_elems - 1].size)) 545 panic(); 546 } 547 548 static bool get_discovered_nsec_ddr(const struct core_mmu_phys_mem **start, 549 const struct core_mmu_phys_mem **end) 550 { 551 if (!discovered_nsec_ddr_start) 552 return false; 553 554 *start = discovered_nsec_ddr_start; 555 *end = discovered_nsec_ddr_start + discovered_nsec_ddr_nelems; 556 557 return true; 558 } 559 560 static bool pbuf_is_nsec_ddr(paddr_t pbuf, size_t len) 561 { 
562 const struct core_mmu_phys_mem *start; 563 const struct core_mmu_phys_mem *end; 564 565 if (!get_discovered_nsec_ddr(&start, &end)) 566 return false; 567 568 return pbuf_is_special_mem(pbuf, len, start, end); 569 } 570 571 bool core_mmu_nsec_ddr_is_defined(void) 572 { 573 const struct core_mmu_phys_mem *start; 574 const struct core_mmu_phys_mem *end; 575 576 if (!get_discovered_nsec_ddr(&start, &end)) 577 return false; 578 579 return start != end; 580 } 581 582 TEE_Result 583 core_mmu_for_each_nsec_ddr(void *ptr, 584 TEE_Result (*fn)(const struct core_mmu_phys_mem *m, 585 void *ptr)) 586 { 587 const struct core_mmu_phys_mem *start = NULL; 588 const struct core_mmu_phys_mem *end = NULL; 589 const struct core_mmu_phys_mem *mem = NULL; 590 TEE_Result res = TEE_ERROR_GENERIC; 591 592 if (!get_discovered_nsec_ddr(&start, &end)) 593 return TEE_ERROR_GENERIC; 594 595 for (mem = start; mem < end; mem++) { 596 res = fn(mem, ptr); 597 if (res) 598 return res; 599 } 600 601 return TEE_SUCCESS; 602 } 603 #else 604 static bool pbuf_is_nsec_ddr(paddr_t pbuf __unused, size_t len __unused) 605 { 606 return false; 607 } 608 #endif /*CFG_CORE_DYN_SHM*/ 609 610 #define MSG_MEM_INSTERSECT(pa1, sz1, pa2, sz2) \ 611 EMSG("[%" PRIxPA " %" PRIx64 "] intersects [%" PRIxPA " %" PRIx64 "]", \ 612 pa1, (uint64_t)pa1 + (sz1), pa2, (uint64_t)pa2 + (sz2)) 613 614 #ifdef CFG_SECURE_DATA_PATH 615 static bool pbuf_is_sdp_mem(paddr_t pbuf, size_t len) 616 { 617 bool is_sdp_mem = false; 618 619 if (sec_sdp.size) 620 is_sdp_mem = core_is_buffer_inside(pbuf, len, sec_sdp.paddr, 621 sec_sdp.size); 622 623 if (!is_sdp_mem) 624 is_sdp_mem = pbuf_is_special_mem(pbuf, len, phys_sdp_mem_begin, 625 phys_sdp_mem_end); 626 627 if (!is_sdp_mem) { 628 struct mobj *m = mobj_protmem_get_by_pa(pbuf, len); 629 630 if (!m) 631 m = mobj_ffa_protmem_get_by_pa(pbuf, len); 632 if (m) { 633 mobj_put(m); 634 is_sdp_mem = true; 635 } 636 } 637 638 return is_sdp_mem; 639 } 640 641 static struct mobj *core_sdp_mem_alloc_mobj(paddr_t pa, size_t size) 642 { 643 struct mobj *mobj = mobj_phys_alloc(pa, size, TEE_MATTR_MEM_TYPE_CACHED, 644 CORE_MEM_SDP_MEM); 645 646 if (!mobj) 647 panic("can't create SDP physical memory object"); 648 649 return mobj; 650 } 651 652 struct mobj **core_sdp_mem_create_mobjs(void) 653 { 654 const struct core_mmu_phys_mem *mem = NULL; 655 struct mobj **mobj_base = NULL; 656 struct mobj **mobj = NULL; 657 int cnt = phys_sdp_mem_end - phys_sdp_mem_begin; 658 659 if (sec_sdp.size) 660 cnt++; 661 662 /* SDP mobjs table must end with a NULL entry */ 663 mobj_base = calloc(cnt + 1, sizeof(struct mobj *)); 664 if (!mobj_base) 665 panic("Out of memory"); 666 667 mobj = mobj_base; 668 669 for (mem = phys_sdp_mem_begin; mem < phys_sdp_mem_end; mem++, mobj++) 670 *mobj = core_sdp_mem_alloc_mobj(mem->addr, mem->size); 671 672 if (sec_sdp.size) 673 *mobj = core_sdp_mem_alloc_mobj(sec_sdp.paddr, sec_sdp.size); 674 675 return mobj_base; 676 } 677 678 #else /* CFG_SECURE_DATA_PATH */ 679 static bool pbuf_is_sdp_mem(paddr_t pbuf __unused, size_t len __unused) 680 { 681 return false; 682 } 683 684 #endif /* CFG_SECURE_DATA_PATH */ 685 686 /* Check special memories comply with registered memories */ 687 static void verify_special_mem_areas(struct memory_map *mem_map, 688 const struct core_mmu_phys_mem *start, 689 const struct core_mmu_phys_mem *end, 690 const char *area_name __maybe_unused) 691 { 692 const struct core_mmu_phys_mem *mem = NULL; 693 const struct core_mmu_phys_mem *mem2 = NULL; 694 size_t n = 0; 695 696 if (start == end) { 697 
DMSG("No %s memory area defined", area_name); 698 return; 699 } 700 701 for (mem = start; mem < end; mem++) 702 DMSG("%s memory [%" PRIxPA " %" PRIx64 "]", 703 area_name, mem->addr, (uint64_t)mem->addr + mem->size); 704 705 /* Check memories do not intersect each other */ 706 for (mem = start; mem + 1 < end; mem++) { 707 for (mem2 = mem + 1; mem2 < end; mem2++) { 708 if (core_is_buffer_intersect(mem2->addr, mem2->size, 709 mem->addr, mem->size)) { 710 MSG_MEM_INSTERSECT(mem2->addr, mem2->size, 711 mem->addr, mem->size); 712 panic("Special memory intersection"); 713 } 714 } 715 } 716 717 /* 718 * Check memories do not intersect any mapped memory. 719 * This is called before reserved VA space is loaded in mem_map. 720 */ 721 for (mem = start; mem < end; mem++) { 722 for (n = 0; n < mem_map->count; n++) { 723 #ifdef TEE_SDP_TEST_MEM_BASE 724 /* 725 * Ignore MEM_AREA_SEC_RAM_OVERALL since it covers 726 * TEE_SDP_TEST_MEM too. 727 */ 728 if (mem->addr == TEE_SDP_TEST_MEM_BASE && 729 mem->size == TEE_SDP_TEST_MEM_SIZE && 730 mem_map->map[n].type == MEM_AREA_SEC_RAM_OVERALL) 731 continue; 732 #endif 733 if (core_is_buffer_intersect(mem->addr, mem->size, 734 mem_map->map[n].pa, 735 mem_map->map[n].size)) { 736 MSG_MEM_INSTERSECT(mem->addr, mem->size, 737 mem_map->map[n].pa, 738 mem_map->map[n].size); 739 panic("Special memory intersection"); 740 } 741 } 742 } 743 } 744 745 static void merge_mmaps(struct tee_mmap_region *dst, 746 const struct tee_mmap_region *src) 747 { 748 paddr_t end_pa = MAX(dst->pa + dst->size - 1, src->pa + src->size - 1); 749 paddr_t pa = MIN(dst->pa, src->pa); 750 751 DMSG("Merging %#"PRIxPA"..%#"PRIxPA" and %#"PRIxPA"..%#"PRIxPA, 752 dst->pa, dst->pa + dst->size - 1, src->pa, 753 src->pa + src->size - 1); 754 dst->pa = pa; 755 dst->size = end_pa - pa + 1; 756 } 757 758 static bool mmaps_are_mergeable(const struct tee_mmap_region *r1, 759 const struct tee_mmap_region *r2) 760 { 761 if (r1->type != r2->type) 762 return false; 763 764 if (r1->pa == r2->pa) 765 return true; 766 767 if (r1->pa < r2->pa) 768 return r1->pa + r1->size >= r2->pa; 769 else 770 return r2->pa + r2->size >= r1->pa; 771 } 772 773 static void add_phys_mem(struct memory_map *mem_map, 774 const char *mem_name __maybe_unused, 775 enum teecore_memtypes mem_type, 776 paddr_t mem_addr, paddr_size_t mem_size) 777 { 778 size_t n = 0; 779 const struct tee_mmap_region m0 = { 780 .type = mem_type, 781 .pa = mem_addr, 782 .size = mem_size, 783 }; 784 785 if (!mem_size) /* Discard null size entries */ 786 return; 787 788 /* 789 * If some ranges of memory of the same type do overlap 790 * each others they are coalesced into one entry. To help this 791 * added entries are sorted by increasing physical. 792 * 793 * Note that it's valid to have the same physical memory as several 794 * different memory types, for instance the same device memory 795 * mapped as both secure and non-secure. This will probably not 796 * happen often in practice. 797 */ 798 DMSG("%s type %s 0x%08" PRIxPA " size 0x%08" PRIxPASZ, 799 mem_name, teecore_memtype_name(mem_type), mem_addr, mem_size); 800 for (n = 0; n < mem_map->count; n++) { 801 if (mmaps_are_mergeable(mem_map->map + n, &m0)) { 802 merge_mmaps(mem_map->map + n, &m0); 803 /* 804 * The merged result might be mergeable with the 805 * next or previous entry. 
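			 *
			 * For example (hypothetical addresses): if the map
			 * already holds two entries of the same type,
			 * 0x40000000:0x100000 and 0x40200000:0x100000, adding
			 * 0x40100000:0x100000 first merges with one of them
			 * and the checks below then collapse all three into a
			 * single 0x40000000:0x300000 entry.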
806 */ 807 if (n + 1 < mem_map->count && 808 mmaps_are_mergeable(mem_map->map + n, 809 mem_map->map + n + 1)) { 810 merge_mmaps(mem_map->map + n, 811 mem_map->map + n + 1); 812 rem_array_elem(mem_map->map, mem_map->count, 813 sizeof(*mem_map->map), n + 1); 814 mem_map->count--; 815 } 816 if (n > 0 && mmaps_are_mergeable(mem_map->map + n - 1, 817 mem_map->map + n)) { 818 merge_mmaps(mem_map->map + n - 1, 819 mem_map->map + n); 820 rem_array_elem(mem_map->map, mem_map->count, 821 sizeof(*mem_map->map), n); 822 mem_map->count--; 823 } 824 return; 825 } 826 if (mem_type < mem_map->map[n].type || 827 (mem_type == mem_map->map[n].type && 828 mem_addr < mem_map->map[n].pa)) 829 break; /* found the spot where to insert this memory */ 830 } 831 832 grow_mem_map(mem_map); 833 ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map), 834 n, &m0); 835 } 836 837 static void add_va_space(struct memory_map *mem_map, 838 enum teecore_memtypes type, size_t size) 839 { 840 size_t n = 0; 841 842 DMSG("type %s size 0x%08zx", teecore_memtype_name(type), size); 843 for (n = 0; n < mem_map->count; n++) { 844 if (type < mem_map->map[n].type) 845 break; 846 } 847 848 grow_mem_map(mem_map); 849 ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map), 850 n, NULL); 851 mem_map->map[n] = (struct tee_mmap_region){ 852 .type = type, 853 .size = size, 854 }; 855 } 856 857 uint32_t core_mmu_type_to_attr(enum teecore_memtypes t) 858 { 859 const uint32_t attr = TEE_MATTR_VALID_BLOCK; 860 const uint32_t tagged = TEE_MATTR_MEM_TYPE_TAGGED << 861 TEE_MATTR_MEM_TYPE_SHIFT; 862 const uint32_t cached = TEE_MATTR_MEM_TYPE_CACHED << 863 TEE_MATTR_MEM_TYPE_SHIFT; 864 const uint32_t noncache = TEE_MATTR_MEM_TYPE_DEV << 865 TEE_MATTR_MEM_TYPE_SHIFT; 866 867 switch (t) { 868 case MEM_AREA_TEE_RAM: 869 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | tagged; 870 case MEM_AREA_TEE_RAM_RX: 871 case MEM_AREA_INIT_RAM_RX: 872 case MEM_AREA_IDENTITY_MAP_RX: 873 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRX | tagged; 874 case MEM_AREA_TEE_RAM_RO: 875 case MEM_AREA_INIT_RAM_RO: 876 return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | tagged; 877 case MEM_AREA_TEE_RAM_RW: 878 case MEM_AREA_NEX_RAM_RO: /* This has to be r/w during init runtime */ 879 case MEM_AREA_NEX_RAM_RW: 880 case MEM_AREA_NEX_DYN_VASPACE: 881 case MEM_AREA_TEE_DYN_VASPACE: 882 case MEM_AREA_TEE_ASAN: 883 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged; 884 case MEM_AREA_TEE_COHERENT: 885 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | noncache; 886 case MEM_AREA_NSEC_SHM: 887 case MEM_AREA_NEX_NSEC_SHM: 888 return attr | TEE_MATTR_PRW | cached; 889 case MEM_AREA_MANIFEST_DT: 890 return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | cached; 891 case MEM_AREA_TRANSFER_LIST: 892 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached; 893 case MEM_AREA_EXT_DT: 894 /* 895 * If CFG_MAP_EXT_DT_SECURE is enabled map the external device 896 * tree as secure non-cached memory, otherwise, fall back to 897 * non-secure mapping. 
898 */ 899 if (IS_ENABLED(CFG_MAP_EXT_DT_SECURE)) 900 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | 901 noncache; 902 fallthrough; 903 case MEM_AREA_IO_NSEC: 904 return attr | TEE_MATTR_PRW | noncache; 905 case MEM_AREA_IO_SEC: 906 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | noncache; 907 case MEM_AREA_RAM_NSEC: 908 return attr | TEE_MATTR_PRW | cached; 909 case MEM_AREA_RAM_SEC: 910 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached; 911 case MEM_AREA_SEC_RAM_OVERALL: 912 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged; 913 case MEM_AREA_ROM_SEC: 914 return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | cached; 915 case MEM_AREA_RES_VASPACE: 916 case MEM_AREA_SHM_VASPACE: 917 return 0; 918 case MEM_AREA_PAGER_VASPACE: 919 return TEE_MATTR_SECURE; 920 default: 921 panic("invalid type"); 922 } 923 } 924 925 static bool __maybe_unused map_is_tee_ram(const struct tee_mmap_region *mm) 926 { 927 switch (mm->type) { 928 case MEM_AREA_TEE_RAM: 929 case MEM_AREA_TEE_RAM_RX: 930 case MEM_AREA_TEE_RAM_RO: 931 case MEM_AREA_TEE_RAM_RW: 932 case MEM_AREA_INIT_RAM_RX: 933 case MEM_AREA_INIT_RAM_RO: 934 case MEM_AREA_NEX_RAM_RW: 935 case MEM_AREA_NEX_RAM_RO: 936 case MEM_AREA_TEE_ASAN: 937 return true; 938 default: 939 return false; 940 } 941 } 942 943 static bool __maybe_unused map_is_secure(const struct tee_mmap_region *mm) 944 { 945 return !!(core_mmu_type_to_attr(mm->type) & TEE_MATTR_SECURE); 946 } 947 948 static bool __maybe_unused map_is_pgdir(const struct tee_mmap_region *mm) 949 { 950 return mm->region_size == CORE_MMU_PGDIR_SIZE; 951 } 952 953 static int cmp_mmap_by_lower_va(const void *a, const void *b) 954 { 955 const struct tee_mmap_region *mm_a = a; 956 const struct tee_mmap_region *mm_b = b; 957 958 return CMP_TRILEAN(mm_a->va, mm_b->va); 959 } 960 961 static void dump_mmap_table(struct memory_map *mem_map) 962 { 963 size_t n = 0; 964 965 for (n = 0; n < mem_map->count; n++) { 966 struct tee_mmap_region *map __maybe_unused = mem_map->map + n; 967 968 DMSG("type %-12s va 0x%08" PRIxVA "..0x%08" PRIxVA 969 " pa 0x%08" PRIxPA "..0x%08" PRIxPA " size 0x%08zx (%s)", 970 teecore_memtype_name(map->type), map->va, 971 map->va + map->size - 1, map->pa, 972 (paddr_t)(map->pa + map->size - 1), map->size, 973 map->region_size == SMALL_PAGE_SIZE ? "smallpg" : "pgdir"); 974 } 975 } 976 977 #if DEBUG_XLAT_TABLE 978 979 static void dump_xlat_table(vaddr_t va, unsigned int level) 980 { 981 struct core_mmu_table_info tbl_info; 982 unsigned int idx = 0; 983 paddr_t pa; 984 uint32_t attr; 985 986 core_mmu_find_table(NULL, va, level, &tbl_info); 987 va = tbl_info.va_base; 988 for (idx = 0; idx < tbl_info.num_entries; idx++) { 989 core_mmu_get_entry(&tbl_info, idx, &pa, &attr); 990 if (attr || level > CORE_MMU_BASE_TABLE_LEVEL) { 991 const char *security_bit = ""; 992 993 if (core_mmu_entry_have_security_bit(attr)) { 994 if (attr & TEE_MATTR_SECURE) 995 security_bit = "S"; 996 else 997 security_bit = "NS"; 998 } 999 1000 if (attr & TEE_MATTR_TABLE) { 1001 DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA 1002 " TBL:0x%010" PRIxPA " %s", 1003 level * 2, "", level, va, pa, 1004 security_bit); 1005 dump_xlat_table(va, level + 1); 1006 } else if (attr) { 1007 DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA 1008 " PA:0x%010" PRIxPA " %s-%s-%s-%s", 1009 level * 2, "", level, va, pa, 1010 mattr_is_cached(attr) ? "MEM" : 1011 "DEV", 1012 attr & TEE_MATTR_PW ? "RW" : "RO", 1013 attr & TEE_MATTR_PX ? 
"X " : "XN", 1014 security_bit); 1015 } else { 1016 DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA 1017 " INVALID\n", 1018 level * 2, "", level, va); 1019 } 1020 } 1021 va += BIT64(tbl_info.shift); 1022 } 1023 } 1024 1025 #else 1026 1027 static void dump_xlat_table(vaddr_t va __unused, int level __unused) 1028 { 1029 } 1030 1031 #endif 1032 1033 /* 1034 * Reserves virtual memory space for pager usage. 1035 * 1036 * From the start of the first memory used by the link script + 1037 * TEE_RAM_VA_SIZE should be covered, either with a direct mapping or empty 1038 * mapping for pager usage. This adds translation tables as needed for the 1039 * pager to operate. 1040 */ 1041 static void add_pager_vaspace(struct memory_map *mem_map) 1042 { 1043 paddr_t begin = 0; 1044 paddr_t end = 0; 1045 size_t size = 0; 1046 size_t pos = 0; 1047 size_t n = 0; 1048 1049 1050 for (n = 0; n < mem_map->count; n++) { 1051 if (map_is_tee_ram(mem_map->map + n)) { 1052 if (!begin) 1053 begin = mem_map->map[n].pa; 1054 pos = n + 1; 1055 } 1056 } 1057 1058 end = mem_map->map[pos - 1].pa + mem_map->map[pos - 1].size; 1059 assert(end - begin < TEE_RAM_VA_SIZE); 1060 size = TEE_RAM_VA_SIZE - (end - begin); 1061 1062 grow_mem_map(mem_map); 1063 ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map), 1064 n, NULL); 1065 mem_map->map[n] = (struct tee_mmap_region){ 1066 .type = MEM_AREA_PAGER_VASPACE, 1067 .size = size, 1068 .region_size = SMALL_PAGE_SIZE, 1069 .attr = core_mmu_type_to_attr(MEM_AREA_PAGER_VASPACE), 1070 }; 1071 } 1072 1073 static void check_sec_nsec_mem_config(void) 1074 { 1075 size_t n = 0; 1076 1077 for (n = 0; n < ARRAY_SIZE(secure_only); n++) { 1078 if (pbuf_intersects(nsec_shared, secure_only[n].paddr, 1079 secure_only[n].size)) 1080 panic("Invalid memory access config: sec/nsec"); 1081 } 1082 } 1083 1084 static void collect_device_mem_ranges(struct memory_map *mem_map) 1085 { 1086 const char *compatible = "arm,ffa-manifest-device-regions"; 1087 void *fdt = get_manifest_dt(); 1088 const char *name = NULL; 1089 uint64_t page_count = 0; 1090 uint64_t base = 0; 1091 int subnode = 0; 1092 int node = 0; 1093 1094 assert(fdt); 1095 1096 node = fdt_node_offset_by_compatible(fdt, 0, compatible); 1097 if (node < 0) 1098 return; 1099 1100 fdt_for_each_subnode(subnode, fdt, node) { 1101 name = fdt_get_name(fdt, subnode, NULL); 1102 if (!name) 1103 continue; 1104 1105 if (dt_getprop_as_number(fdt, subnode, "base-address", 1106 &base)) { 1107 EMSG("Mandatory field is missing: base-address"); 1108 continue; 1109 } 1110 1111 if (base & SMALL_PAGE_MASK) { 1112 EMSG("base-address is not page aligned"); 1113 continue; 1114 } 1115 1116 if (dt_getprop_as_number(fdt, subnode, "pages-count", 1117 &page_count)) { 1118 EMSG("Mandatory field is missing: pages-count"); 1119 continue; 1120 } 1121 1122 add_phys_mem(mem_map, name, MEM_AREA_IO_SEC, 1123 base, page_count * SMALL_PAGE_SIZE); 1124 } 1125 } 1126 1127 static void collect_mem_ranges(struct memory_map *mem_map) 1128 { 1129 const struct core_mmu_phys_mem *mem = NULL; 1130 vaddr_t ram_start = secure_only[0].paddr; 1131 size_t n = 0; 1132 1133 #define ADD_PHYS_MEM(_type, _addr, _size) \ 1134 add_phys_mem(mem_map, #_addr, (_type), (_addr), (_size)) 1135 1136 if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) { 1137 paddr_t next_pa = 0; 1138 1139 /* 1140 * Read-only and read-execute physical memory areas must 1141 * not be mapped by MEM_AREA_SEC_RAM_OVERALL, but all the 1142 * read/write should. 
1143 */ 1144 ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, ram_start, 1145 VCORE_UNPG_RX_PA - ram_start); 1146 assert(VCORE_UNPG_RX_PA >= ram_start); 1147 tee_ram_initial_offs = VCORE_UNPG_RX_PA - ram_start; 1148 DMSG("tee_ram_initial_offs %#zx", tee_ram_initial_offs); 1149 ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RX, VCORE_UNPG_RX_PA, 1150 VCORE_UNPG_RX_SZ); 1151 ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, VCORE_UNPG_RO_PA, 1152 VCORE_UNPG_RO_SZ); 1153 1154 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 1155 ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RO, VCORE_UNPG_RW_PA, 1156 VCORE_UNPG_RW_SZ); 1157 ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_UNPG_RW_PA, 1158 VCORE_UNPG_RW_SZ); 1159 1160 ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RW, VCORE_NEX_RW_PA, 1161 VCORE_NEX_RW_SZ); 1162 ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_NEX_RW_PA, 1163 VCORE_NEX_RW_SZ); 1164 1165 ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RW, VCORE_FREE_PA, 1166 VCORE_FREE_SZ); 1167 ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_FREE_PA, 1168 VCORE_FREE_SZ); 1169 next_pa = VCORE_FREE_PA + VCORE_FREE_SZ; 1170 } else { 1171 ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RW, VCORE_UNPG_RW_PA, 1172 VCORE_UNPG_RW_SZ); 1173 ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_UNPG_RW_PA, 1174 VCORE_UNPG_RW_SZ); 1175 1176 ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RW, VCORE_FREE_PA, 1177 VCORE_FREE_SZ); 1178 ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_FREE_PA, 1179 VCORE_FREE_SZ); 1180 next_pa = VCORE_FREE_PA + VCORE_FREE_SZ; 1181 } 1182 1183 if (IS_ENABLED(CFG_WITH_PAGER)) { 1184 paddr_t pa = 0; 1185 size_t sz = 0; 1186 1187 ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RX, VCORE_INIT_RX_PA, 1188 VCORE_INIT_RX_SZ); 1189 ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RO, VCORE_INIT_RO_PA, 1190 VCORE_INIT_RO_SZ); 1191 /* 1192 * Core init mapping shall cover up to end of the 1193 * physical RAM. This is required since the hash 1194 * table is appended to the binary data after the 1195 * firmware build sequence. 
1196 */ 1197 pa = VCORE_INIT_RO_PA + VCORE_INIT_RO_SZ; 1198 sz = TEE_RAM_START + TEE_RAM_PH_SIZE - pa; 1199 ADD_PHYS_MEM(MEM_AREA_TEE_RAM, pa, sz); 1200 } else { 1201 ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, next_pa, 1202 secure_only[0].paddr + 1203 secure_only[0].size - next_pa); 1204 } 1205 } else { 1206 ADD_PHYS_MEM(MEM_AREA_TEE_RAM, TEE_RAM_START, TEE_RAM_PH_SIZE); 1207 ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, secure_only[n].paddr, 1208 secure_only[0].size); 1209 } 1210 1211 for (n = 1; n < ARRAY_SIZE(secure_only); n++) 1212 ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, secure_only[n].paddr, 1213 secure_only[n].size); 1214 1215 if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS)) 1216 ADD_PHYS_MEM(MEM_AREA_TEE_ASAN, ASAN_MAP_PA, ASAN_MAP_SZ); 1217 1218 #undef ADD_PHYS_MEM 1219 1220 /* Collect device memory info from SP manifest */ 1221 if (IS_ENABLED(CFG_CORE_SEL2_SPMC)) 1222 collect_device_mem_ranges(mem_map); 1223 1224 for (mem = phys_mem_map_begin; mem < phys_mem_map_end; mem++) { 1225 /* Only unmapped virtual range may have a null phys addr */ 1226 assert(mem->addr || !core_mmu_type_to_attr(mem->type)); 1227 1228 add_phys_mem(mem_map, mem->name, mem->type, 1229 mem->addr, mem->size); 1230 } 1231 1232 if (IS_ENABLED(CFG_SECURE_DATA_PATH)) 1233 verify_special_mem_areas(mem_map, phys_sdp_mem_begin, 1234 phys_sdp_mem_end, "SDP"); 1235 1236 add_va_space(mem_map, MEM_AREA_RES_VASPACE, CFG_RESERVED_VASPACE_SIZE); 1237 add_va_space(mem_map, MEM_AREA_SHM_VASPACE, SHM_VASPACE_SIZE); 1238 if (IS_ENABLED(CFG_DYN_CONFIG)) { 1239 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) 1240 add_va_space(mem_map, MEM_AREA_NEX_DYN_VASPACE, 1241 ROUNDUP(CFG_NEX_DYN_VASPACE_SIZE, 1242 CORE_MMU_PGDIR_SIZE)); 1243 add_va_space(mem_map, MEM_AREA_TEE_DYN_VASPACE, 1244 CFG_TEE_DYN_VASPACE_SIZE); 1245 } 1246 } 1247 1248 static void assign_mem_granularity(struct memory_map *mem_map) 1249 { 1250 size_t n = 0; 1251 1252 /* 1253 * Assign region sizes, note that MEM_AREA_TEE_RAM always uses 1254 * SMALL_PAGE_SIZE. 1255 */ 1256 for (n = 0; n < mem_map->count; n++) { 1257 paddr_t mask = mem_map->map[n].pa | mem_map->map[n].size; 1258 1259 if (mask & SMALL_PAGE_MASK) 1260 panic("Impossible memory alignment"); 1261 1262 if (map_is_tee_ram(mem_map->map + n)) 1263 mem_map->map[n].region_size = SMALL_PAGE_SIZE; 1264 else 1265 mem_map->map[n].region_size = CORE_MMU_PGDIR_SIZE; 1266 } 1267 } 1268 1269 static bool place_tee_ram_at_top(paddr_t paddr) 1270 { 1271 return paddr > BIT64(core_mmu_get_va_width()) / 2; 1272 } 1273 1274 /* 1275 * MMU arch driver shall override this function if it helps 1276 * optimizing the memory footprint of the address translation tables. 1277 */ 1278 bool __weak core_mmu_prefer_tee_ram_at_top(paddr_t paddr) 1279 { 1280 return place_tee_ram_at_top(paddr); 1281 } 1282 1283 static bool assign_mem_va_dir(vaddr_t tee_ram_va, struct memory_map *mem_map, 1284 bool tee_ram_at_top) 1285 { 1286 struct tee_mmap_region *map = NULL; 1287 bool va_is_nex_shared = false; 1288 bool va_is_secure = true; 1289 vaddr_t va = 0; 1290 size_t n = 0; 1291 1292 /* 1293 * tee_ram_va might equals 0 when CFG_CORE_ASLR=y. 1294 * 0 is by design an invalid va, so return false directly. 1295 */ 1296 if (!tee_ram_va) 1297 return false; 1298 1299 /* Clear eventual previous assignments */ 1300 for (n = 0; n < mem_map->count; n++) 1301 mem_map->map[n].va = 0; 1302 1303 /* 1304 * TEE RAM regions are always aligned with region_size. 
1305 * 1306 * Note that MEM_AREA_PAGER_VASPACE also counts as TEE RAM here 1307 * since it handles virtual memory which covers the part of the ELF 1308 * that cannot fit directly into memory. 1309 */ 1310 va = tee_ram_va + tee_ram_initial_offs; 1311 for (n = 0; n < mem_map->count; n++) { 1312 map = mem_map->map + n; 1313 if (map_is_tee_ram(map) || 1314 map->type == MEM_AREA_PAGER_VASPACE) { 1315 assert(!(va & (map->region_size - 1))); 1316 assert(!(map->size & (map->region_size - 1))); 1317 map->va = va; 1318 if (ADD_OVERFLOW(va, map->size, &va)) 1319 return false; 1320 if (!core_mmu_va_is_valid(va)) 1321 return false; 1322 } 1323 } 1324 1325 if (tee_ram_at_top) { 1326 /* 1327 * Map non-tee ram regions at addresses lower than the tee 1328 * ram region. 1329 */ 1330 va = tee_ram_va; 1331 for (n = 0; n < mem_map->count; n++) { 1332 map = mem_map->map + n; 1333 map->attr = core_mmu_type_to_attr(map->type); 1334 if (map->va) 1335 continue; 1336 1337 if (!IS_ENABLED(CFG_WITH_LPAE) && 1338 va_is_secure != map_is_secure(map)) { 1339 va_is_secure = !va_is_secure; 1340 va = ROUNDDOWN(va, CORE_MMU_PGDIR_SIZE); 1341 } else if (va_is_nex_shared != 1342 core_mmu_type_is_nex_shared(map->type)) { 1343 va_is_nex_shared = !va_is_nex_shared; 1344 va = ROUNDDOWN(va, CORE_MMU_PGDIR_SIZE); 1345 } 1346 1347 if (SUB_OVERFLOW(va, map->size, &va)) 1348 return false; 1349 va = ROUNDDOWN2(va, map->region_size); 1350 /* 1351 * Make sure that va is aligned with pa for 1352 * efficient pgdir mapping. Basically pa & 1353 * pgdir_mask should be == va & pgdir_mask 1354 */ 1355 if (map->size > 2 * CORE_MMU_PGDIR_SIZE) { 1356 if (SUB_OVERFLOW(va, CORE_MMU_PGDIR_SIZE, &va)) 1357 return false; 1358 va += (map->pa - va) & CORE_MMU_PGDIR_MASK; 1359 } 1360 map->va = va; 1361 } 1362 } else { 1363 /* 1364 * Map non-tee ram regions at addresses higher than the tee 1365 * ram region. 1366 */ 1367 for (n = 0; n < mem_map->count; n++) { 1368 map = mem_map->map + n; 1369 map->attr = core_mmu_type_to_attr(map->type); 1370 if (map->va) 1371 continue; 1372 1373 if (!IS_ENABLED(CFG_WITH_LPAE) && 1374 va_is_secure != map_is_secure(map)) { 1375 va_is_secure = !va_is_secure; 1376 if (ROUNDUP_OVERFLOW(va, CORE_MMU_PGDIR_SIZE, 1377 &va)) 1378 return false; 1379 } else if (va_is_nex_shared != 1380 core_mmu_type_is_nex_shared(map->type)) { 1381 va_is_nex_shared = !va_is_nex_shared; 1382 if (ROUNDUP_OVERFLOW(va, CORE_MMU_PGDIR_SIZE, 1383 &va)) 1384 return false; 1385 } 1386 1387 if (ROUNDUP2_OVERFLOW(va, map->region_size, &va)) 1388 return false; 1389 /* 1390 * Make sure that va is aligned with pa for 1391 * efficient pgdir mapping. Basically pa & 1392 * pgdir_mask should be == va & pgdir_mask 1393 */ 1394 if (map->size > 2 * CORE_MMU_PGDIR_SIZE) { 1395 vaddr_t offs = (map->pa - va) & 1396 CORE_MMU_PGDIR_MASK; 1397 1398 if (ADD_OVERFLOW(va, offs, &va)) 1399 return false; 1400 } 1401 1402 map->va = va; 1403 if (ADD_OVERFLOW(va, map->size, &va)) 1404 return false; 1405 if (!core_mmu_va_is_valid(va)) 1406 return false; 1407 } 1408 } 1409 1410 return true; 1411 } 1412 1413 static bool assign_mem_va(vaddr_t tee_ram_va, struct memory_map *mem_map) 1414 { 1415 bool tee_ram_at_top = place_tee_ram_at_top(tee_ram_va); 1416 1417 /* 1418 * Check that we're not overlapping with the user VA range. 1419 */ 1420 if (IS_ENABLED(CFG_WITH_LPAE)) { 1421 /* 1422 * User VA range is supposed to be defined after these 1423 * mappings have been established. 
1424 */ 1425 assert(!core_mmu_user_va_range_is_defined()); 1426 } else { 1427 vaddr_t user_va_base = 0; 1428 size_t user_va_size = 0; 1429 1430 assert(core_mmu_user_va_range_is_defined()); 1431 core_mmu_get_user_va_range(&user_va_base, &user_va_size); 1432 if (tee_ram_va < (user_va_base + user_va_size)) 1433 return false; 1434 } 1435 1436 if (IS_ENABLED(CFG_WITH_PAGER)) { 1437 bool prefered_dir = core_mmu_prefer_tee_ram_at_top(tee_ram_va); 1438 1439 /* Try whole mapping covered by a single base xlat entry */ 1440 if (prefered_dir != tee_ram_at_top && 1441 assign_mem_va_dir(tee_ram_va, mem_map, prefered_dir)) 1442 return true; 1443 } 1444 1445 return assign_mem_va_dir(tee_ram_va, mem_map, tee_ram_at_top); 1446 } 1447 1448 static int cmp_init_mem_map(const void *a, const void *b) 1449 { 1450 const struct tee_mmap_region *mm_a = a; 1451 const struct tee_mmap_region *mm_b = b; 1452 int rc = 0; 1453 1454 rc = CMP_TRILEAN(mm_a->region_size, mm_b->region_size); 1455 if (!rc) 1456 rc = CMP_TRILEAN(mm_a->pa, mm_b->pa); 1457 /* 1458 * 32bit MMU descriptors cannot mix secure and non-secure mapping in 1459 * the same level2 table. Hence sort secure mapping from non-secure 1460 * mapping. 1461 */ 1462 if (!rc && !IS_ENABLED(CFG_WITH_LPAE)) 1463 rc = CMP_TRILEAN(map_is_secure(mm_a), map_is_secure(mm_b)); 1464 1465 /* 1466 * Nexus mappings shared between partitions should not be mixed 1467 * with other mappings in the same translation table. Hence sort 1468 * nexus shared mappings from other mappings. 1469 */ 1470 if (!rc) 1471 rc = CMP_TRILEAN(core_mmu_type_is_nex_shared(mm_a->type), 1472 core_mmu_type_is_nex_shared(mm_b->type)); 1473 1474 return rc; 1475 } 1476 1477 static bool mem_map_add_id_map(struct memory_map *mem_map, 1478 vaddr_t id_map_start, vaddr_t id_map_end) 1479 { 1480 vaddr_t start = ROUNDDOWN(id_map_start, SMALL_PAGE_SIZE); 1481 vaddr_t end = ROUNDUP(id_map_end, SMALL_PAGE_SIZE); 1482 size_t len = end - start; 1483 size_t n = 0; 1484 1485 1486 for (n = 0; n < mem_map->count; n++) 1487 if (core_is_buffer_intersect(mem_map->map[n].va, 1488 mem_map->map[n].size, start, len)) 1489 return false; 1490 1491 grow_mem_map(mem_map); 1492 mem_map->map[mem_map->count - 1] = (struct tee_mmap_region){ 1493 .type = MEM_AREA_IDENTITY_MAP_RX, 1494 /* 1495 * Could use CORE_MMU_PGDIR_SIZE to potentially save a 1496 * translation table, at the increased risk of clashes with 1497 * the rest of the memory map. 1498 */ 1499 .region_size = SMALL_PAGE_SIZE, 1500 .pa = start, 1501 .va = start, 1502 .size = len, 1503 .attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX), 1504 }; 1505 1506 return true; 1507 } 1508 1509 static struct memory_map *init_mem_map(struct memory_map *mem_map, 1510 unsigned long seed, 1511 unsigned long *ret_offs) 1512 { 1513 /* 1514 * @id_map_start and @id_map_end describes a physical memory range 1515 * that must be mapped Read-Only eXecutable at identical virtual 1516 * addresses. 1517 */ 1518 vaddr_t id_map_start = (vaddr_t)__identity_map_init_start; 1519 vaddr_t id_map_end = (vaddr_t)__identity_map_init_end; 1520 vaddr_t start_addr = secure_only[0].paddr; 1521 unsigned long offs = 0; 1522 1523 collect_mem_ranges(mem_map); 1524 assign_mem_granularity(mem_map); 1525 1526 /* 1527 * To ease mapping and lower use of xlat tables, sort mapping 1528 * description moving small-page regions after the pgdir regions. 
1529 */ 1530 qsort(mem_map->map, mem_map->count, sizeof(struct tee_mmap_region), 1531 cmp_init_mem_map); 1532 1533 if (IS_ENABLED(CFG_WITH_PAGER)) 1534 add_pager_vaspace(mem_map); 1535 1536 if (IS_ENABLED(CFG_CORE_ASLR) && seed) { 1537 vaddr_t ba = 0; 1538 size_t n = 0; 1539 1540 for (n = 0; n < 3; n++) { 1541 ba = arch_aslr_base_addr(start_addr, seed, n); 1542 if (assign_mem_va(ba, mem_map) && 1543 mem_map_add_id_map(mem_map, id_map_start, 1544 id_map_end)) { 1545 offs = ba - start_addr; 1546 DMSG("Mapping core at %#"PRIxVA" offs %#lx", 1547 ba, offs); 1548 goto out; 1549 } else { 1550 DMSG("Failed to map core at %#"PRIxVA, ba); 1551 } 1552 } 1553 EMSG("Failed to map core with seed %#lx", seed); 1554 } 1555 1556 if (!assign_mem_va(start_addr, mem_map)) 1557 panic(); 1558 1559 out: 1560 qsort(mem_map->map, mem_map->count, sizeof(struct tee_mmap_region), 1561 cmp_mmap_by_lower_va); 1562 1563 dump_mmap_table(mem_map); 1564 1565 *ret_offs = offs; 1566 return mem_map; 1567 } 1568 1569 static void check_mem_map(struct memory_map *mem_map) 1570 { 1571 struct tee_mmap_region *m = NULL; 1572 size_t n = 0; 1573 1574 for (n = 0; n < mem_map->count; n++) { 1575 m = mem_map->map + n; 1576 switch (m->type) { 1577 case MEM_AREA_TEE_RAM: 1578 case MEM_AREA_TEE_RAM_RX: 1579 case MEM_AREA_TEE_RAM_RO: 1580 case MEM_AREA_TEE_RAM_RW: 1581 case MEM_AREA_INIT_RAM_RX: 1582 case MEM_AREA_INIT_RAM_RO: 1583 case MEM_AREA_NEX_RAM_RW: 1584 case MEM_AREA_NEX_RAM_RO: 1585 case MEM_AREA_IDENTITY_MAP_RX: 1586 if (!pbuf_is_inside(secure_only, m->pa, m->size)) 1587 panic("TEE_RAM can't fit in secure_only"); 1588 break; 1589 case MEM_AREA_SEC_RAM_OVERALL: 1590 if (!pbuf_is_inside(secure_only, m->pa, m->size)) 1591 panic("SEC_RAM_OVERALL can't fit in secure_only"); 1592 break; 1593 case MEM_AREA_NSEC_SHM: 1594 if (!pbuf_is_inside(nsec_shared, m->pa, m->size)) 1595 panic("NS_SHM can't fit in nsec_shared"); 1596 break; 1597 case MEM_AREA_TEE_COHERENT: 1598 case MEM_AREA_TEE_ASAN: 1599 case MEM_AREA_IO_SEC: 1600 case MEM_AREA_IO_NSEC: 1601 case MEM_AREA_EXT_DT: 1602 case MEM_AREA_MANIFEST_DT: 1603 case MEM_AREA_TRANSFER_LIST: 1604 case MEM_AREA_RAM_SEC: 1605 case MEM_AREA_RAM_NSEC: 1606 case MEM_AREA_ROM_SEC: 1607 case MEM_AREA_RES_VASPACE: 1608 case MEM_AREA_SHM_VASPACE: 1609 case MEM_AREA_PAGER_VASPACE: 1610 case MEM_AREA_NEX_DYN_VASPACE: 1611 case MEM_AREA_TEE_DYN_VASPACE: 1612 break; 1613 default: 1614 EMSG("Uhandled memtype %d", m->type); 1615 panic(); 1616 } 1617 } 1618 } 1619 1620 /* 1621 * core_init_mmu_map() - init tee core default memory mapping 1622 * 1623 * This routine sets the static default TEE core mapping. If @seed is > 0 1624 * and configured with CFG_CORE_ASLR it will map tee core at a location 1625 * based on the seed and return the offset from the link address. 1626 * 1627 * If an error happened: core_init_mmu_map is expected to panic. 1628 * 1629 * Note: this function is weak just to make it possible to exclude it from 1630 * the unpaged area. 
1631 */ 1632 void __weak core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg) 1633 { 1634 #ifndef CFG_NS_VIRTUALIZATION 1635 vaddr_t start = ROUNDDOWN((vaddr_t)__nozi_start, SMALL_PAGE_SIZE); 1636 #else 1637 vaddr_t start = ROUNDDOWN((vaddr_t)__vcore_nex_rw_start, 1638 SMALL_PAGE_SIZE); 1639 #endif 1640 #ifdef CFG_DYN_CONFIG 1641 vaddr_t len = ROUNDUP(VCORE_FREE_END_PA, SMALL_PAGE_SIZE) - start; 1642 #else 1643 vaddr_t len = ROUNDUP((vaddr_t)__nozi_end, SMALL_PAGE_SIZE) - start; 1644 #endif 1645 struct tee_mmap_region tmp_mmap_region = { }; 1646 struct memory_map mem_map = { }; 1647 unsigned long offs = 0; 1648 1649 if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE) && 1650 (core_mmu_tee_load_pa & SMALL_PAGE_MASK)) 1651 panic("OP-TEE load address is not page aligned"); 1652 1653 check_sec_nsec_mem_config(); 1654 1655 mem_map.alloc_count = CFG_MMAP_REGIONS; 1656 mem_map.map = boot_mem_alloc_tmp(mem_map.alloc_count * 1657 sizeof(*mem_map.map), 1658 alignof(*mem_map.map)); 1659 memory_map_realloc_func = boot_mem_realloc_memory_map; 1660 1661 static_memory_map = (struct memory_map){ 1662 .map = &tmp_mmap_region, 1663 .alloc_count = 1, 1664 .count = 1, 1665 }; 1666 /* 1667 * Add a entry covering the translation tables which will be 1668 * involved in some virt_to_phys() and phys_to_virt() conversions. 1669 */ 1670 static_memory_map.map[0] = (struct tee_mmap_region){ 1671 .type = MEM_AREA_TEE_RAM, 1672 .region_size = SMALL_PAGE_SIZE, 1673 .pa = start, 1674 .va = start, 1675 .size = len, 1676 .attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX), 1677 }; 1678 1679 init_mem_map(&mem_map, seed, &offs); 1680 1681 check_mem_map(&mem_map); 1682 core_init_mmu(&mem_map); 1683 dump_xlat_table(0x0, CORE_MMU_BASE_TABLE_LEVEL); 1684 core_init_mmu_regs(cfg); 1685 cfg->map_offset = offs; 1686 static_memory_map = mem_map; 1687 boot_mem_add_reloc(&static_memory_map.map); 1688 } 1689 1690 void core_mmu_save_mem_map(void) 1691 { 1692 size_t alloc_count = static_memory_map.count + 5; 1693 size_t elem_sz = sizeof(*static_memory_map.map); 1694 void *p = NULL; 1695 1696 p = nex_calloc(alloc_count, elem_sz); 1697 if (!p) 1698 panic(); 1699 memcpy(p, static_memory_map.map, static_memory_map.count * elem_sz); 1700 static_memory_map.map = p; 1701 static_memory_map.alloc_count = alloc_count; 1702 memory_map_realloc_func = heap_realloc_memory_map; 1703 } 1704 1705 bool core_mmu_mattr_is_ok(uint32_t mattr) 1706 { 1707 /* 1708 * Keep in sync with core_mmu_lpae.c:mattr_to_desc and 1709 * core_mmu_v7.c:mattr_to_texcb 1710 */ 1711 1712 switch ((mattr >> TEE_MATTR_MEM_TYPE_SHIFT) & TEE_MATTR_MEM_TYPE_MASK) { 1713 case TEE_MATTR_MEM_TYPE_DEV: 1714 case TEE_MATTR_MEM_TYPE_STRONGLY_O: 1715 case TEE_MATTR_MEM_TYPE_CACHED: 1716 case TEE_MATTR_MEM_TYPE_TAGGED: 1717 return true; 1718 default: 1719 return false; 1720 } 1721 } 1722 1723 /* 1724 * test attributes of target physical buffer 1725 * 1726 * Flags: pbuf_is(SECURE, NOT_SECURE, RAM, IOMEM, KEYVAULT). 
 *
 */
bool core_pbuf_is(uint32_t attr, paddr_t pbuf, size_t len)
{
	struct tee_mmap_region *map;

	/* Empty buffers comply with anything */
	if (len == 0)
		return true;

	switch (attr) {
	case CORE_MEM_SEC:
		return pbuf_is_inside(secure_only, pbuf, len);
	case CORE_MEM_NON_SEC:
		return pbuf_is_inside(nsec_shared, pbuf, len) ||
			pbuf_is_nsec_ddr(pbuf, len);
	case CORE_MEM_TEE_RAM:
		return core_is_buffer_inside(pbuf, len, TEE_RAM_START,
					     TEE_RAM_PH_SIZE);
#ifdef CFG_CORE_RESERVED_SHM
	case CORE_MEM_NSEC_SHM:
		return core_is_buffer_inside(pbuf, len, TEE_SHMEM_START,
					     TEE_SHMEM_SIZE);
#endif
	case CORE_MEM_SDP_MEM:
		return pbuf_is_sdp_mem(pbuf, len);
	case CORE_MEM_CACHED:
		map = find_map_by_pa(pbuf);
		if (!map || !pbuf_inside_map_area(pbuf, len, map))
			return false;
		return mattr_is_cached(map->attr);
	default:
		return false;
	}
}

/* test attributes of target virtual buffer (in core mapping) */
bool core_vbuf_is(uint32_t attr, const void *vbuf, size_t len)
{
	paddr_t p;

	/* Empty buffers comply with anything */
	if (len == 0)
		return true;

	p = virt_to_phys((void *)vbuf);
	if (!p)
		return false;

	return core_pbuf_is(attr, p, len);
}

/* core_va2pa - teecore exported service */
static int __maybe_unused core_va2pa_helper(void *va, paddr_t *pa)
{
	struct tee_mmap_region *map;

	map = find_map_by_va(va);
	if (!va_is_in_map(map, (vaddr_t)va))
		return -1;

	/*
	 * We can calculate the PA for a static map. Virtual address ranges
	 * reserved for core dynamic mapping return a 'match' (return 0;)
	 * together with an invalid null physical address.
1792 */ 1793 if (map->pa) 1794 *pa = map->pa + (vaddr_t)va - map->va; 1795 else 1796 *pa = 0; 1797 1798 return 0; 1799 } 1800 1801 static void *map_pa2va(struct tee_mmap_region *map, paddr_t pa, size_t len) 1802 { 1803 if (!pa_is_in_map(map, pa, len)) 1804 return NULL; 1805 1806 return (void *)(vaddr_t)(map->va + pa - map->pa); 1807 } 1808 1809 /* 1810 * teecore gets some memory area definitions 1811 */ 1812 void core_mmu_get_mem_by_type(enum teecore_memtypes type, vaddr_t *s, 1813 vaddr_t *e) 1814 { 1815 struct tee_mmap_region *map = find_map_by_type(type); 1816 1817 if (map) { 1818 *s = map->va; 1819 *e = map->va + map->size; 1820 } else { 1821 *s = 0; 1822 *e = 0; 1823 } 1824 } 1825 1826 enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa) 1827 { 1828 struct tee_mmap_region *map = find_map_by_pa(pa); 1829 1830 /* VA spaces have no valid PAs in the memory map */ 1831 if (!map || map->type == MEM_AREA_RES_VASPACE || 1832 map->type == MEM_AREA_SHM_VASPACE) 1833 return MEM_AREA_MAXTYPE; 1834 return map->type; 1835 } 1836 1837 void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned int idx, 1838 paddr_t pa, uint32_t attr) 1839 { 1840 assert(idx < tbl_info->num_entries); 1841 core_mmu_set_entry_primitive(tbl_info->table, tbl_info->level, 1842 idx, pa, attr); 1843 } 1844 1845 void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned int idx, 1846 paddr_t *pa, uint32_t *attr) 1847 { 1848 assert(idx < tbl_info->num_entries); 1849 core_mmu_get_entry_primitive(tbl_info->table, tbl_info->level, 1850 idx, pa, attr); 1851 } 1852 1853 static void clear_region(struct core_mmu_table_info *tbl_info, 1854 struct tee_mmap_region *region) 1855 { 1856 unsigned int end = 0; 1857 unsigned int idx = 0; 1858 1859 /* va, len and pa should be block aligned */ 1860 assert(!core_mmu_get_block_offset(tbl_info, region->va)); 1861 assert(!core_mmu_get_block_offset(tbl_info, region->size)); 1862 assert(!core_mmu_get_block_offset(tbl_info, region->pa)); 1863 1864 idx = core_mmu_va2idx(tbl_info, region->va); 1865 end = core_mmu_va2idx(tbl_info, region->va + region->size); 1866 1867 while (idx < end) { 1868 core_mmu_set_entry(tbl_info, idx, 0, 0); 1869 idx++; 1870 } 1871 } 1872 1873 static void set_region(struct core_mmu_table_info *tbl_info, 1874 struct tee_mmap_region *region) 1875 { 1876 unsigned int end; 1877 unsigned int idx; 1878 paddr_t pa; 1879 1880 /* va, len and pa should be block aligned */ 1881 assert(!core_mmu_get_block_offset(tbl_info, region->va)); 1882 assert(!core_mmu_get_block_offset(tbl_info, region->size)); 1883 assert(!core_mmu_get_block_offset(tbl_info, region->pa)); 1884 1885 idx = core_mmu_va2idx(tbl_info, region->va); 1886 end = core_mmu_va2idx(tbl_info, region->va + region->size); 1887 pa = region->pa; 1888 1889 while (idx < end) { 1890 core_mmu_set_entry(tbl_info, idx, pa, region->attr); 1891 idx++; 1892 pa += BIT64(tbl_info->shift); 1893 } 1894 } 1895 1896 static void set_pg_region(struct core_mmu_table_info *dir_info, 1897 struct vm_region *region, struct pgt **pgt, 1898 struct core_mmu_table_info *pg_info) 1899 { 1900 struct tee_mmap_region r = { 1901 .va = region->va, 1902 .size = region->size, 1903 .attr = region->attr, 1904 }; 1905 vaddr_t end = r.va + r.size; 1906 uint32_t pgt_attr = (r.attr & TEE_MATTR_SECURE) | TEE_MATTR_TABLE; 1907 1908 while (r.va < end) { 1909 if (!pg_info->table || 1910 r.va >= (pg_info->va_base + CORE_MMU_PGDIR_SIZE)) { 1911 /* 1912 * We're assigning a new translation table. 
1913 */ 1914 unsigned int idx; 1915 1916 /* Virtual addresses must grow */ 1917 assert(r.va > pg_info->va_base); 1918 1919 idx = core_mmu_va2idx(dir_info, r.va); 1920 pg_info->va_base = core_mmu_idx2va(dir_info, idx); 1921 1922 /* 1923 * Advance pgt to va_base, note that we may need to 1924 * skip multiple page tables if there are large 1925 * holes in the vm map. 1926 */ 1927 while ((*pgt)->vabase < pg_info->va_base) { 1928 *pgt = SLIST_NEXT(*pgt, link); 1929 /* We should have allocated enough */ 1930 assert(*pgt); 1931 } 1932 assert((*pgt)->vabase == pg_info->va_base); 1933 pg_info->table = (*pgt)->tbl; 1934 1935 core_mmu_set_entry(dir_info, idx, 1936 virt_to_phys(pg_info->table), 1937 pgt_attr); 1938 } 1939 1940 r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base), 1941 end - r.va); 1942 1943 if (!(*pgt)->populated && !mobj_is_paged(region->mobj)) { 1944 size_t granule = BIT(pg_info->shift); 1945 size_t offset = r.va - region->va + region->offset; 1946 1947 r.size = MIN(r.size, 1948 mobj_get_phys_granule(region->mobj)); 1949 r.size = ROUNDUP(r.size, SMALL_PAGE_SIZE); 1950 1951 if (mobj_get_pa(region->mobj, offset, granule, 1952 &r.pa) != TEE_SUCCESS) 1953 panic("Failed to get PA of unpaged mobj"); 1954 set_region(pg_info, &r); 1955 } 1956 r.va += r.size; 1957 } 1958 } 1959 1960 static bool can_map_at_level(paddr_t paddr, vaddr_t vaddr, 1961 size_t size_left, paddr_t block_size, 1962 struct tee_mmap_region *mm) 1963 { 1964 /* VA and PA are aligned to block size at current level */ 1965 if ((vaddr | paddr) & (block_size - 1)) 1966 return false; 1967 1968 /* Remainder fits into block at current level */ 1969 if (size_left < block_size) 1970 return false; 1971 1972 /* 1973 * The required block size of the region is compatible with the 1974 * block size of the current level. 1975 */ 1976 if (mm->region_size < block_size) 1977 return false; 1978 1979 #ifdef CFG_WITH_PAGER 1980 /* 1981 * If pager is enabled, we need to map TEE RAM and the whole pager 1982 * regions with small pages only 1983 */ 1984 if ((map_is_tee_ram(mm) || mm->type == MEM_AREA_PAGER_VASPACE) && 1985 block_size != SMALL_PAGE_SIZE) 1986 return false; 1987 #endif 1988 1989 return true; 1990 } 1991 1992 void core_mmu_map_region(struct mmu_partition *prtn, struct tee_mmap_region *mm) 1993 { 1994 struct core_mmu_table_info tbl_info = { }; 1995 unsigned int idx = 0; 1996 vaddr_t vaddr = mm->va; 1997 paddr_t paddr = mm->pa; 1998 ssize_t size_left = mm->size; 1999 uint32_t attr = mm->attr; 2000 unsigned int level = 0; 2001 bool table_found = false; 2002 uint32_t old_attr = 0; 2003 2004 assert(!((vaddr | paddr) & SMALL_PAGE_MASK)); 2005 if (!paddr) 2006 attr = 0; 2007 2008 while (size_left > 0) { 2009 level = CORE_MMU_BASE_TABLE_LEVEL; 2010 2011 while (true) { 2012 paddr_t block_size = 0; 2013 2014 assert(core_mmu_level_in_range(level)); 2015 2016 table_found = core_mmu_find_table(prtn, vaddr, level, 2017 &tbl_info); 2018 if (!table_found) 2019 panic("can't find table for mapping"); 2020 2021 block_size = BIT64(tbl_info.shift); 2022 2023 idx = core_mmu_va2idx(&tbl_info, vaddr); 2024 if (!can_map_at_level(paddr, vaddr, size_left, 2025 block_size, mm)) { 2026 bool secure = mm->attr & TEE_MATTR_SECURE; 2027 2028 /* 2029 * This part of the region can't be mapped at 2030 * this level. Need to go deeper. 
				 */
				if (!core_mmu_entry_to_finer_grained(&tbl_info,
								     idx,
								     secure))
					panic("Can't divide MMU entry");
				level = tbl_info.next_level;
				continue;
			}

			/* We can map part of the region at current level */
			core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
			if (old_attr)
				panic("Page is already mapped");

			core_mmu_set_entry(&tbl_info, idx, paddr, attr);
			/*
			 * Dynamic vaspace regions don't have a physical
			 * address initially but we need to allocate and
			 * initialize the translation tables now for later
			 * updates to work properly.
			 */
			if (paddr)
				paddr += block_size;
			vaddr += block_size;
			size_left -= block_size;

			break;
		}
	}
}

TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages,
			      enum teecore_memtypes memtype)
{
	TEE_Result ret;
	struct core_mmu_table_info tbl_info;
	struct tee_mmap_region *mm;
	unsigned int idx;
	uint32_t old_attr;
	uint32_t exceptions;
	vaddr_t vaddr = vstart;
	size_t i;
	bool secure;

	assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX));

	secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE;

	if (vaddr & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	exceptions = mmu_lock();

	mm = find_map_by_va((void *)vaddr);
	if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1))
		panic("VA does not belong to any known mm region");

	if (!core_mmu_is_dynamic_vaspace(mm))
		panic("Trying to map into static region");

	for (i = 0; i < num_pages; i++) {
		if (pages[i] & SMALL_PAGE_MASK) {
			ret = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		while (true) {
			if (!core_mmu_find_table(NULL, vaddr, UINT_MAX,
						 &tbl_info))
				panic("Can't find pagetable for vaddr");

			idx = core_mmu_va2idx(&tbl_info, vaddr);
			if (tbl_info.shift == SMALL_PAGE_SHIFT)
				break;

			/* This is a supertable. Need to divide it. */
			if (!core_mmu_entry_to_finer_grained(&tbl_info, idx,
							     secure))
				panic("Failed to spread pgdir on small tables");
		}

		core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
		if (old_attr)
			panic("Page is already mapped");

		core_mmu_set_entry(&tbl_info, idx, pages[i],
				   core_mmu_type_to_attr(memtype));
		vaddr += SMALL_PAGE_SIZE;
	}

	/*
	 * Make sure all the changes to translation tables are visible
	 * before returning. TLB doesn't need to be invalidated as we are
	 * guaranteed that there's no valid mapping in this range.
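	 * The loop above panics if it finds an already valid entry, so
	 * every entry written here was previously invalid.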
	 */
	core_mmu_table_write_barrier();
	mmu_unlock(exceptions);

	return TEE_SUCCESS;
err:
	mmu_unlock(exceptions);

	if (i)
		core_mmu_unmap_pages(vstart, i);

	return ret;
}

TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart,
					 size_t num_pages,
					 enum teecore_memtypes memtype)
{
	struct core_mmu_table_info tbl_info = { };
	struct tee_mmap_region *mm = NULL;
	unsigned int idx = 0;
	uint32_t old_attr = 0;
	uint32_t exceptions = 0;
	vaddr_t vaddr = vstart;
	paddr_t paddr = pstart;
	size_t i = 0;
	bool secure = false;

	assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX));

	secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE;

	if ((vaddr | paddr) & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	exceptions = mmu_lock();

	mm = find_map_by_va((void *)vaddr);
	if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1))
		panic("VA does not belong to any known mm region");

	if (!core_mmu_is_dynamic_vaspace(mm))
		panic("Trying to map into static region");

	for (i = 0; i < num_pages; i++) {
		while (true) {
			if (!core_mmu_find_table(NULL, vaddr, UINT_MAX,
						 &tbl_info))
				panic("Can't find pagetable for vaddr");

			idx = core_mmu_va2idx(&tbl_info, vaddr);
			if (tbl_info.shift == SMALL_PAGE_SHIFT)
				break;

			/* This is a supertable. Need to divide it. */
			if (!core_mmu_entry_to_finer_grained(&tbl_info, idx,
							     secure))
				panic("Failed to spread pgdir on small tables");
		}

		core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
		if (old_attr)
			panic("Page is already mapped");

		core_mmu_set_entry(&tbl_info, idx, paddr,
				   core_mmu_type_to_attr(memtype));
		paddr += SMALL_PAGE_SIZE;
		vaddr += SMALL_PAGE_SIZE;
	}

	/*
	 * Make sure all the changes to translation tables are visible
	 * before returning. TLB doesn't need to be invalidated as we are
	 * guaranteed that there's no valid mapping in this range.
	 */
	core_mmu_table_write_barrier();
	mmu_unlock(exceptions);

	return TEE_SUCCESS;
}

static bool mem_range_is_in_vcore_free(vaddr_t vstart, size_t num_pages)
{
	return core_is_buffer_inside(vstart, num_pages * SMALL_PAGE_SIZE,
				     VCORE_FREE_PA, VCORE_FREE_SZ);
}

static void maybe_remove_from_mem_map(vaddr_t vstart, size_t num_pages)
{
	struct memory_map *mem_map = NULL;
	struct tee_mmap_region *mm = NULL;
	size_t idx = 0;
	vaddr_t va = 0;

	mm = find_map_by_va((void *)vstart);
	if (!mm || !va_is_in_map(mm, vstart + num_pages * SMALL_PAGE_SIZE - 1))
		panic("VA does not belong to any known mm region");

	if (core_mmu_is_dynamic_vaspace(mm))
		return;

	if (!mem_range_is_in_vcore_free(vstart, num_pages))
		panic("Trying to unmap static region");

	/*
	 * We're going to remove memory from the VCORE_FREE memory range.
	 * Depending on where the range is we may need to remove the
	 * matching mm, peel off a bit from the start or end of the mm, or
	 * split it into two with a hole in the middle.
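	 * The cases handled below are: the range covers the whole mm (drop
	 * the entry), it starts at mm->va (advance va/pa and shrink), it
	 * ends at mm->va + mm->size (only shrink), or it lies in the middle
	 * (truncate mm and insert a new entry for the tail).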
2234 */ 2235 2236 va = ROUNDDOWN(vstart, SMALL_PAGE_SIZE); 2237 assert(mm->region_size == SMALL_PAGE_SIZE); 2238 2239 if (va == mm->va && mm->size == num_pages * SMALL_PAGE_SIZE) { 2240 mem_map = get_memory_map(); 2241 idx = mm - mem_map->map; 2242 assert(idx < mem_map->count); 2243 2244 rem_array_elem(mem_map->map, mem_map->count, 2245 sizeof(*mem_map->map), idx); 2246 mem_map->count--; 2247 } else if (va == mm->va) { 2248 mm->va += num_pages * SMALL_PAGE_SIZE; 2249 mm->pa += num_pages * SMALL_PAGE_SIZE; 2250 mm->size -= num_pages * SMALL_PAGE_SIZE; 2251 } else if (va + num_pages * SMALL_PAGE_SIZE == mm->va + mm->size) { 2252 mm->size -= num_pages * SMALL_PAGE_SIZE; 2253 } else { 2254 struct tee_mmap_region m = *mm; 2255 2256 mem_map = get_memory_map(); 2257 idx = mm - mem_map->map; 2258 assert(idx < mem_map->count); 2259 2260 mm->size = va - mm->va; 2261 m.va += mm->size + num_pages * SMALL_PAGE_SIZE; 2262 m.pa += mm->size + num_pages * SMALL_PAGE_SIZE; 2263 m.size -= mm->size + num_pages * SMALL_PAGE_SIZE; 2264 grow_mem_map(mem_map); 2265 ins_array_elem(mem_map->map, mem_map->count, 2266 sizeof(*mem_map->map), idx + 1, &m); 2267 } 2268 } 2269 2270 void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages) 2271 { 2272 struct core_mmu_table_info tbl_info; 2273 size_t i; 2274 unsigned int idx; 2275 uint32_t exceptions; 2276 2277 exceptions = mmu_lock(); 2278 2279 maybe_remove_from_mem_map(vstart, num_pages); 2280 2281 for (i = 0; i < num_pages; i++, vstart += SMALL_PAGE_SIZE) { 2282 if (!core_mmu_find_table(NULL, vstart, UINT_MAX, &tbl_info)) 2283 panic("Can't find pagetable"); 2284 2285 if (tbl_info.shift != SMALL_PAGE_SHIFT) 2286 panic("Invalid pagetable level"); 2287 2288 idx = core_mmu_va2idx(&tbl_info, vstart); 2289 core_mmu_set_entry(&tbl_info, idx, 0, 0); 2290 } 2291 tlbi_all(); 2292 2293 mmu_unlock(exceptions); 2294 } 2295 2296 void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info, 2297 struct user_mode_ctx *uctx) 2298 { 2299 struct core_mmu_table_info pg_info = { }; 2300 struct pgt_cache *pgt_cache = &uctx->pgt_cache; 2301 struct pgt *pgt = NULL; 2302 struct pgt *p = NULL; 2303 struct vm_region *r = NULL; 2304 2305 if (TAILQ_EMPTY(&uctx->vm_info.regions)) 2306 return; /* Nothing to map */ 2307 2308 /* 2309 * Allocate all page tables in advance. 2310 */ 2311 pgt_get_all(uctx); 2312 pgt = SLIST_FIRST(pgt_cache); 2313 2314 core_mmu_set_info_table(&pg_info, dir_info->next_level, 0, NULL); 2315 2316 TAILQ_FOREACH(r, &uctx->vm_info.regions, link) 2317 set_pg_region(dir_info, r, &pgt, &pg_info); 2318 /* Record that the translation tables now are populated. 
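	 * Only the tables consumed by set_pg_region() above are flagged,
	 * that is, the entries from the head of pgt_cache up to and
	 * including the one pgt points at.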
*/ 2319 SLIST_FOREACH(p, pgt_cache, link) { 2320 p->populated = true; 2321 if (p == pgt) 2322 break; 2323 } 2324 assert(p == pgt); 2325 } 2326 2327 TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr, 2328 size_t len) 2329 { 2330 struct core_mmu_table_info tbl_info = { }; 2331 struct tee_mmap_region *res_map = NULL; 2332 struct tee_mmap_region *map = NULL; 2333 struct tee_mmap_region r = { }; 2334 paddr_t pa = virt_to_phys(addr); 2335 size_t granule = 0; 2336 vaddr_t tbl_span = 0; 2337 vaddr_t end = 0; 2338 ptrdiff_t i = 0; 2339 paddr_t p = 0; 2340 size_t l = 0; 2341 2342 map = find_map_by_type_and_pa(type, pa, len); 2343 if (!map) 2344 return TEE_ERROR_GENERIC; 2345 2346 res_map = find_map_by_type(MEM_AREA_RES_VASPACE); 2347 if (!res_map) 2348 return TEE_ERROR_GENERIC; 2349 if (!core_mmu_find_table(NULL, map->va, UINT_MAX, &tbl_info)) 2350 return TEE_ERROR_GENERIC; 2351 granule = BIT(tbl_info.shift); 2352 2353 if (map < static_memory_map.map || 2354 map >= static_memory_map.map + static_memory_map.count) 2355 return TEE_ERROR_GENERIC; 2356 i = map - static_memory_map.map; 2357 2358 /* Check that we have a full match */ 2359 p = ROUNDDOWN2(pa, granule); 2360 l = ROUNDUP2(len + pa - p, granule); 2361 if (map->pa != p || map->size != l) 2362 return TEE_ERROR_GENERIC; 2363 2364 if (ADD_OVERFLOW(map->va, map->size, &end)) 2365 return TEE_ERROR_GENERIC; 2366 for (r = *map; r.va < end; r.pa += r.size, r.va += r.size) { 2367 if (!core_mmu_find_table(NULL, r.va, UINT_MAX, &tbl_info)) 2368 panic("can't find table for unmapping"); 2369 2370 tbl_span = BIT64(tbl_info.shift) * tbl_info.num_entries; 2371 r.size = MIN(tbl_span - (r.va - tbl_info.va_base), 2372 end - r.va); 2373 clear_region(&tbl_info, &r); 2374 } 2375 tlbi_all(); 2376 2377 /* If possible remove the va range from res_map */ 2378 if (res_map->va - map->size == map->va) { 2379 res_map->va -= map->size; 2380 res_map->size += map->size; 2381 } 2382 2383 /* Remove the entry. 
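	 * This keeps the static memory map consistent with the translation
	 * tables that were just cleared above.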
*/ 2384 rem_array_elem(static_memory_map.map, static_memory_map.count, 2385 sizeof(*static_memory_map.map), i); 2386 static_memory_map.count--; 2387 2388 return TEE_SUCCESS; 2389 } 2390 2391 struct tee_mmap_region * 2392 core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len) 2393 { 2394 struct memory_map *mem_map = get_memory_map(); 2395 struct tee_mmap_region *map_found = NULL; 2396 size_t n = 0; 2397 2398 if (!len) 2399 return NULL; 2400 2401 for (n = 0; n < mem_map->count; n++) { 2402 if (mem_map->map[n].type != type) 2403 continue; 2404 2405 if (map_found) 2406 return NULL; 2407 2408 map_found = mem_map->map + n; 2409 } 2410 2411 if (!map_found || map_found->size < len) 2412 return NULL; 2413 2414 return map_found; 2415 } 2416 2417 void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len) 2418 { 2419 struct memory_map *mem_map = &static_memory_map; 2420 struct core_mmu_table_info tbl_info = { }; 2421 struct tee_mmap_region *map = NULL; 2422 struct tee_mmap_region r = { }; 2423 size_t granule = 0; 2424 vaddr_t tbl_span = 0; 2425 vaddr_t end = 0; 2426 paddr_t p = 0; 2427 size_t l = 0; 2428 2429 if (!len) 2430 return NULL; 2431 2432 if (!core_mmu_check_end_pa(addr, len)) 2433 return NULL; 2434 2435 /* Check if the memory is already mapped */ 2436 map = find_map_by_type_and_pa(type, addr, len); 2437 if (map && pbuf_inside_map_area(addr, len, map)) 2438 return (void *)(vaddr_t)(map->va + addr - map->pa); 2439 2440 /* Find the reserved va space used for late mappings */ 2441 map = find_map_by_type(MEM_AREA_RES_VASPACE); 2442 if (!map) 2443 return NULL; 2444 2445 if (!core_mmu_find_table(NULL, map->va, UINT_MAX, &tbl_info)) 2446 return NULL; 2447 2448 granule = BIT64(tbl_info.shift); 2449 p = ROUNDDOWN2(addr, granule); 2450 l = ROUNDUP2(len + addr - p, granule); 2451 2452 /* Ban overflowing virtual addresses */ 2453 if (map->size < l) 2454 return NULL; 2455 2456 if (static_memory_map.count >= static_memory_map.alloc_count) 2457 return NULL; 2458 2459 mem_map->map[mem_map->count] = (struct tee_mmap_region){ 2460 .va = map->va, 2461 .size = l, 2462 .type = type, 2463 .region_size = granule, 2464 .attr = core_mmu_type_to_attr(type), 2465 .pa = p, 2466 }; 2467 map->va += l; 2468 map->size -= l; 2469 map = mem_map->map + mem_map->count; 2470 mem_map->count++; 2471 2472 if (ADD_OVERFLOW(map->va, map->size, &end)) 2473 panic("VA overflow in add_mapping"); 2474 for (r = *map; r.va < end; r.pa += r.size, r.va += r.size) { 2475 if (!core_mmu_find_table(NULL, r.va, UINT_MAX, &tbl_info)) 2476 panic("can't find table for mapping"); 2477 2478 tbl_span = BIT64(tbl_info.shift) * tbl_info.num_entries; 2479 r.size = MIN(tbl_span - (r.va - tbl_info.va_base), 2480 end - r.va); 2481 set_region(&tbl_info, &r); 2482 } 2483 2484 /* Make sure the new entry is visible before continuing. 
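	 * Only a table write barrier is needed: the VA range was carved out
	 * of the reserved vaspace and had no valid mapping before, so there
	 * are no stale TLB entries to invalidate.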
	 */
	core_mmu_table_write_barrier();

	return (void *)(vaddr_t)(map->va + addr - map->pa);
}

#ifdef CFG_WITH_PAGER
static vaddr_t get_linear_map_end_va(void)
{
	/* this is synced with the generic linker file kern.ld.S */
	return (vaddr_t)__heap2_end;
}

static paddr_t get_linear_map_end_pa(void)
{
	return get_linear_map_end_va() - boot_mmu_config.map_offset;
}
#endif

#if defined(CFG_TEE_CORE_DEBUG)
static void check_pa_matches_va(void *va, paddr_t pa)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	vaddr_t v = (vaddr_t)va;
	paddr_t p = 0;
	struct core_mmu_table_info ti __maybe_unused = { };

	if (core_mmu_user_va_range_is_defined()) {
		vaddr_t user_va_base = 0;
		size_t user_va_size = 0;

		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
		if (v >= user_va_base &&
		    v <= (user_va_base - 1 + user_va_size)) {
			if (!core_mmu_user_mapping_is_active()) {
				if (pa)
					panic("issue in linear address space");
				return;
			}

			res = vm_va2pa(to_user_mode_ctx(thread_get_tsd()->ctx),
				       va, &p);
			if (res == TEE_ERROR_NOT_SUPPORTED)
				return;
			if (res == TEE_SUCCESS && pa != p)
				panic("bad pa");
			if (res != TEE_SUCCESS && pa)
				panic("false pa");
			return;
		}
	}
#ifdef CFG_WITH_PAGER
	if (is_unpaged(va)) {
		if (v - boot_mmu_config.map_offset != pa)
			panic("issue in linear address space");
		return;
	}

	if (tee_pager_get_table_info(v, &ti)) {
		uint32_t a;

		/*
		 * Lookups in the page table managed by the pager are
		 * dangerous for addresses in the paged area as those pages
		 * change all the time. But some ranges are safe, for
		 * instance rw-locked areas when the page is populated.
		 */
		core_mmu_get_entry(&ti, core_mmu_va2idx(&ti, v), &p, &a);
		if (a & TEE_MATTR_VALID_BLOCK) {
			paddr_t mask = BIT64(ti.shift) - 1;

			p |= v & mask;
			if (pa != p)
				panic();
		} else {
			if (pa)
				panic();
		}
		return;
	}
#endif

	if (!core_va2pa_helper(va, &p)) {
		/* Verify only the static mapping (case of non-null phys addr) */
		if (p && pa != p) {
			DMSG("va %p maps 0x%" PRIxPA ", expect 0x%" PRIxPA,
			     va, p, pa);
			panic();
		}
	} else {
		if (pa) {
			DMSG("va %p unmapped, expect 0x%" PRIxPA, va, pa);
			panic();
		}
	}
}
#else
static void check_pa_matches_va(void *va __unused, paddr_t pa __unused)
{
}
#endif

paddr_t virt_to_phys(void *va)
{
	paddr_t pa = 0;

	if (!arch_va2pa_helper(va, &pa))
		pa = 0;
	check_pa_matches_va(memtag_strip_tag(va), pa);
	return pa;
}

/*
 * Don't use check_va_matches_pa() for RISC-V: its callee
 * arch_va2pa_helper() will eventually call it back, which creates
 * indirect recursion and can lead to a stack overflow. Moreover, if
 * arch_va2pa_helper() returns true the va2pa mapping is already known
 * to match, so there is no need to check it again.
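 * That is why the debug check below is compiled only when
 * CFG_TEE_CORE_DEBUG is enabled and the target is not RISC-V.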
2602 */ 2603 #if defined(CFG_TEE_CORE_DEBUG) && !defined(__riscv) 2604 static void check_va_matches_pa(paddr_t pa, void *va) 2605 { 2606 paddr_t p = 0; 2607 2608 if (!va) 2609 return; 2610 2611 p = virt_to_phys(va); 2612 if (p != pa) { 2613 DMSG("va %p maps 0x%" PRIxPA " expect 0x%" PRIxPA, va, p, pa); 2614 panic(); 2615 } 2616 } 2617 #else 2618 static void check_va_matches_pa(paddr_t pa __unused, void *va __unused) 2619 { 2620 } 2621 #endif 2622 2623 static void *phys_to_virt_ts_vaspace(paddr_t pa, size_t len) 2624 { 2625 if (!core_mmu_user_mapping_is_active()) 2626 return NULL; 2627 2628 return vm_pa2va(to_user_mode_ctx(thread_get_tsd()->ctx), pa, len); 2629 } 2630 2631 #ifdef CFG_WITH_PAGER 2632 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len) 2633 { 2634 paddr_t end_pa = 0; 2635 2636 if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa)) 2637 return NULL; 2638 2639 if (pa >= TEE_LOAD_ADDR && pa < get_linear_map_end_pa()) { 2640 if (end_pa > get_linear_map_end_pa()) 2641 return NULL; 2642 return (void *)(vaddr_t)(pa + boot_mmu_config.map_offset); 2643 } 2644 2645 return tee_pager_phys_to_virt(pa, len); 2646 } 2647 #else 2648 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len) 2649 { 2650 struct tee_mmap_region *mmap = NULL; 2651 2652 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM, pa, len); 2653 if (!mmap) 2654 mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RW, pa, len); 2655 if (!mmap) 2656 mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RO, pa, len); 2657 if (!mmap) 2658 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RW, pa, len); 2659 if (!mmap) 2660 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RO, pa, len); 2661 if (!mmap) 2662 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RX, pa, len); 2663 2664 /* 2665 * Note that MEM_AREA_INIT_RAM_RO and MEM_AREA_INIT_RAM_RX are only 2666 * used with pager and not needed here. 
2667 */ 2668 return map_pa2va(mmap, pa, len); 2669 } 2670 #endif 2671 2672 void *phys_to_virt(paddr_t pa, enum teecore_memtypes m, size_t len) 2673 { 2674 void *va = NULL; 2675 2676 switch (m) { 2677 case MEM_AREA_TS_VASPACE: 2678 va = phys_to_virt_ts_vaspace(pa, len); 2679 break; 2680 case MEM_AREA_TEE_RAM: 2681 case MEM_AREA_TEE_RAM_RX: 2682 case MEM_AREA_TEE_RAM_RO: 2683 case MEM_AREA_TEE_RAM_RW: 2684 case MEM_AREA_NEX_RAM_RO: 2685 case MEM_AREA_NEX_RAM_RW: 2686 va = phys_to_virt_tee_ram(pa, len); 2687 break; 2688 case MEM_AREA_SHM_VASPACE: 2689 case MEM_AREA_NEX_DYN_VASPACE: 2690 case MEM_AREA_TEE_DYN_VASPACE: 2691 /* Find VA from PA in dynamic SHM is not yet supported */ 2692 va = NULL; 2693 break; 2694 default: 2695 va = map_pa2va(find_map_by_type_and_pa(m, pa, len), pa, len); 2696 } 2697 if (m != MEM_AREA_SEC_RAM_OVERALL) 2698 check_va_matches_pa(pa, va); 2699 return va; 2700 } 2701 2702 void *phys_to_virt_io(paddr_t pa, size_t len) 2703 { 2704 struct tee_mmap_region *map = NULL; 2705 void *va = NULL; 2706 2707 map = find_map_by_type_and_pa(MEM_AREA_IO_SEC, pa, len); 2708 if (!map) 2709 map = find_map_by_type_and_pa(MEM_AREA_IO_NSEC, pa, len); 2710 if (!map) 2711 return NULL; 2712 va = map_pa2va(map, pa, len); 2713 check_va_matches_pa(pa, va); 2714 return va; 2715 } 2716 2717 vaddr_t core_mmu_get_va(paddr_t pa, enum teecore_memtypes type, size_t len) 2718 { 2719 if (cpu_mmu_enabled()) 2720 return (vaddr_t)phys_to_virt(pa, type, len); 2721 2722 return (vaddr_t)pa; 2723 } 2724 2725 #ifdef CFG_WITH_PAGER 2726 bool is_unpaged(const void *va) 2727 { 2728 vaddr_t v = (vaddr_t)va; 2729 2730 return v >= VCORE_START_VA && v < get_linear_map_end_va(); 2731 } 2732 #endif 2733 2734 #ifdef CFG_NS_VIRTUALIZATION 2735 bool is_nexus(const void *va) 2736 { 2737 vaddr_t v = (vaddr_t)va; 2738 2739 return v >= VCORE_START_VA && v < VCORE_NEX_RW_PA + VCORE_NEX_RW_SZ; 2740 } 2741 #endif 2742 2743 vaddr_t io_pa_or_va(struct io_pa_va *p, size_t len) 2744 { 2745 assert(p->pa); 2746 if (cpu_mmu_enabled()) { 2747 if (!p->va) 2748 p->va = (vaddr_t)phys_to_virt_io(p->pa, len); 2749 assert(p->va); 2750 return p->va; 2751 } 2752 return p->pa; 2753 } 2754 2755 vaddr_t io_pa_or_va_secure(struct io_pa_va *p, size_t len) 2756 { 2757 assert(p->pa); 2758 if (cpu_mmu_enabled()) { 2759 if (!p->va) 2760 p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_SEC, 2761 len); 2762 assert(p->va); 2763 return p->va; 2764 } 2765 return p->pa; 2766 } 2767 2768 vaddr_t io_pa_or_va_nsec(struct io_pa_va *p, size_t len) 2769 { 2770 assert(p->pa); 2771 if (cpu_mmu_enabled()) { 2772 if (!p->va) 2773 p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_NSEC, 2774 len); 2775 assert(p->va); 2776 return p->va; 2777 } 2778 return p->pa; 2779 } 2780 2781 #ifdef CFG_CORE_RESERVED_SHM 2782 static TEE_Result teecore_init_pub_ram(void) 2783 { 2784 vaddr_t s = 0; 2785 vaddr_t e = 0; 2786 2787 /* get virtual addr/size of NSec shared mem allocated from teecore */ 2788 core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e); 2789 2790 if (s >= e || s & SMALL_PAGE_MASK || e & SMALL_PAGE_MASK) 2791 panic("invalid PUB RAM"); 2792 2793 /* extra check: we could rely on core_mmu_get_mem_by_type() */ 2794 if (!tee_vbuf_is_non_sec(s, e - s)) 2795 panic("PUB RAM is not non-secure"); 2796 2797 #ifdef CFG_PL310 2798 /* Allocate statically the l2cc mutex */ 2799 tee_l2cc_store_mutex_boot_pa(virt_to_phys((void *)s)); 2800 s += sizeof(uint32_t); /* size of a pl310 mutex */ 2801 s = ROUNDUP(s, SMALL_PAGE_SIZE); /* keep required alignment */ 2802 #endif 2803 2804 
default_nsec_shm_paddr = virt_to_phys((void *)s); 2805 default_nsec_shm_size = e - s; 2806 2807 return TEE_SUCCESS; 2808 } 2809 early_init(teecore_init_pub_ram); 2810 #endif /*CFG_CORE_RESERVED_SHM*/ 2811 2812 static void __maybe_unused carve_out_core_mem(paddr_t pa, paddr_t end_pa) 2813 { 2814 tee_mm_entry_t *mm __maybe_unused = NULL; 2815 2816 DMSG("%#"PRIxPA" .. %#"PRIxPA, pa, end_pa); 2817 mm = phys_mem_alloc2(pa, end_pa - pa); 2818 assert(mm); 2819 } 2820 2821 void core_mmu_init_phys_mem(void) 2822 { 2823 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 2824 paddr_t b1 = 0; 2825 paddr_size_t s1 = 0; 2826 2827 static_assert(ARRAY_SIZE(secure_only) <= 2); 2828 2829 if (ARRAY_SIZE(secure_only) == 2) { 2830 b1 = secure_only[1].paddr; 2831 s1 = secure_only[1].size; 2832 } 2833 virt_init_memory(&static_memory_map, secure_only[0].paddr, 2834 secure_only[0].size, b1, s1); 2835 } else { 2836 #ifdef CFG_WITH_PAGER 2837 /* 2838 * The pager uses all core memory so there's no need to add 2839 * it to the pool. 2840 */ 2841 static_assert(ARRAY_SIZE(secure_only) == 2); 2842 phys_mem_init(0, 0, secure_only[1].paddr, secure_only[1].size); 2843 #else /*!CFG_WITH_PAGER*/ 2844 size_t align = BIT(CORE_MMU_USER_CODE_SHIFT); 2845 paddr_t end_pa = 0; 2846 size_t size = 0; 2847 paddr_t ps = 0; 2848 paddr_t pa = 0; 2849 2850 static_assert(ARRAY_SIZE(secure_only) <= 2); 2851 if (ARRAY_SIZE(secure_only) == 2) { 2852 ps = secure_only[1].paddr; 2853 size = secure_only[1].size; 2854 } 2855 phys_mem_init(secure_only[0].paddr, secure_only[0].size, 2856 ps, size); 2857 2858 /* 2859 * The VCORE macros are relocatable so we need to translate 2860 * the addresses now that the MMU is enabled. 2861 */ 2862 end_pa = vaddr_to_phys(ROUNDUP2(VCORE_FREE_END_PA, 2863 align) - 1) + 1; 2864 /* Carve out the part used by OP-TEE core */ 2865 carve_out_core_mem(vaddr_to_phys(VCORE_UNPG_RX_PA), end_pa); 2866 if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS)) { 2867 pa = vaddr_to_phys(ROUNDUP2(ASAN_MAP_PA, align)); 2868 carve_out_core_mem(pa, pa + ASAN_MAP_SZ); 2869 } 2870 2871 /* Carve out test SDP memory */ 2872 #ifdef TEE_SDP_TEST_MEM_BASE 2873 if (TEE_SDP_TEST_MEM_SIZE) { 2874 pa = TEE_SDP_TEST_MEM_BASE; 2875 carve_out_core_mem(pa, pa + TEE_SDP_TEST_MEM_SIZE); 2876 } 2877 #endif 2878 #endif /*!CFG_WITH_PAGER*/ 2879 } 2880 } 2881
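
/*
 * Note: a minimal usage sketch of the io_pa_or_va() helpers above, kept
 * as a comment for illustration only. The device and its address are
 * made up, and the range is assumed to be covered by a MEM_AREA_IO_SEC
 * mapping (e.g. registered with register_phys_mem()) so that
 * phys_to_virt() can resolve it:
 *
 *	static struct io_pa_va my_uart_base = { .pa = 0x09000000 };
 *
 *	vaddr_t va = io_pa_or_va_secure(&my_uart_base, SMALL_PAGE_SIZE);
 *
 * Before the MMU is enabled the helper returns the physical address;
 * once it is enabled the helper resolves, caches and returns the
 * virtual address.
 */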