1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2016, 2022 Linaro Limited 4 * Copyright (c) 2014, STMicroelectronics International N.V. 5 * Copyright (c) 2022, Arm Limited and Contributors. All rights reserved. 6 */ 7 8 #include <assert.h> 9 #include <config.h> 10 #include <kernel/boot.h> 11 #include <kernel/dt.h> 12 #include <kernel/linker.h> 13 #include <kernel/panic.h> 14 #include <kernel/spinlock.h> 15 #include <kernel/tee_l2cc_mutex.h> 16 #include <kernel/tee_misc.h> 17 #include <kernel/tlb_helpers.h> 18 #include <kernel/user_mode_ctx.h> 19 #include <kernel/virtualization.h> 20 #include <libfdt.h> 21 #include <mm/core_memprot.h> 22 #include <mm/core_mmu.h> 23 #include <mm/mobj.h> 24 #include <mm/pgt_cache.h> 25 #include <mm/tee_pager.h> 26 #include <mm/vm.h> 27 #include <platform_config.h> 28 #include <string.h> 29 #include <trace.h> 30 #include <util.h> 31 32 #ifndef DEBUG_XLAT_TABLE 33 #define DEBUG_XLAT_TABLE 0 34 #endif 35 36 #define SHM_VASPACE_SIZE (1024 * 1024 * 32) 37 38 /* 39 * These variables are initialized before .bss is cleared. To avoid 40 * resetting them when .bss is cleared we're storing them in .data instead, 41 * even if they initially are zero. 42 */ 43 44 #ifdef CFG_CORE_RESERVED_SHM 45 /* Default NSec shared memory allocated from NSec world */ 46 unsigned long default_nsec_shm_size __nex_bss; 47 unsigned long default_nsec_shm_paddr __nex_bss; 48 #endif 49 50 static struct tee_mmap_region static_memory_map[CFG_MMAP_REGIONS 51 #ifdef CFG_CORE_ASLR 52 + 1 53 #endif 54 + 1] __nex_bss; 55 56 /* Define the platform's memory layout. */ 57 struct memaccess_area { 58 paddr_t paddr; 59 size_t size; 60 }; 61 62 #define MEMACCESS_AREA(a, s) { .paddr = a, .size = s } 63 64 static struct memaccess_area secure_only[] __nex_data = { 65 #ifdef TRUSTED_SRAM_BASE 66 MEMACCESS_AREA(TRUSTED_SRAM_BASE, TRUSTED_SRAM_SIZE), 67 #endif 68 MEMACCESS_AREA(TRUSTED_DRAM_BASE, TRUSTED_DRAM_SIZE), 69 }; 70 71 static struct memaccess_area nsec_shared[] __nex_data = { 72 #ifdef CFG_CORE_RESERVED_SHM 73 MEMACCESS_AREA(TEE_SHMEM_START, TEE_SHMEM_SIZE), 74 #endif 75 }; 76 77 #if defined(CFG_SECURE_DATA_PATH) 78 static const char *tz_sdp_match = "linaro,secure-heap"; 79 static struct memaccess_area sec_sdp; 80 #ifdef CFG_TEE_SDP_MEM_BASE 81 register_sdp_mem(CFG_TEE_SDP_MEM_BASE, CFG_TEE_SDP_MEM_SIZE); 82 #endif 83 #ifdef TEE_SDP_TEST_MEM_BASE 84 register_sdp_mem(TEE_SDP_TEST_MEM_BASE, TEE_SDP_TEST_MEM_SIZE); 85 #endif 86 #endif 87 88 #ifdef CFG_CORE_RESERVED_SHM 89 register_phys_mem(MEM_AREA_NSEC_SHM, TEE_SHMEM_START, TEE_SHMEM_SIZE); 90 #endif 91 static unsigned int mmu_spinlock; 92 93 static uint32_t mmu_lock(void) 94 { 95 return cpu_spin_lock_xsave(&mmu_spinlock); 96 } 97 98 static void mmu_unlock(uint32_t exceptions) 99 { 100 cpu_spin_unlock_xrestore(&mmu_spinlock, exceptions); 101 } 102 103 void core_mmu_get_secure_memory(paddr_t *base, paddr_size_t *size) 104 { 105 /* 106 * The first range is always used to cover OP-TEE core memory, but 107 * depending on configuration it may cover more than that. 108 */ 109 *base = secure_only[0].paddr; 110 *size = secure_only[0].size; 111 } 112 113 static struct tee_mmap_region *get_memory_map(void) 114 { 115 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 116 struct tee_mmap_region *map = virt_get_memory_map(); 117 118 if (map) 119 return map; 120 } 121 122 return static_memory_map; 123 } 124 125 static bool _pbuf_intersects(struct memaccess_area *a, size_t alen, 126 paddr_t pa, size_t size) 127 { 128 size_t n; 129 130 for (n = 0; n < alen; n++) 131 if (core_is_buffer_intersect(pa, size, a[n].paddr, a[n].size)) 132 return true; 133 return false; 134 } 135 136 #define pbuf_intersects(a, pa, size) \ 137 _pbuf_intersects((a), ARRAY_SIZE(a), (pa), (size)) 138 139 static bool _pbuf_is_inside(struct memaccess_area *a, size_t alen, 140 paddr_t pa, size_t size) 141 { 142 size_t n; 143 144 for (n = 0; n < alen; n++) 145 if (core_is_buffer_inside(pa, size, a[n].paddr, a[n].size)) 146 return true; 147 return false; 148 } 149 150 #define pbuf_is_inside(a, pa, size) \ 151 _pbuf_is_inside((a), ARRAY_SIZE(a), (pa), (size)) 152 153 static bool pa_is_in_map(struct tee_mmap_region *map, paddr_t pa, size_t len) 154 { 155 paddr_t end_pa = 0; 156 157 if (!map) 158 return false; 159 160 if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa)) 161 return false; 162 163 return (pa >= map->pa && end_pa <= map->pa + map->size - 1); 164 } 165 166 static bool va_is_in_map(struct tee_mmap_region *map, vaddr_t va) 167 { 168 if (!map) 169 return false; 170 return (va >= map->va && va <= (map->va + map->size - 1)); 171 } 172 173 /* check if target buffer fits in a core default map area */ 174 static bool pbuf_inside_map_area(unsigned long p, size_t l, 175 struct tee_mmap_region *map) 176 { 177 return core_is_buffer_inside(p, l, map->pa, map->size); 178 } 179 180 static struct tee_mmap_region *find_map_by_type(enum teecore_memtypes type) 181 { 182 struct tee_mmap_region *map; 183 184 for (map = get_memory_map(); !core_mmap_is_end_of_table(map); map++) 185 if (map->type == type) 186 return map; 187 return NULL; 188 } 189 190 static struct tee_mmap_region * 191 find_map_by_type_and_pa(enum teecore_memtypes type, paddr_t pa, size_t len) 192 { 193 struct tee_mmap_region *map; 194 195 for (map = get_memory_map(); !core_mmap_is_end_of_table(map); map++) { 196 if (map->type != type) 197 continue; 198 if (pa_is_in_map(map, pa, len)) 199 return map; 200 } 201 return NULL; 202 } 203 204 static struct tee_mmap_region *find_map_by_va(void *va) 205 { 206 struct tee_mmap_region *map = get_memory_map(); 207 unsigned long a = (unsigned long)va; 208 209 while (!core_mmap_is_end_of_table(map)) { 210 if (a >= map->va && a <= (map->va - 1 + map->size)) 211 return map; 212 map++; 213 } 214 return NULL; 215 } 216 217 static struct tee_mmap_region *find_map_by_pa(unsigned long pa) 218 { 219 struct tee_mmap_region *map = get_memory_map(); 220 221 while (!core_mmap_is_end_of_table(map)) { 222 if (pa >= map->pa && pa <= (map->pa + map->size - 1)) 223 return map; 224 map++; 225 } 226 return NULL; 227 } 228 229 #if defined(CFG_SECURE_DATA_PATH) 230 static bool dtb_get_sdp_region(void) 231 { 232 void *fdt = NULL; 233 int node = 0; 234 int tmp_node = 0; 235 paddr_t tmp_addr = 0; 236 size_t tmp_size = 0; 237 238 if (!IS_ENABLED(CFG_EMBED_DTB)) 239 return false; 240 241 fdt = get_embedded_dt(); 242 if (!fdt) 243 panic("No DTB found"); 244 245 node = fdt_node_offset_by_compatible(fdt, -1, tz_sdp_match); 246 if (node < 0) { 247 DMSG("No %s compatible node found", tz_sdp_match); 248 return false; 249 } 250 tmp_node = node; 251 while (tmp_node >= 0) { 252 tmp_node = fdt_node_offset_by_compatible(fdt, tmp_node, 253 tz_sdp_match); 254 if (tmp_node >= 0) 255 DMSG("Ignore SDP pool node %s, supports only 1 node", 256 fdt_get_name(fdt, tmp_node, NULL)); 257 } 258 259 tmp_addr = fdt_reg_base_address(fdt, node); 260 if (tmp_addr == DT_INFO_INVALID_REG) { 261 EMSG("%s: Unable to get base addr from DT", tz_sdp_match); 262 return false; 263 } 264 265 tmp_size = fdt_reg_size(fdt, node); 266 if (tmp_size == DT_INFO_INVALID_REG_SIZE) { 267 EMSG("%s: Unable to get size of base addr from DT", 268 tz_sdp_match); 269 return false; 270 } 271 272 sec_sdp.paddr = tmp_addr; 273 sec_sdp.size = tmp_size; 274 275 return true; 276 } 277 #endif 278 279 #if defined(CFG_CORE_DYN_SHM) || defined(CFG_SECURE_DATA_PATH) 280 static bool pbuf_is_special_mem(paddr_t pbuf, size_t len, 281 const struct core_mmu_phys_mem *start, 282 const struct core_mmu_phys_mem *end) 283 { 284 const struct core_mmu_phys_mem *mem; 285 286 for (mem = start; mem < end; mem++) { 287 if (core_is_buffer_inside(pbuf, len, mem->addr, mem->size)) 288 return true; 289 } 290 291 return false; 292 } 293 #endif 294 295 #ifdef CFG_CORE_DYN_SHM 296 static void carve_out_phys_mem(struct core_mmu_phys_mem **mem, size_t *nelems, 297 paddr_t pa, size_t size) 298 { 299 struct core_mmu_phys_mem *m = *mem; 300 size_t n = 0; 301 302 while (true) { 303 if (n >= *nelems) { 304 DMSG("No need to carve out %#" PRIxPA " size %#zx", 305 pa, size); 306 return; 307 } 308 if (core_is_buffer_inside(pa, size, m[n].addr, m[n].size)) 309 break; 310 if (!core_is_buffer_outside(pa, size, m[n].addr, m[n].size)) 311 panic(); 312 n++; 313 } 314 315 if (pa == m[n].addr && size == m[n].size) { 316 /* Remove this entry */ 317 (*nelems)--; 318 memmove(m + n, m + n + 1, sizeof(*m) * (*nelems - n)); 319 m = nex_realloc(m, sizeof(*m) * *nelems); 320 if (!m) 321 panic(); 322 *mem = m; 323 } else if (pa == m[n].addr) { 324 m[n].addr += size; 325 m[n].size -= size; 326 } else if ((pa + size) == (m[n].addr + m[n].size)) { 327 m[n].size -= size; 328 } else { 329 /* Need to split the memory entry */ 330 m = nex_realloc(m, sizeof(*m) * (*nelems + 1)); 331 if (!m) 332 panic(); 333 *mem = m; 334 memmove(m + n + 1, m + n, sizeof(*m) * (*nelems - n)); 335 (*nelems)++; 336 m[n].size = pa - m[n].addr; 337 m[n + 1].size -= size + m[n].size; 338 m[n + 1].addr = pa + size; 339 } 340 } 341 342 static void check_phys_mem_is_outside(struct core_mmu_phys_mem *start, 343 size_t nelems, 344 struct tee_mmap_region *map) 345 { 346 size_t n; 347 348 for (n = 0; n < nelems; n++) { 349 if (!core_is_buffer_outside(start[n].addr, start[n].size, 350 map->pa, map->size)) { 351 EMSG("Non-sec mem (%#" PRIxPA ":%#" PRIxPASZ 352 ") overlaps map (type %d %#" PRIxPA ":%#zx)", 353 start[n].addr, start[n].size, 354 map->type, map->pa, map->size); 355 panic(); 356 } 357 } 358 } 359 360 static const struct core_mmu_phys_mem *discovered_nsec_ddr_start __nex_bss; 361 static size_t discovered_nsec_ddr_nelems __nex_bss; 362 363 static int cmp_pmem_by_addr(const void *a, const void *b) 364 { 365 const struct core_mmu_phys_mem *pmem_a = a; 366 const struct core_mmu_phys_mem *pmem_b = b; 367 368 return CMP_TRILEAN(pmem_a->addr, pmem_b->addr); 369 } 370 371 void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start, 372 size_t nelems) 373 { 374 struct core_mmu_phys_mem *m = start; 375 size_t num_elems = nelems; 376 struct tee_mmap_region *map = static_memory_map; 377 const struct core_mmu_phys_mem __maybe_unused *pmem; 378 379 assert(!discovered_nsec_ddr_start); 380 assert(m && num_elems); 381 382 qsort(m, num_elems, sizeof(*m), cmp_pmem_by_addr); 383 384 /* 385 * Non-secure shared memory and also secure data 386 * path memory are supposed to reside inside 387 * non-secure memory. Since NSEC_SHM and SDP_MEM 388 * are used for a specific purpose make holes for 389 * those memory in the normal non-secure memory. 390 * 391 * This has to be done since for instance QEMU 392 * isn't aware of which memory range in the 393 * non-secure memory is used for NSEC_SHM. 394 */ 395 396 #ifdef CFG_SECURE_DATA_PATH 397 if (dtb_get_sdp_region()) 398 carve_out_phys_mem(&m, &num_elems, sec_sdp.paddr, sec_sdp.size); 399 400 for (pmem = phys_sdp_mem_begin; pmem < phys_sdp_mem_end; pmem++) 401 carve_out_phys_mem(&m, &num_elems, pmem->addr, pmem->size); 402 #endif 403 404 carve_out_phys_mem(&m, &num_elems, TEE_RAM_START, TEE_RAM_PH_SIZE); 405 carve_out_phys_mem(&m, &num_elems, TA_RAM_START, TA_RAM_SIZE); 406 407 for (map = static_memory_map; !core_mmap_is_end_of_table(map); map++) { 408 switch (map->type) { 409 case MEM_AREA_NSEC_SHM: 410 carve_out_phys_mem(&m, &num_elems, map->pa, map->size); 411 break; 412 case MEM_AREA_EXT_DT: 413 case MEM_AREA_RES_VASPACE: 414 case MEM_AREA_SHM_VASPACE: 415 case MEM_AREA_TS_VASPACE: 416 case MEM_AREA_PAGER_VASPACE: 417 break; 418 default: 419 check_phys_mem_is_outside(m, num_elems, map); 420 } 421 } 422 423 discovered_nsec_ddr_start = m; 424 discovered_nsec_ddr_nelems = num_elems; 425 426 if (!core_mmu_check_end_pa(m[num_elems - 1].addr, 427 m[num_elems - 1].size)) 428 panic(); 429 } 430 431 static bool get_discovered_nsec_ddr(const struct core_mmu_phys_mem **start, 432 const struct core_mmu_phys_mem **end) 433 { 434 if (!discovered_nsec_ddr_start) 435 return false; 436 437 *start = discovered_nsec_ddr_start; 438 *end = discovered_nsec_ddr_start + discovered_nsec_ddr_nelems; 439 440 return true; 441 } 442 443 static bool pbuf_is_nsec_ddr(paddr_t pbuf, size_t len) 444 { 445 const struct core_mmu_phys_mem *start; 446 const struct core_mmu_phys_mem *end; 447 448 if (!get_discovered_nsec_ddr(&start, &end)) 449 return false; 450 451 return pbuf_is_special_mem(pbuf, len, start, end); 452 } 453 454 bool core_mmu_nsec_ddr_is_defined(void) 455 { 456 const struct core_mmu_phys_mem *start; 457 const struct core_mmu_phys_mem *end; 458 459 if (!get_discovered_nsec_ddr(&start, &end)) 460 return false; 461 462 return start != end; 463 } 464 #else 465 static bool pbuf_is_nsec_ddr(paddr_t pbuf __unused, size_t len __unused) 466 { 467 return false; 468 } 469 #endif /*CFG_CORE_DYN_SHM*/ 470 471 #define MSG_MEM_INSTERSECT(pa1, sz1, pa2, sz2) \ 472 EMSG("[%" PRIxPA " %" PRIx64 "] intersects [%" PRIxPA " %" PRIx64 "]", \ 473 pa1, (uint64_t)pa1 + (sz1), pa2, (uint64_t)pa2 + (sz2)) 474 475 #ifdef CFG_SECURE_DATA_PATH 476 static bool pbuf_is_sdp_mem(paddr_t pbuf, size_t len) 477 { 478 bool is_sdp_mem = false; 479 480 if (sec_sdp.size) 481 is_sdp_mem = core_is_buffer_inside(pbuf, len, sec_sdp.paddr, 482 sec_sdp.size); 483 484 if (!is_sdp_mem) 485 is_sdp_mem = pbuf_is_special_mem(pbuf, len, phys_sdp_mem_begin, 486 phys_sdp_mem_end); 487 488 return is_sdp_mem; 489 } 490 491 static struct mobj *core_sdp_mem_alloc_mobj(paddr_t pa, size_t size) 492 { 493 struct mobj *mobj = mobj_phys_alloc(pa, size, TEE_MATTR_MEM_TYPE_CACHED, 494 CORE_MEM_SDP_MEM); 495 496 if (!mobj) 497 panic("can't create SDP physical memory object"); 498 499 return mobj; 500 } 501 502 struct mobj **core_sdp_mem_create_mobjs(void) 503 { 504 const struct core_mmu_phys_mem *mem = NULL; 505 struct mobj **mobj_base = NULL; 506 struct mobj **mobj = NULL; 507 int cnt = phys_sdp_mem_end - phys_sdp_mem_begin; 508 509 if (sec_sdp.size) 510 cnt++; 511 512 /* SDP mobjs table must end with a NULL entry */ 513 mobj_base = calloc(cnt + 1, sizeof(struct mobj *)); 514 if (!mobj_base) 515 panic("Out of memory"); 516 517 mobj = mobj_base; 518 519 for (mem = phys_sdp_mem_begin; mem < phys_sdp_mem_end; mem++, mobj++) 520 *mobj = core_sdp_mem_alloc_mobj(mem->addr, mem->size); 521 522 if (sec_sdp.size) 523 *mobj = core_sdp_mem_alloc_mobj(sec_sdp.paddr, sec_sdp.size); 524 525 return mobj_base; 526 } 527 528 #else /* CFG_SECURE_DATA_PATH */ 529 static bool pbuf_is_sdp_mem(paddr_t pbuf __unused, size_t len __unused) 530 { 531 return false; 532 } 533 534 #endif /* CFG_SECURE_DATA_PATH */ 535 536 /* Check special memories comply with registered memories */ 537 static void verify_special_mem_areas(struct tee_mmap_region *mem_map, 538 size_t len, 539 const struct core_mmu_phys_mem *start, 540 const struct core_mmu_phys_mem *end, 541 const char *area_name __maybe_unused) 542 { 543 const struct core_mmu_phys_mem *mem; 544 const struct core_mmu_phys_mem *mem2; 545 struct tee_mmap_region *mmap; 546 size_t n; 547 548 if (start == end) { 549 DMSG("No %s memory area defined", area_name); 550 return; 551 } 552 553 for (mem = start; mem < end; mem++) 554 DMSG("%s memory [%" PRIxPA " %" PRIx64 "]", 555 area_name, mem->addr, (uint64_t)mem->addr + mem->size); 556 557 /* Check memories do not intersect each other */ 558 for (mem = start; mem + 1 < end; mem++) { 559 for (mem2 = mem + 1; mem2 < end; mem2++) { 560 if (core_is_buffer_intersect(mem2->addr, mem2->size, 561 mem->addr, mem->size)) { 562 MSG_MEM_INSTERSECT(mem2->addr, mem2->size, 563 mem->addr, mem->size); 564 panic("Special memory intersection"); 565 } 566 } 567 } 568 569 /* 570 * Check memories do not intersect any mapped memory. 571 * This is called before reserved VA space is loaded in mem_map. 572 */ 573 for (mem = start; mem < end; mem++) { 574 for (mmap = mem_map, n = 0; n < len; mmap++, n++) { 575 if (core_is_buffer_intersect(mem->addr, mem->size, 576 mmap->pa, mmap->size)) { 577 MSG_MEM_INSTERSECT(mem->addr, mem->size, 578 mmap->pa, mmap->size); 579 panic("Special memory intersection"); 580 } 581 } 582 } 583 } 584 585 static void add_phys_mem(struct tee_mmap_region *memory_map, size_t num_elems, 586 const char *mem_name __maybe_unused, 587 enum teecore_memtypes mem_type, 588 paddr_t mem_addr, paddr_size_t mem_size, size_t *last) 589 { 590 size_t n = 0; 591 paddr_t pa; 592 paddr_size_t size; 593 594 if (!mem_size) /* Discard null size entries */ 595 return; 596 /* 597 * If some ranges of memory of the same type do overlap 598 * each others they are coalesced into one entry. To help this 599 * added entries are sorted by increasing physical. 600 * 601 * Note that it's valid to have the same physical memory as several 602 * different memory types, for instance the same device memory 603 * mapped as both secure and non-secure. This will probably not 604 * happen often in practice. 605 */ 606 DMSG("%s type %s 0x%08" PRIxPA " size 0x%08" PRIxPASZ, 607 mem_name, teecore_memtype_name(mem_type), mem_addr, mem_size); 608 while (true) { 609 if (n >= (num_elems - 1)) { 610 EMSG("Out of entries (%zu) in memory_map", num_elems); 611 panic(); 612 } 613 if (n == *last) 614 break; 615 pa = memory_map[n].pa; 616 size = memory_map[n].size; 617 if (mem_type == memory_map[n].type && 618 ((pa <= (mem_addr + (mem_size - 1))) && 619 (mem_addr <= (pa + (size - 1))))) { 620 DMSG("Physical mem map overlaps 0x%" PRIxPA, mem_addr); 621 memory_map[n].pa = MIN(pa, mem_addr); 622 memory_map[n].size = MAX(size, mem_size) + 623 (pa - memory_map[n].pa); 624 return; 625 } 626 if (mem_type < memory_map[n].type || 627 (mem_type == memory_map[n].type && mem_addr < pa)) 628 break; /* found the spot where to insert this memory */ 629 n++; 630 } 631 632 memmove(memory_map + n + 1, memory_map + n, 633 sizeof(struct tee_mmap_region) * (*last - n)); 634 (*last)++; 635 memset(memory_map + n, 0, sizeof(memory_map[0])); 636 memory_map[n].type = mem_type; 637 memory_map[n].pa = mem_addr; 638 memory_map[n].size = mem_size; 639 } 640 641 static void add_va_space(struct tee_mmap_region *memory_map, size_t num_elems, 642 enum teecore_memtypes type, size_t size, size_t *last) 643 { 644 size_t n = 0; 645 646 DMSG("type %s size 0x%08zx", teecore_memtype_name(type), size); 647 while (true) { 648 if (n >= (num_elems - 1)) { 649 EMSG("Out of entries (%zu) in memory_map", num_elems); 650 panic(); 651 } 652 if (n == *last) 653 break; 654 if (type < memory_map[n].type) 655 break; 656 n++; 657 } 658 659 memmove(memory_map + n + 1, memory_map + n, 660 sizeof(struct tee_mmap_region) * (*last - n)); 661 (*last)++; 662 memset(memory_map + n, 0, sizeof(memory_map[0])); 663 memory_map[n].type = type; 664 memory_map[n].size = size; 665 } 666 667 uint32_t core_mmu_type_to_attr(enum teecore_memtypes t) 668 { 669 const uint32_t attr = TEE_MATTR_VALID_BLOCK; 670 const uint32_t tagged = TEE_MATTR_MEM_TYPE_TAGGED << 671 TEE_MATTR_MEM_TYPE_SHIFT; 672 const uint32_t cached = TEE_MATTR_MEM_TYPE_CACHED << 673 TEE_MATTR_MEM_TYPE_SHIFT; 674 const uint32_t noncache = TEE_MATTR_MEM_TYPE_DEV << 675 TEE_MATTR_MEM_TYPE_SHIFT; 676 677 switch (t) { 678 case MEM_AREA_TEE_RAM: 679 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | tagged; 680 case MEM_AREA_TEE_RAM_RX: 681 case MEM_AREA_INIT_RAM_RX: 682 case MEM_AREA_IDENTITY_MAP_RX: 683 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRX | tagged; 684 case MEM_AREA_TEE_RAM_RO: 685 case MEM_AREA_INIT_RAM_RO: 686 return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | tagged; 687 case MEM_AREA_TEE_RAM_RW: 688 case MEM_AREA_NEX_RAM_RO: /* This has to be r/w during init runtime */ 689 case MEM_AREA_NEX_RAM_RW: 690 case MEM_AREA_TEE_ASAN: 691 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged; 692 case MEM_AREA_TEE_COHERENT: 693 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | noncache; 694 case MEM_AREA_TA_RAM: 695 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged; 696 case MEM_AREA_NSEC_SHM: 697 return attr | TEE_MATTR_PRW | cached; 698 case MEM_AREA_EXT_DT: 699 /* 700 * If CFG_MAP_EXT_DT_SECURE is enabled map the external device 701 * tree as secure non-cached memory, otherwise, fall back to 702 * non-secure mapping. 703 */ 704 if (IS_ENABLED(CFG_MAP_EXT_DT_SECURE)) 705 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | 706 noncache; 707 fallthrough; 708 case MEM_AREA_IO_NSEC: 709 return attr | TEE_MATTR_PRW | noncache; 710 case MEM_AREA_IO_SEC: 711 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | noncache; 712 case MEM_AREA_RAM_NSEC: 713 return attr | TEE_MATTR_PRW | cached; 714 case MEM_AREA_RAM_SEC: 715 case MEM_AREA_SEC_RAM_OVERALL: 716 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached; 717 case MEM_AREA_RES_VASPACE: 718 case MEM_AREA_SHM_VASPACE: 719 return 0; 720 case MEM_AREA_PAGER_VASPACE: 721 return TEE_MATTR_SECURE; 722 default: 723 panic("invalid type"); 724 } 725 } 726 727 static bool __maybe_unused map_is_tee_ram(const struct tee_mmap_region *mm) 728 { 729 switch (mm->type) { 730 case MEM_AREA_TEE_RAM: 731 case MEM_AREA_TEE_RAM_RX: 732 case MEM_AREA_TEE_RAM_RO: 733 case MEM_AREA_TEE_RAM_RW: 734 case MEM_AREA_INIT_RAM_RX: 735 case MEM_AREA_INIT_RAM_RO: 736 case MEM_AREA_NEX_RAM_RW: 737 case MEM_AREA_NEX_RAM_RO: 738 case MEM_AREA_TEE_ASAN: 739 return true; 740 default: 741 return false; 742 } 743 } 744 745 static bool __maybe_unused map_is_secure(const struct tee_mmap_region *mm) 746 { 747 return !!(core_mmu_type_to_attr(mm->type) & TEE_MATTR_SECURE); 748 } 749 750 static bool __maybe_unused map_is_pgdir(const struct tee_mmap_region *mm) 751 { 752 return mm->region_size == CORE_MMU_PGDIR_SIZE; 753 } 754 755 static int cmp_mmap_by_lower_va(const void *a, const void *b) 756 { 757 const struct tee_mmap_region *mm_a = a; 758 const struct tee_mmap_region *mm_b = b; 759 760 return CMP_TRILEAN(mm_a->va, mm_b->va); 761 } 762 763 static void dump_mmap_table(struct tee_mmap_region *memory_map) 764 { 765 struct tee_mmap_region *map; 766 767 for (map = memory_map; !core_mmap_is_end_of_table(map); map++) { 768 vaddr_t __maybe_unused vstart; 769 770 vstart = map->va + ((vaddr_t)map->pa & (map->region_size - 1)); 771 DMSG("type %-12s va 0x%08" PRIxVA "..0x%08" PRIxVA 772 " pa 0x%08" PRIxPA "..0x%08" PRIxPA " size 0x%08zx (%s)", 773 teecore_memtype_name(map->type), vstart, 774 vstart + map->size - 1, map->pa, 775 (paddr_t)(map->pa + map->size - 1), map->size, 776 map->region_size == SMALL_PAGE_SIZE ? "smallpg" : "pgdir"); 777 } 778 } 779 780 #if DEBUG_XLAT_TABLE 781 782 static void dump_xlat_table(vaddr_t va, unsigned int level) 783 { 784 struct core_mmu_table_info tbl_info; 785 unsigned int idx = 0; 786 paddr_t pa; 787 uint32_t attr; 788 789 core_mmu_find_table(NULL, va, level, &tbl_info); 790 va = tbl_info.va_base; 791 for (idx = 0; idx < tbl_info.num_entries; idx++) { 792 core_mmu_get_entry(&tbl_info, idx, &pa, &attr); 793 if (attr || level > CORE_MMU_BASE_TABLE_LEVEL) { 794 const char *security_bit = ""; 795 796 if (core_mmu_entry_have_security_bit(attr)) { 797 if (attr & TEE_MATTR_SECURE) 798 security_bit = "S"; 799 else 800 security_bit = "NS"; 801 } 802 803 if (attr & TEE_MATTR_TABLE) { 804 DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA 805 " TBL:0x%010" PRIxPA " %s", 806 level * 2, "", level, va, pa, 807 security_bit); 808 dump_xlat_table(va, level + 1); 809 } else if (attr) { 810 DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA 811 " PA:0x%010" PRIxPA " %s-%s-%s-%s", 812 level * 2, "", level, va, pa, 813 mattr_is_cached(attr) ? "MEM" : 814 "DEV", 815 attr & TEE_MATTR_PW ? "RW" : "RO", 816 attr & TEE_MATTR_PX ? "X " : "XN", 817 security_bit); 818 } else { 819 DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA 820 " INVALID\n", 821 level * 2, "", level, va); 822 } 823 } 824 va += BIT64(tbl_info.shift); 825 } 826 } 827 828 #else 829 830 static void dump_xlat_table(vaddr_t va __unused, int level __unused) 831 { 832 } 833 834 #endif 835 836 /* 837 * Reserves virtual memory space for pager usage. 838 * 839 * From the start of the first memory used by the link script + 840 * TEE_RAM_VA_SIZE should be covered, either with a direct mapping or empty 841 * mapping for pager usage. This adds translation tables as needed for the 842 * pager to operate. 843 */ 844 static void add_pager_vaspace(struct tee_mmap_region *mmap, size_t num_elems, 845 size_t *last) 846 { 847 paddr_t begin = 0; 848 paddr_t end = 0; 849 size_t size = 0; 850 size_t pos = 0; 851 size_t n = 0; 852 853 if (*last >= (num_elems - 1)) { 854 EMSG("Out of entries (%zu) in memory map", num_elems); 855 panic(); 856 } 857 858 for (n = 0; !core_mmap_is_end_of_table(mmap + n); n++) { 859 if (map_is_tee_ram(mmap + n)) { 860 if (!begin) 861 begin = mmap[n].pa; 862 pos = n + 1; 863 } 864 } 865 866 end = mmap[pos - 1].pa + mmap[pos - 1].size; 867 size = TEE_RAM_VA_SIZE - (end - begin); 868 if (!size) 869 return; 870 871 assert(pos <= *last); 872 memmove(mmap + pos + 1, mmap + pos, 873 sizeof(struct tee_mmap_region) * (*last - pos)); 874 (*last)++; 875 memset(mmap + pos, 0, sizeof(mmap[0])); 876 mmap[pos].type = MEM_AREA_PAGER_VASPACE; 877 mmap[pos].va = 0; 878 mmap[pos].size = size; 879 mmap[pos].region_size = SMALL_PAGE_SIZE; 880 mmap[pos].attr = core_mmu_type_to_attr(MEM_AREA_PAGER_VASPACE); 881 } 882 883 static void check_sec_nsec_mem_config(void) 884 { 885 size_t n = 0; 886 887 for (n = 0; n < ARRAY_SIZE(secure_only); n++) { 888 if (pbuf_intersects(nsec_shared, secure_only[n].paddr, 889 secure_only[n].size)) 890 panic("Invalid memory access config: sec/nsec"); 891 } 892 } 893 894 static size_t collect_mem_ranges(struct tee_mmap_region *memory_map, 895 size_t num_elems) 896 { 897 const struct core_mmu_phys_mem *mem = NULL; 898 size_t last = 0; 899 900 901 #define ADD_PHYS_MEM(_type, _addr, _size) \ 902 add_phys_mem(memory_map, num_elems, #_addr, (_type), \ 903 (_addr), (_size), &last) 904 905 if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) { 906 ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, TEE_RAM_START, 907 VCORE_UNPG_RX_PA - TEE_RAM_START); 908 ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RX, VCORE_UNPG_RX_PA, 909 VCORE_UNPG_RX_SZ); 910 ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, VCORE_UNPG_RO_PA, 911 VCORE_UNPG_RO_SZ); 912 913 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 914 ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RO, VCORE_UNPG_RW_PA, 915 VCORE_UNPG_RW_SZ); 916 ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RW, VCORE_NEX_RW_PA, 917 VCORE_NEX_RW_SZ); 918 } else { 919 ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RW, VCORE_UNPG_RW_PA, 920 VCORE_UNPG_RW_SZ); 921 } 922 923 if (IS_ENABLED(CFG_WITH_PAGER)) { 924 ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RX, VCORE_INIT_RX_PA, 925 VCORE_INIT_RX_SZ); 926 ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RO, VCORE_INIT_RO_PA, 927 VCORE_INIT_RO_SZ); 928 } 929 } else { 930 ADD_PHYS_MEM(MEM_AREA_TEE_RAM, TEE_RAM_START, TEE_RAM_PH_SIZE); 931 } 932 933 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 934 ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, TRUSTED_DRAM_BASE, 935 TRUSTED_DRAM_SIZE); 936 } else { 937 /* 938 * Every guest will have own TA RAM if virtualization 939 * support is enabled. 940 */ 941 ADD_PHYS_MEM(MEM_AREA_TA_RAM, TA_RAM_START, TA_RAM_SIZE); 942 } 943 944 if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS) && 945 IS_ENABLED(CFG_WITH_PAGER)) { 946 /* 947 * Asan ram is part of MEM_AREA_TEE_RAM_RW when pager is 948 * disabled. 949 */ 950 ADD_PHYS_MEM(MEM_AREA_TEE_ASAN, ASAN_MAP_PA, ASAN_MAP_SZ); 951 } 952 953 #undef ADD_PHYS_MEM 954 955 for (mem = phys_mem_map_begin; mem < phys_mem_map_end; mem++) { 956 /* Only unmapped virtual range may have a null phys addr */ 957 assert(mem->addr || !core_mmu_type_to_attr(mem->type)); 958 959 add_phys_mem(memory_map, num_elems, mem->name, mem->type, 960 mem->addr, mem->size, &last); 961 } 962 963 if (IS_ENABLED(CFG_SECURE_DATA_PATH)) 964 verify_special_mem_areas(memory_map, num_elems, 965 phys_sdp_mem_begin, 966 phys_sdp_mem_end, "SDP"); 967 968 add_va_space(memory_map, num_elems, MEM_AREA_RES_VASPACE, 969 CFG_RESERVED_VASPACE_SIZE, &last); 970 971 add_va_space(memory_map, num_elems, MEM_AREA_SHM_VASPACE, 972 SHM_VASPACE_SIZE, &last); 973 974 memory_map[last].type = MEM_AREA_END; 975 976 return last; 977 } 978 979 static void assign_mem_granularity(struct tee_mmap_region *memory_map) 980 { 981 struct tee_mmap_region *map = NULL; 982 983 /* 984 * Assign region sizes, note that MEM_AREA_TEE_RAM always uses 985 * SMALL_PAGE_SIZE. 986 */ 987 for (map = memory_map; !core_mmap_is_end_of_table(map); map++) { 988 paddr_t mask = map->pa | map->size; 989 990 if (!(mask & CORE_MMU_PGDIR_MASK)) 991 map->region_size = CORE_MMU_PGDIR_SIZE; 992 else if (!(mask & SMALL_PAGE_MASK)) 993 map->region_size = SMALL_PAGE_SIZE; 994 else 995 panic("Impossible memory alignment"); 996 997 if (map_is_tee_ram(map)) 998 map->region_size = SMALL_PAGE_SIZE; 999 } 1000 } 1001 1002 static bool place_tee_ram_at_top(paddr_t paddr) 1003 { 1004 return paddr > BIT64(core_mmu_get_va_width()) / 2; 1005 } 1006 1007 /* 1008 * MMU arch driver shall override this function if it helps 1009 * optimizing the memory footprint of the address translation tables. 1010 */ 1011 bool __weak core_mmu_prefer_tee_ram_at_top(paddr_t paddr) 1012 { 1013 return place_tee_ram_at_top(paddr); 1014 } 1015 1016 static bool assign_mem_va_dir(vaddr_t tee_ram_va, 1017 struct tee_mmap_region *memory_map, 1018 bool tee_ram_at_top) 1019 { 1020 struct tee_mmap_region *map = NULL; 1021 vaddr_t va = 0; 1022 bool va_is_secure = true; 1023 1024 /* 1025 * tee_ram_va might equals 0 when CFG_CORE_ASLR=y. 1026 * 0 is by design an invalid va, so return false directly. 1027 */ 1028 if (!tee_ram_va) 1029 return false; 1030 1031 /* Clear eventual previous assignments */ 1032 for (map = memory_map; !core_mmap_is_end_of_table(map); map++) 1033 map->va = 0; 1034 1035 /* 1036 * TEE RAM regions are always aligned with region_size. 1037 * 1038 * Note that MEM_AREA_PAGER_VASPACE also counts as TEE RAM here 1039 * since it handles virtual memory which covers the part of the ELF 1040 * that cannot fit directly into memory. 1041 */ 1042 va = tee_ram_va; 1043 for (map = memory_map; !core_mmap_is_end_of_table(map); map++) { 1044 if (map_is_tee_ram(map) || 1045 map->type == MEM_AREA_PAGER_VASPACE) { 1046 assert(!(va & (map->region_size - 1))); 1047 assert(!(map->size & (map->region_size - 1))); 1048 map->va = va; 1049 if (ADD_OVERFLOW(va, map->size, &va)) 1050 return false; 1051 if (va >= BIT64(core_mmu_get_va_width())) 1052 return false; 1053 } 1054 } 1055 1056 if (tee_ram_at_top) { 1057 /* 1058 * Map non-tee ram regions at addresses lower than the tee 1059 * ram region. 1060 */ 1061 va = tee_ram_va; 1062 for (map = memory_map; !core_mmap_is_end_of_table(map); map++) { 1063 map->attr = core_mmu_type_to_attr(map->type); 1064 if (map->va) 1065 continue; 1066 1067 if (!IS_ENABLED(CFG_WITH_LPAE) && 1068 va_is_secure != map_is_secure(map)) { 1069 va_is_secure = !va_is_secure; 1070 va = ROUNDDOWN(va, CORE_MMU_PGDIR_SIZE); 1071 } 1072 1073 if (SUB_OVERFLOW(va, map->size, &va)) 1074 return false; 1075 va = ROUNDDOWN(va, map->region_size); 1076 /* 1077 * Make sure that va is aligned with pa for 1078 * efficient pgdir mapping. Basically pa & 1079 * pgdir_mask should be == va & pgdir_mask 1080 */ 1081 if (map->size > 2 * CORE_MMU_PGDIR_SIZE) { 1082 if (SUB_OVERFLOW(va, CORE_MMU_PGDIR_SIZE, &va)) 1083 return false; 1084 va += (map->pa - va) & CORE_MMU_PGDIR_MASK; 1085 } 1086 map->va = va; 1087 } 1088 } else { 1089 /* 1090 * Map non-tee ram regions at addresses higher than the tee 1091 * ram region. 1092 */ 1093 for (map = memory_map; !core_mmap_is_end_of_table(map); map++) { 1094 map->attr = core_mmu_type_to_attr(map->type); 1095 if (map->va) 1096 continue; 1097 1098 if (!IS_ENABLED(CFG_WITH_LPAE) && 1099 va_is_secure != map_is_secure(map)) { 1100 va_is_secure = !va_is_secure; 1101 if (ROUNDUP_OVERFLOW(va, CORE_MMU_PGDIR_SIZE, 1102 &va)) 1103 return false; 1104 } 1105 1106 if (ROUNDUP_OVERFLOW(va, map->region_size, &va)) 1107 return false; 1108 /* 1109 * Make sure that va is aligned with pa for 1110 * efficient pgdir mapping. Basically pa & 1111 * pgdir_mask should be == va & pgdir_mask 1112 */ 1113 if (map->size > 2 * CORE_MMU_PGDIR_SIZE) { 1114 vaddr_t offs = (map->pa - va) & 1115 CORE_MMU_PGDIR_MASK; 1116 1117 if (ADD_OVERFLOW(va, offs, &va)) 1118 return false; 1119 } 1120 1121 map->va = va; 1122 if (ADD_OVERFLOW(va, map->size, &va)) 1123 return false; 1124 if (va >= BIT64(core_mmu_get_va_width())) 1125 return false; 1126 } 1127 } 1128 1129 return true; 1130 } 1131 1132 static bool assign_mem_va(vaddr_t tee_ram_va, 1133 struct tee_mmap_region *memory_map) 1134 { 1135 bool tee_ram_at_top = place_tee_ram_at_top(tee_ram_va); 1136 1137 /* 1138 * Check that we're not overlapping with the user VA range. 1139 */ 1140 if (IS_ENABLED(CFG_WITH_LPAE)) { 1141 /* 1142 * User VA range is supposed to be defined after these 1143 * mappings have been established. 1144 */ 1145 assert(!core_mmu_user_va_range_is_defined()); 1146 } else { 1147 vaddr_t user_va_base = 0; 1148 size_t user_va_size = 0; 1149 1150 assert(core_mmu_user_va_range_is_defined()); 1151 core_mmu_get_user_va_range(&user_va_base, &user_va_size); 1152 if (tee_ram_va < (user_va_base + user_va_size)) 1153 return false; 1154 } 1155 1156 if (IS_ENABLED(CFG_WITH_PAGER)) { 1157 bool prefered_dir = core_mmu_prefer_tee_ram_at_top(tee_ram_va); 1158 1159 /* Try whole mapping covered by a single base xlat entry */ 1160 if (prefered_dir != tee_ram_at_top && 1161 assign_mem_va_dir(tee_ram_va, memory_map, prefered_dir)) 1162 return true; 1163 } 1164 1165 return assign_mem_va_dir(tee_ram_va, memory_map, tee_ram_at_top); 1166 } 1167 1168 static int cmp_init_mem_map(const void *a, const void *b) 1169 { 1170 const struct tee_mmap_region *mm_a = a; 1171 const struct tee_mmap_region *mm_b = b; 1172 int rc = 0; 1173 1174 rc = CMP_TRILEAN(mm_a->region_size, mm_b->region_size); 1175 if (!rc) 1176 rc = CMP_TRILEAN(mm_a->pa, mm_b->pa); 1177 /* 1178 * 32bit MMU descriptors cannot mix secure and non-secure mapping in 1179 * the same level2 table. Hence sort secure mapping from non-secure 1180 * mapping. 1181 */ 1182 if (!rc && !IS_ENABLED(CFG_WITH_LPAE)) 1183 rc = CMP_TRILEAN(map_is_secure(mm_a), map_is_secure(mm_b)); 1184 1185 return rc; 1186 } 1187 1188 static bool mem_map_add_id_map(struct tee_mmap_region *memory_map, 1189 size_t num_elems, size_t *last, 1190 vaddr_t id_map_start, vaddr_t id_map_end) 1191 { 1192 struct tee_mmap_region *map = NULL; 1193 vaddr_t start = ROUNDDOWN(id_map_start, SMALL_PAGE_SIZE); 1194 vaddr_t end = ROUNDUP(id_map_end, SMALL_PAGE_SIZE); 1195 size_t len = end - start; 1196 1197 if (*last >= num_elems - 1) { 1198 EMSG("Out of entries (%zu) in memory map", num_elems); 1199 panic(); 1200 } 1201 1202 for (map = memory_map; !core_mmap_is_end_of_table(map); map++) 1203 if (core_is_buffer_intersect(map->va, map->size, start, len)) 1204 return false; 1205 1206 *map = (struct tee_mmap_region){ 1207 .type = MEM_AREA_IDENTITY_MAP_RX, 1208 /* 1209 * Could use CORE_MMU_PGDIR_SIZE to potentially save a 1210 * translation table, at the increased risk of clashes with 1211 * the rest of the memory map. 1212 */ 1213 .region_size = SMALL_PAGE_SIZE, 1214 .pa = start, 1215 .va = start, 1216 .size = len, 1217 .attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX), 1218 }; 1219 1220 (*last)++; 1221 1222 return true; 1223 } 1224 1225 static unsigned long init_mem_map(struct tee_mmap_region *memory_map, 1226 size_t num_elems, unsigned long seed) 1227 { 1228 /* 1229 * @id_map_start and @id_map_end describes a physical memory range 1230 * that must be mapped Read-Only eXecutable at identical virtual 1231 * addresses. 1232 */ 1233 vaddr_t id_map_start = (vaddr_t)__identity_map_init_start; 1234 vaddr_t id_map_end = (vaddr_t)__identity_map_init_end; 1235 unsigned long offs = 0; 1236 size_t last = 0; 1237 1238 last = collect_mem_ranges(memory_map, num_elems); 1239 assign_mem_granularity(memory_map); 1240 1241 /* 1242 * To ease mapping and lower use of xlat tables, sort mapping 1243 * description moving small-page regions after the pgdir regions. 1244 */ 1245 qsort(memory_map, last, sizeof(struct tee_mmap_region), 1246 cmp_init_mem_map); 1247 1248 add_pager_vaspace(memory_map, num_elems, &last); 1249 if (IS_ENABLED(CFG_CORE_ASLR) && seed) { 1250 vaddr_t base_addr = TEE_RAM_START + seed; 1251 const unsigned int va_width = core_mmu_get_va_width(); 1252 const vaddr_t va_mask = GENMASK_64(va_width - 1, 1253 SMALL_PAGE_SHIFT); 1254 vaddr_t ba = base_addr; 1255 size_t n = 0; 1256 1257 for (n = 0; n < 3; n++) { 1258 if (n) 1259 ba = base_addr ^ BIT64(va_width - n); 1260 ba &= va_mask; 1261 if (assign_mem_va(ba, memory_map) && 1262 mem_map_add_id_map(memory_map, num_elems, &last, 1263 id_map_start, id_map_end)) { 1264 offs = ba - TEE_RAM_START; 1265 DMSG("Mapping core at %#"PRIxVA" offs %#lx", 1266 ba, offs); 1267 goto out; 1268 } else { 1269 DMSG("Failed to map core at %#"PRIxVA, ba); 1270 } 1271 } 1272 EMSG("Failed to map core with seed %#lx", seed); 1273 } 1274 1275 if (!assign_mem_va(TEE_RAM_START, memory_map)) 1276 panic(); 1277 1278 out: 1279 qsort(memory_map, last, sizeof(struct tee_mmap_region), 1280 cmp_mmap_by_lower_va); 1281 1282 dump_mmap_table(memory_map); 1283 1284 return offs; 1285 } 1286 1287 static void check_mem_map(struct tee_mmap_region *map) 1288 { 1289 struct tee_mmap_region *m = NULL; 1290 1291 for (m = map; !core_mmap_is_end_of_table(m); m++) { 1292 switch (m->type) { 1293 case MEM_AREA_TEE_RAM: 1294 case MEM_AREA_TEE_RAM_RX: 1295 case MEM_AREA_TEE_RAM_RO: 1296 case MEM_AREA_TEE_RAM_RW: 1297 case MEM_AREA_INIT_RAM_RX: 1298 case MEM_AREA_INIT_RAM_RO: 1299 case MEM_AREA_NEX_RAM_RW: 1300 case MEM_AREA_NEX_RAM_RO: 1301 case MEM_AREA_IDENTITY_MAP_RX: 1302 if (!pbuf_is_inside(secure_only, m->pa, m->size)) 1303 panic("TEE_RAM can't fit in secure_only"); 1304 break; 1305 case MEM_AREA_TA_RAM: 1306 if (!pbuf_is_inside(secure_only, m->pa, m->size)) 1307 panic("TA_RAM can't fit in secure_only"); 1308 break; 1309 case MEM_AREA_NSEC_SHM: 1310 if (!pbuf_is_inside(nsec_shared, m->pa, m->size)) 1311 panic("NS_SHM can't fit in nsec_shared"); 1312 break; 1313 case MEM_AREA_SEC_RAM_OVERALL: 1314 case MEM_AREA_TEE_COHERENT: 1315 case MEM_AREA_TEE_ASAN: 1316 case MEM_AREA_IO_SEC: 1317 case MEM_AREA_IO_NSEC: 1318 case MEM_AREA_EXT_DT: 1319 case MEM_AREA_RAM_SEC: 1320 case MEM_AREA_RAM_NSEC: 1321 case MEM_AREA_RES_VASPACE: 1322 case MEM_AREA_SHM_VASPACE: 1323 case MEM_AREA_PAGER_VASPACE: 1324 break; 1325 default: 1326 EMSG("Uhandled memtype %d", m->type); 1327 panic(); 1328 } 1329 } 1330 } 1331 1332 static struct tee_mmap_region *get_tmp_mmap(void) 1333 { 1334 struct tee_mmap_region *tmp_mmap = (void *)__heap1_start; 1335 1336 #ifdef CFG_WITH_PAGER 1337 if (__heap1_end - __heap1_start < (ptrdiff_t)sizeof(static_memory_map)) 1338 tmp_mmap = (void *)__heap2_start; 1339 #endif 1340 1341 memset(tmp_mmap, 0, sizeof(static_memory_map)); 1342 1343 return tmp_mmap; 1344 } 1345 1346 /* 1347 * core_init_mmu_map() - init tee core default memory mapping 1348 * 1349 * This routine sets the static default TEE core mapping. If @seed is > 0 1350 * and configured with CFG_CORE_ASLR it will map tee core at a location 1351 * based on the seed and return the offset from the link address. 1352 * 1353 * If an error happened: core_init_mmu_map is expected to panic. 1354 * 1355 * Note: this function is weak just to make it possible to exclude it from 1356 * the unpaged area. 1357 */ 1358 void __weak core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg) 1359 { 1360 #ifndef CFG_NS_VIRTUALIZATION 1361 vaddr_t start = ROUNDDOWN((vaddr_t)__nozi_start, SMALL_PAGE_SIZE); 1362 #else 1363 vaddr_t start = ROUNDDOWN((vaddr_t)__vcore_nex_rw_start, 1364 SMALL_PAGE_SIZE); 1365 #endif 1366 vaddr_t len = ROUNDUP((vaddr_t)__nozi_end, SMALL_PAGE_SIZE) - start; 1367 struct tee_mmap_region *tmp_mmap = get_tmp_mmap(); 1368 unsigned long offs = 0; 1369 1370 check_sec_nsec_mem_config(); 1371 1372 /* 1373 * Add a entry covering the translation tables which will be 1374 * involved in some virt_to_phys() and phys_to_virt() conversions. 1375 */ 1376 static_memory_map[0] = (struct tee_mmap_region){ 1377 .type = MEM_AREA_TEE_RAM, 1378 .region_size = SMALL_PAGE_SIZE, 1379 .pa = start, 1380 .va = start, 1381 .size = len, 1382 .attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX), 1383 }; 1384 1385 COMPILE_TIME_ASSERT(CFG_MMAP_REGIONS >= 13); 1386 offs = init_mem_map(tmp_mmap, ARRAY_SIZE(static_memory_map), seed); 1387 1388 check_mem_map(tmp_mmap); 1389 core_init_mmu(tmp_mmap); 1390 dump_xlat_table(0x0, CORE_MMU_BASE_TABLE_LEVEL); 1391 core_init_mmu_regs(cfg); 1392 cfg->map_offset = offs; 1393 memcpy(static_memory_map, tmp_mmap, sizeof(static_memory_map)); 1394 } 1395 1396 bool core_mmu_mattr_is_ok(uint32_t mattr) 1397 { 1398 /* 1399 * Keep in sync with core_mmu_lpae.c:mattr_to_desc and 1400 * core_mmu_v7.c:mattr_to_texcb 1401 */ 1402 1403 switch ((mattr >> TEE_MATTR_MEM_TYPE_SHIFT) & TEE_MATTR_MEM_TYPE_MASK) { 1404 case TEE_MATTR_MEM_TYPE_DEV: 1405 case TEE_MATTR_MEM_TYPE_STRONGLY_O: 1406 case TEE_MATTR_MEM_TYPE_CACHED: 1407 case TEE_MATTR_MEM_TYPE_TAGGED: 1408 return true; 1409 default: 1410 return false; 1411 } 1412 } 1413 1414 /* 1415 * test attributes of target physical buffer 1416 * 1417 * Flags: pbuf_is(SECURE, NOT_SECURE, RAM, IOMEM, KEYVAULT). 1418 * 1419 */ 1420 bool core_pbuf_is(uint32_t attr, paddr_t pbuf, size_t len) 1421 { 1422 struct tee_mmap_region *map; 1423 1424 /* Empty buffers complies with anything */ 1425 if (len == 0) 1426 return true; 1427 1428 switch (attr) { 1429 case CORE_MEM_SEC: 1430 return pbuf_is_inside(secure_only, pbuf, len); 1431 case CORE_MEM_NON_SEC: 1432 return pbuf_is_inside(nsec_shared, pbuf, len) || 1433 pbuf_is_nsec_ddr(pbuf, len); 1434 case CORE_MEM_TEE_RAM: 1435 return core_is_buffer_inside(pbuf, len, TEE_RAM_START, 1436 TEE_RAM_PH_SIZE); 1437 case CORE_MEM_TA_RAM: 1438 return core_is_buffer_inside(pbuf, len, TA_RAM_START, 1439 TA_RAM_SIZE); 1440 #ifdef CFG_CORE_RESERVED_SHM 1441 case CORE_MEM_NSEC_SHM: 1442 return core_is_buffer_inside(pbuf, len, TEE_SHMEM_START, 1443 TEE_SHMEM_SIZE); 1444 #endif 1445 case CORE_MEM_SDP_MEM: 1446 return pbuf_is_sdp_mem(pbuf, len); 1447 case CORE_MEM_CACHED: 1448 map = find_map_by_pa(pbuf); 1449 if (!map || !pbuf_inside_map_area(pbuf, len, map)) 1450 return false; 1451 return mattr_is_cached(map->attr); 1452 default: 1453 return false; 1454 } 1455 } 1456 1457 /* test attributes of target virtual buffer (in core mapping) */ 1458 bool core_vbuf_is(uint32_t attr, const void *vbuf, size_t len) 1459 { 1460 paddr_t p; 1461 1462 /* Empty buffers complies with anything */ 1463 if (len == 0) 1464 return true; 1465 1466 p = virt_to_phys((void *)vbuf); 1467 if (!p) 1468 return false; 1469 1470 return core_pbuf_is(attr, p, len); 1471 } 1472 1473 /* core_va2pa - teecore exported service */ 1474 static int __maybe_unused core_va2pa_helper(void *va, paddr_t *pa) 1475 { 1476 struct tee_mmap_region *map; 1477 1478 map = find_map_by_va(va); 1479 if (!va_is_in_map(map, (vaddr_t)va)) 1480 return -1; 1481 1482 /* 1483 * We can calculate PA for static map. Virtual address ranges 1484 * reserved to core dynamic mapping return a 'match' (return 0;) 1485 * together with an invalid null physical address. 1486 */ 1487 if (map->pa) 1488 *pa = map->pa + (vaddr_t)va - map->va; 1489 else 1490 *pa = 0; 1491 1492 return 0; 1493 } 1494 1495 static void *map_pa2va(struct tee_mmap_region *map, paddr_t pa, size_t len) 1496 { 1497 if (!pa_is_in_map(map, pa, len)) 1498 return NULL; 1499 1500 return (void *)(vaddr_t)(map->va + pa - map->pa); 1501 } 1502 1503 /* 1504 * teecore gets some memory area definitions 1505 */ 1506 void core_mmu_get_mem_by_type(unsigned int type, vaddr_t *s, vaddr_t *e) 1507 { 1508 struct tee_mmap_region *map = find_map_by_type(type); 1509 1510 if (map) { 1511 *s = map->va; 1512 *e = map->va + map->size; 1513 } else { 1514 *s = 0; 1515 *e = 0; 1516 } 1517 } 1518 1519 enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa) 1520 { 1521 struct tee_mmap_region *map = find_map_by_pa(pa); 1522 1523 if (!map) 1524 return MEM_AREA_MAXTYPE; 1525 return map->type; 1526 } 1527 1528 void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned int idx, 1529 paddr_t pa, uint32_t attr) 1530 { 1531 assert(idx < tbl_info->num_entries); 1532 core_mmu_set_entry_primitive(tbl_info->table, tbl_info->level, 1533 idx, pa, attr); 1534 } 1535 1536 void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned int idx, 1537 paddr_t *pa, uint32_t *attr) 1538 { 1539 assert(idx < tbl_info->num_entries); 1540 core_mmu_get_entry_primitive(tbl_info->table, tbl_info->level, 1541 idx, pa, attr); 1542 } 1543 1544 static void clear_region(struct core_mmu_table_info *tbl_info, 1545 struct tee_mmap_region *region) 1546 { 1547 unsigned int end = 0; 1548 unsigned int idx = 0; 1549 1550 /* va, len and pa should be block aligned */ 1551 assert(!core_mmu_get_block_offset(tbl_info, region->va)); 1552 assert(!core_mmu_get_block_offset(tbl_info, region->size)); 1553 assert(!core_mmu_get_block_offset(tbl_info, region->pa)); 1554 1555 idx = core_mmu_va2idx(tbl_info, region->va); 1556 end = core_mmu_va2idx(tbl_info, region->va + region->size); 1557 1558 while (idx < end) { 1559 core_mmu_set_entry(tbl_info, idx, 0, 0); 1560 idx++; 1561 } 1562 } 1563 1564 static void set_region(struct core_mmu_table_info *tbl_info, 1565 struct tee_mmap_region *region) 1566 { 1567 unsigned int end; 1568 unsigned int idx; 1569 paddr_t pa; 1570 1571 /* va, len and pa should be block aligned */ 1572 assert(!core_mmu_get_block_offset(tbl_info, region->va)); 1573 assert(!core_mmu_get_block_offset(tbl_info, region->size)); 1574 assert(!core_mmu_get_block_offset(tbl_info, region->pa)); 1575 1576 idx = core_mmu_va2idx(tbl_info, region->va); 1577 end = core_mmu_va2idx(tbl_info, region->va + region->size); 1578 pa = region->pa; 1579 1580 while (idx < end) { 1581 core_mmu_set_entry(tbl_info, idx, pa, region->attr); 1582 idx++; 1583 pa += BIT64(tbl_info->shift); 1584 } 1585 } 1586 1587 static void set_pg_region(struct core_mmu_table_info *dir_info, 1588 struct vm_region *region, struct pgt **pgt, 1589 struct core_mmu_table_info *pg_info) 1590 { 1591 struct tee_mmap_region r = { 1592 .va = region->va, 1593 .size = region->size, 1594 .attr = region->attr, 1595 }; 1596 vaddr_t end = r.va + r.size; 1597 uint32_t pgt_attr = (r.attr & TEE_MATTR_SECURE) | TEE_MATTR_TABLE; 1598 1599 while (r.va < end) { 1600 if (!pg_info->table || 1601 r.va >= (pg_info->va_base + CORE_MMU_PGDIR_SIZE)) { 1602 /* 1603 * We're assigning a new translation table. 1604 */ 1605 unsigned int idx; 1606 1607 /* Virtual addresses must grow */ 1608 assert(r.va > pg_info->va_base); 1609 1610 idx = core_mmu_va2idx(dir_info, r.va); 1611 pg_info->va_base = core_mmu_idx2va(dir_info, idx); 1612 1613 /* 1614 * Advance pgt to va_base, note that we may need to 1615 * skip multiple page tables if there are large 1616 * holes in the vm map. 1617 */ 1618 while ((*pgt)->vabase < pg_info->va_base) { 1619 *pgt = SLIST_NEXT(*pgt, link); 1620 /* We should have allocated enough */ 1621 assert(*pgt); 1622 } 1623 assert((*pgt)->vabase == pg_info->va_base); 1624 pg_info->table = (*pgt)->tbl; 1625 1626 core_mmu_set_entry(dir_info, idx, 1627 virt_to_phys(pg_info->table), 1628 pgt_attr); 1629 } 1630 1631 r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base), 1632 end - r.va); 1633 1634 if (!(*pgt)->populated && !mobj_is_paged(region->mobj)) { 1635 size_t granule = BIT(pg_info->shift); 1636 size_t offset = r.va - region->va + region->offset; 1637 1638 r.size = MIN(r.size, 1639 mobj_get_phys_granule(region->mobj)); 1640 r.size = ROUNDUP(r.size, SMALL_PAGE_SIZE); 1641 1642 if (mobj_get_pa(region->mobj, offset, granule, 1643 &r.pa) != TEE_SUCCESS) 1644 panic("Failed to get PA of unpaged mobj"); 1645 set_region(pg_info, &r); 1646 } 1647 r.va += r.size; 1648 } 1649 } 1650 1651 static bool can_map_at_level(paddr_t paddr, vaddr_t vaddr, 1652 size_t size_left, paddr_t block_size, 1653 struct tee_mmap_region *mm __maybe_unused) 1654 { 1655 /* VA and PA are aligned to block size at current level */ 1656 if ((vaddr | paddr) & (block_size - 1)) 1657 return false; 1658 1659 /* Remainder fits into block at current level */ 1660 if (size_left < block_size) 1661 return false; 1662 1663 #ifdef CFG_WITH_PAGER 1664 /* 1665 * If pager is enabled, we need to map tee ram 1666 * regions with small pages only 1667 */ 1668 if (map_is_tee_ram(mm) && block_size != SMALL_PAGE_SIZE) 1669 return false; 1670 #endif 1671 1672 return true; 1673 } 1674 1675 void core_mmu_map_region(struct mmu_partition *prtn, struct tee_mmap_region *mm) 1676 { 1677 struct core_mmu_table_info tbl_info; 1678 unsigned int idx; 1679 vaddr_t vaddr = mm->va; 1680 paddr_t paddr = mm->pa; 1681 ssize_t size_left = mm->size; 1682 unsigned int level; 1683 bool table_found; 1684 uint32_t old_attr; 1685 1686 assert(!((vaddr | paddr) & SMALL_PAGE_MASK)); 1687 1688 while (size_left > 0) { 1689 level = CORE_MMU_BASE_TABLE_LEVEL; 1690 1691 while (true) { 1692 paddr_t block_size = 0; 1693 1694 assert(level <= CORE_MMU_PGDIR_LEVEL); 1695 1696 table_found = core_mmu_find_table(prtn, vaddr, level, 1697 &tbl_info); 1698 if (!table_found) 1699 panic("can't find table for mapping"); 1700 1701 block_size = BIT64(tbl_info.shift); 1702 1703 idx = core_mmu_va2idx(&tbl_info, vaddr); 1704 if (!can_map_at_level(paddr, vaddr, size_left, 1705 block_size, mm)) { 1706 bool secure = mm->attr & TEE_MATTR_SECURE; 1707 1708 /* 1709 * This part of the region can't be mapped at 1710 * this level. Need to go deeper. 1711 */ 1712 if (!core_mmu_entry_to_finer_grained(&tbl_info, 1713 idx, 1714 secure)) 1715 panic("Can't divide MMU entry"); 1716 level++; 1717 continue; 1718 } 1719 1720 /* We can map part of the region at current level */ 1721 core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr); 1722 if (old_attr) 1723 panic("Page is already mapped"); 1724 1725 core_mmu_set_entry(&tbl_info, idx, paddr, mm->attr); 1726 paddr += block_size; 1727 vaddr += block_size; 1728 size_left -= block_size; 1729 1730 break; 1731 } 1732 } 1733 } 1734 1735 TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages, 1736 enum teecore_memtypes memtype) 1737 { 1738 TEE_Result ret; 1739 struct core_mmu_table_info tbl_info; 1740 struct tee_mmap_region *mm; 1741 unsigned int idx; 1742 uint32_t old_attr; 1743 uint32_t exceptions; 1744 vaddr_t vaddr = vstart; 1745 size_t i; 1746 bool secure; 1747 1748 assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX)); 1749 1750 secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE; 1751 1752 if (vaddr & SMALL_PAGE_MASK) 1753 return TEE_ERROR_BAD_PARAMETERS; 1754 1755 exceptions = mmu_lock(); 1756 1757 mm = find_map_by_va((void *)vaddr); 1758 if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1)) 1759 panic("VA does not belong to any known mm region"); 1760 1761 if (!core_mmu_is_dynamic_vaspace(mm)) 1762 panic("Trying to map into static region"); 1763 1764 for (i = 0; i < num_pages; i++) { 1765 if (pages[i] & SMALL_PAGE_MASK) { 1766 ret = TEE_ERROR_BAD_PARAMETERS; 1767 goto err; 1768 } 1769 1770 while (true) { 1771 if (!core_mmu_find_table(NULL, vaddr, UINT_MAX, 1772 &tbl_info)) 1773 panic("Can't find pagetable for vaddr "); 1774 1775 idx = core_mmu_va2idx(&tbl_info, vaddr); 1776 if (tbl_info.shift == SMALL_PAGE_SHIFT) 1777 break; 1778 1779 /* This is supertable. Need to divide it. */ 1780 if (!core_mmu_entry_to_finer_grained(&tbl_info, idx, 1781 secure)) 1782 panic("Failed to spread pgdir on small tables"); 1783 } 1784 1785 core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr); 1786 if (old_attr) 1787 panic("Page is already mapped"); 1788 1789 core_mmu_set_entry(&tbl_info, idx, pages[i], 1790 core_mmu_type_to_attr(memtype)); 1791 vaddr += SMALL_PAGE_SIZE; 1792 } 1793 1794 /* 1795 * Make sure all the changes to translation tables are visible 1796 * before returning. TLB doesn't need to be invalidated as we are 1797 * guaranteed that there's no valid mapping in this range. 1798 */ 1799 core_mmu_table_write_barrier(); 1800 mmu_unlock(exceptions); 1801 1802 return TEE_SUCCESS; 1803 err: 1804 mmu_unlock(exceptions); 1805 1806 if (i) 1807 core_mmu_unmap_pages(vstart, i); 1808 1809 return ret; 1810 } 1811 1812 TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart, 1813 size_t num_pages, 1814 enum teecore_memtypes memtype) 1815 { 1816 struct core_mmu_table_info tbl_info = { }; 1817 struct tee_mmap_region *mm = NULL; 1818 unsigned int idx = 0; 1819 uint32_t old_attr = 0; 1820 uint32_t exceptions = 0; 1821 vaddr_t vaddr = vstart; 1822 paddr_t paddr = pstart; 1823 size_t i = 0; 1824 bool secure = false; 1825 1826 assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX)); 1827 1828 secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE; 1829 1830 if ((vaddr | paddr) & SMALL_PAGE_MASK) 1831 return TEE_ERROR_BAD_PARAMETERS; 1832 1833 exceptions = mmu_lock(); 1834 1835 mm = find_map_by_va((void *)vaddr); 1836 if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1)) 1837 panic("VA does not belong to any known mm region"); 1838 1839 if (!core_mmu_is_dynamic_vaspace(mm)) 1840 panic("Trying to map into static region"); 1841 1842 for (i = 0; i < num_pages; i++) { 1843 while (true) { 1844 if (!core_mmu_find_table(NULL, vaddr, UINT_MAX, 1845 &tbl_info)) 1846 panic("Can't find pagetable for vaddr "); 1847 1848 idx = core_mmu_va2idx(&tbl_info, vaddr); 1849 if (tbl_info.shift == SMALL_PAGE_SHIFT) 1850 break; 1851 1852 /* This is supertable. Need to divide it. */ 1853 if (!core_mmu_entry_to_finer_grained(&tbl_info, idx, 1854 secure)) 1855 panic("Failed to spread pgdir on small tables"); 1856 } 1857 1858 core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr); 1859 if (old_attr) 1860 panic("Page is already mapped"); 1861 1862 core_mmu_set_entry(&tbl_info, idx, paddr, 1863 core_mmu_type_to_attr(memtype)); 1864 paddr += SMALL_PAGE_SIZE; 1865 vaddr += SMALL_PAGE_SIZE; 1866 } 1867 1868 /* 1869 * Make sure all the changes to translation tables are visible 1870 * before returning. TLB doesn't need to be invalidated as we are 1871 * guaranteed that there's no valid mapping in this range. 1872 */ 1873 core_mmu_table_write_barrier(); 1874 mmu_unlock(exceptions); 1875 1876 return TEE_SUCCESS; 1877 } 1878 1879 void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages) 1880 { 1881 struct core_mmu_table_info tbl_info; 1882 struct tee_mmap_region *mm; 1883 size_t i; 1884 unsigned int idx; 1885 uint32_t exceptions; 1886 1887 exceptions = mmu_lock(); 1888 1889 mm = find_map_by_va((void *)vstart); 1890 if (!mm || !va_is_in_map(mm, vstart + num_pages * SMALL_PAGE_SIZE - 1)) 1891 panic("VA does not belong to any known mm region"); 1892 1893 if (!core_mmu_is_dynamic_vaspace(mm)) 1894 panic("Trying to unmap static region"); 1895 1896 for (i = 0; i < num_pages; i++, vstart += SMALL_PAGE_SIZE) { 1897 if (!core_mmu_find_table(NULL, vstart, UINT_MAX, &tbl_info)) 1898 panic("Can't find pagetable"); 1899 1900 if (tbl_info.shift != SMALL_PAGE_SHIFT) 1901 panic("Invalid pagetable level"); 1902 1903 idx = core_mmu_va2idx(&tbl_info, vstart); 1904 core_mmu_set_entry(&tbl_info, idx, 0, 0); 1905 } 1906 tlbi_all(); 1907 1908 mmu_unlock(exceptions); 1909 } 1910 1911 void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info, 1912 struct user_mode_ctx *uctx) 1913 { 1914 struct core_mmu_table_info pg_info = { }; 1915 struct pgt_cache *pgt_cache = &uctx->pgt_cache; 1916 struct pgt *pgt = NULL; 1917 struct pgt *p = NULL; 1918 struct vm_region *r = NULL; 1919 1920 if (TAILQ_EMPTY(&uctx->vm_info.regions)) 1921 return; /* Nothing to map */ 1922 1923 /* 1924 * Allocate all page tables in advance. 1925 */ 1926 pgt_get_all(uctx); 1927 pgt = SLIST_FIRST(pgt_cache); 1928 1929 core_mmu_set_info_table(&pg_info, dir_info->level + 1, 0, NULL); 1930 1931 TAILQ_FOREACH(r, &uctx->vm_info.regions, link) 1932 set_pg_region(dir_info, r, &pgt, &pg_info); 1933 /* Record that the translation tables now are populated. */ 1934 SLIST_FOREACH(p, pgt_cache, link) { 1935 p->populated = true; 1936 if (p == pgt) 1937 break; 1938 } 1939 assert(p == pgt); 1940 } 1941 1942 TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr, 1943 size_t len) 1944 { 1945 struct core_mmu_table_info tbl_info = { }; 1946 struct tee_mmap_region *res_map = NULL; 1947 struct tee_mmap_region *map = NULL; 1948 paddr_t pa = virt_to_phys(addr); 1949 size_t granule = 0; 1950 ptrdiff_t i = 0; 1951 paddr_t p = 0; 1952 size_t l = 0; 1953 1954 map = find_map_by_type_and_pa(type, pa, len); 1955 if (!map) 1956 return TEE_ERROR_GENERIC; 1957 1958 res_map = find_map_by_type(MEM_AREA_RES_VASPACE); 1959 if (!res_map) 1960 return TEE_ERROR_GENERIC; 1961 if (!core_mmu_find_table(NULL, res_map->va, UINT_MAX, &tbl_info)) 1962 return TEE_ERROR_GENERIC; 1963 granule = BIT(tbl_info.shift); 1964 1965 if (map < static_memory_map || 1966 map >= static_memory_map + ARRAY_SIZE(static_memory_map)) 1967 return TEE_ERROR_GENERIC; 1968 i = map - static_memory_map; 1969 1970 /* Check that we have a full match */ 1971 p = ROUNDDOWN(pa, granule); 1972 l = ROUNDUP(len + pa - p, granule); 1973 if (map->pa != p || map->size != l) 1974 return TEE_ERROR_GENERIC; 1975 1976 clear_region(&tbl_info, map); 1977 tlbi_all(); 1978 1979 /* If possible remove the va range from res_map */ 1980 if (res_map->va - map->size == map->va) { 1981 res_map->va -= map->size; 1982 res_map->size += map->size; 1983 } 1984 1985 /* Remove the entry. */ 1986 memmove(map, map + 1, 1987 (ARRAY_SIZE(static_memory_map) - i - 1) * sizeof(*map)); 1988 1989 /* Clear the last new entry in case it was used */ 1990 memset(static_memory_map + ARRAY_SIZE(static_memory_map) - 1, 1991 0, sizeof(*map)); 1992 1993 return TEE_SUCCESS; 1994 } 1995 1996 struct tee_mmap_region * 1997 core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len) 1998 { 1999 struct tee_mmap_region *map = NULL; 2000 struct tee_mmap_region *map_found = NULL; 2001 2002 if (!len) 2003 return NULL; 2004 2005 for (map = get_memory_map(); !core_mmap_is_end_of_table(map); map++) { 2006 if (map->type != type) 2007 continue; 2008 2009 if (map_found) 2010 return NULL; 2011 2012 map_found = map; 2013 } 2014 2015 if (!map_found || map_found->size < len) 2016 return NULL; 2017 2018 return map_found; 2019 } 2020 2021 void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len) 2022 { 2023 struct core_mmu_table_info tbl_info; 2024 struct tee_mmap_region *map; 2025 size_t n; 2026 size_t granule; 2027 paddr_t p; 2028 size_t l; 2029 2030 if (!len) 2031 return NULL; 2032 2033 if (!core_mmu_check_end_pa(addr, len)) 2034 return NULL; 2035 2036 /* Check if the memory is already mapped */ 2037 map = find_map_by_type_and_pa(type, addr, len); 2038 if (map && pbuf_inside_map_area(addr, len, map)) 2039 return (void *)(vaddr_t)(map->va + addr - map->pa); 2040 2041 /* Find the reserved va space used for late mappings */ 2042 map = find_map_by_type(MEM_AREA_RES_VASPACE); 2043 if (!map) 2044 return NULL; 2045 2046 if (!core_mmu_find_table(NULL, map->va, UINT_MAX, &tbl_info)) 2047 return NULL; 2048 2049 granule = BIT64(tbl_info.shift); 2050 p = ROUNDDOWN(addr, granule); 2051 l = ROUNDUP(len + addr - p, granule); 2052 2053 /* Ban overflowing virtual addresses */ 2054 if (map->size < l) 2055 return NULL; 2056 2057 /* 2058 * Something is wrong, we can't fit the va range into the selected 2059 * table. The reserved va range is possibly missaligned with 2060 * granule. 2061 */ 2062 if (core_mmu_va2idx(&tbl_info, map->va + len) >= tbl_info.num_entries) 2063 return NULL; 2064 2065 /* Find end of the memory map */ 2066 n = 0; 2067 while (!core_mmap_is_end_of_table(static_memory_map + n)) 2068 n++; 2069 2070 if (n < (ARRAY_SIZE(static_memory_map) - 1)) { 2071 /* There's room for another entry */ 2072 static_memory_map[n].va = map->va; 2073 static_memory_map[n].size = l; 2074 static_memory_map[n + 1].type = MEM_AREA_END; 2075 map->va += l; 2076 map->size -= l; 2077 map = static_memory_map + n; 2078 } else { 2079 /* 2080 * There isn't room for another entry, steal the reserved 2081 * entry as it's not useful for anything else any longer. 2082 */ 2083 map->size = l; 2084 } 2085 map->type = type; 2086 map->region_size = granule; 2087 map->attr = core_mmu_type_to_attr(type); 2088 map->pa = p; 2089 2090 set_region(&tbl_info, map); 2091 2092 /* Make sure the new entry is visible before continuing. */ 2093 core_mmu_table_write_barrier(); 2094 2095 return (void *)(vaddr_t)(map->va + addr - map->pa); 2096 } 2097 2098 #ifdef CFG_WITH_PAGER 2099 static vaddr_t get_linear_map_end_va(void) 2100 { 2101 /* this is synced with the generic linker file kern.ld.S */ 2102 return (vaddr_t)__heap2_end; 2103 } 2104 2105 static paddr_t get_linear_map_end_pa(void) 2106 { 2107 return get_linear_map_end_va() - boot_mmu_config.map_offset; 2108 } 2109 #endif 2110 2111 #if defined(CFG_TEE_CORE_DEBUG) 2112 static void check_pa_matches_va(void *va, paddr_t pa) 2113 { 2114 TEE_Result res = TEE_ERROR_GENERIC; 2115 vaddr_t v = (vaddr_t)va; 2116 paddr_t p = 0; 2117 struct core_mmu_table_info ti __maybe_unused = { }; 2118 2119 if (core_mmu_user_va_range_is_defined()) { 2120 vaddr_t user_va_base = 0; 2121 size_t user_va_size = 0; 2122 2123 core_mmu_get_user_va_range(&user_va_base, &user_va_size); 2124 if (v >= user_va_base && 2125 v <= (user_va_base - 1 + user_va_size)) { 2126 if (!core_mmu_user_mapping_is_active()) { 2127 if (pa) 2128 panic("issue in linear address space"); 2129 return; 2130 } 2131 2132 res = vm_va2pa(to_user_mode_ctx(thread_get_tsd()->ctx), 2133 va, &p); 2134 if (res == TEE_ERROR_NOT_SUPPORTED) 2135 return; 2136 if (res == TEE_SUCCESS && pa != p) 2137 panic("bad pa"); 2138 if (res != TEE_SUCCESS && pa) 2139 panic("false pa"); 2140 return; 2141 } 2142 } 2143 #ifdef CFG_WITH_PAGER 2144 if (is_unpaged(va)) { 2145 if (v - boot_mmu_config.map_offset != pa) 2146 panic("issue in linear address space"); 2147 return; 2148 } 2149 2150 if (tee_pager_get_table_info(v, &ti)) { 2151 uint32_t a; 2152 2153 /* 2154 * Lookups in the page table managed by the pager is 2155 * dangerous for addresses in the paged area as those pages 2156 * changes all the time. But some ranges are safe, 2157 * rw-locked areas when the page is populated for instance. 2158 */ 2159 core_mmu_get_entry(&ti, core_mmu_va2idx(&ti, v), &p, &a); 2160 if (a & TEE_MATTR_VALID_BLOCK) { 2161 paddr_t mask = BIT64(ti.shift) - 1; 2162 2163 p |= v & mask; 2164 if (pa != p) 2165 panic(); 2166 } else { 2167 if (pa) 2168 panic(); 2169 } 2170 return; 2171 } 2172 #endif 2173 2174 if (!core_va2pa_helper(va, &p)) { 2175 /* Verfiy only the static mapping (case non null phys addr) */ 2176 if (p && pa != p) { 2177 DMSG("va %p maps 0x%" PRIxPA ", expect 0x%" PRIxPA, 2178 va, p, pa); 2179 panic(); 2180 } 2181 } else { 2182 if (pa) { 2183 DMSG("va %p unmapped, expect 0x%" PRIxPA, va, pa); 2184 panic(); 2185 } 2186 } 2187 } 2188 #else 2189 static void check_pa_matches_va(void *va __unused, paddr_t pa __unused) 2190 { 2191 } 2192 #endif 2193 2194 paddr_t virt_to_phys(void *va) 2195 { 2196 paddr_t pa = 0; 2197 2198 if (!arch_va2pa_helper(va, &pa)) 2199 pa = 0; 2200 check_pa_matches_va(va, pa); 2201 return pa; 2202 } 2203 2204 #if defined(CFG_TEE_CORE_DEBUG) 2205 static void check_va_matches_pa(paddr_t pa, void *va) 2206 { 2207 paddr_t p = 0; 2208 2209 if (!va) 2210 return; 2211 2212 p = virt_to_phys(va); 2213 if (p != pa) { 2214 DMSG("va %p maps 0x%" PRIxPA " expect 0x%" PRIxPA, va, p, pa); 2215 panic(); 2216 } 2217 } 2218 #else 2219 static void check_va_matches_pa(paddr_t pa __unused, void *va __unused) 2220 { 2221 } 2222 #endif 2223 2224 static void *phys_to_virt_ts_vaspace(paddr_t pa, size_t len) 2225 { 2226 if (!core_mmu_user_mapping_is_active()) 2227 return NULL; 2228 2229 return vm_pa2va(to_user_mode_ctx(thread_get_tsd()->ctx), pa, len); 2230 } 2231 2232 #ifdef CFG_WITH_PAGER 2233 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len) 2234 { 2235 paddr_t end_pa = 0; 2236 2237 if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa)) 2238 return NULL; 2239 2240 if (pa >= TEE_LOAD_ADDR && pa < get_linear_map_end_pa()) { 2241 if (end_pa > get_linear_map_end_pa()) 2242 return NULL; 2243 return (void *)(vaddr_t)(pa + boot_mmu_config.map_offset); 2244 } 2245 2246 return tee_pager_phys_to_virt(pa, len); 2247 } 2248 #else 2249 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len) 2250 { 2251 struct tee_mmap_region *mmap = NULL; 2252 2253 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM, pa, len); 2254 if (!mmap) 2255 mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RW, pa, len); 2256 if (!mmap) 2257 mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RO, pa, len); 2258 if (!mmap) 2259 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RW, pa, len); 2260 if (!mmap) 2261 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RO, pa, len); 2262 if (!mmap) 2263 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RX, pa, len); 2264 /* 2265 * Note that MEM_AREA_INIT_RAM_RO and MEM_AREA_INIT_RAM_RX are only 2266 * used with pager and not needed here. 2267 */ 2268 return map_pa2va(mmap, pa, len); 2269 } 2270 #endif 2271 2272 void *phys_to_virt(paddr_t pa, enum teecore_memtypes m, size_t len) 2273 { 2274 void *va = NULL; 2275 2276 switch (m) { 2277 case MEM_AREA_TS_VASPACE: 2278 va = phys_to_virt_ts_vaspace(pa, len); 2279 break; 2280 case MEM_AREA_TEE_RAM: 2281 case MEM_AREA_TEE_RAM_RX: 2282 case MEM_AREA_TEE_RAM_RO: 2283 case MEM_AREA_TEE_RAM_RW: 2284 case MEM_AREA_NEX_RAM_RO: 2285 case MEM_AREA_NEX_RAM_RW: 2286 va = phys_to_virt_tee_ram(pa, len); 2287 break; 2288 case MEM_AREA_SHM_VASPACE: 2289 /* Find VA from PA in dynamic SHM is not yet supported */ 2290 va = NULL; 2291 break; 2292 default: 2293 va = map_pa2va(find_map_by_type_and_pa(m, pa, len), pa, len); 2294 } 2295 if (m != MEM_AREA_SEC_RAM_OVERALL) 2296 check_va_matches_pa(pa, va); 2297 return va; 2298 } 2299 2300 void *phys_to_virt_io(paddr_t pa, size_t len) 2301 { 2302 struct tee_mmap_region *map = NULL; 2303 void *va = NULL; 2304 2305 map = find_map_by_type_and_pa(MEM_AREA_IO_SEC, pa, len); 2306 if (!map) 2307 map = find_map_by_type_and_pa(MEM_AREA_IO_NSEC, pa, len); 2308 if (!map) 2309 return NULL; 2310 va = map_pa2va(map, pa, len); 2311 check_va_matches_pa(pa, va); 2312 return va; 2313 } 2314 2315 vaddr_t core_mmu_get_va(paddr_t pa, enum teecore_memtypes type, size_t len) 2316 { 2317 if (cpu_mmu_enabled()) 2318 return (vaddr_t)phys_to_virt(pa, type, len); 2319 2320 return (vaddr_t)pa; 2321 } 2322 2323 #ifdef CFG_WITH_PAGER 2324 bool is_unpaged(void *va) 2325 { 2326 vaddr_t v = (vaddr_t)va; 2327 2328 return v >= VCORE_START_VA && v < get_linear_map_end_va(); 2329 } 2330 #else 2331 bool is_unpaged(void *va __unused) 2332 { 2333 return true; 2334 } 2335 #endif 2336 2337 void core_mmu_init_virtualization(void) 2338 { 2339 paddr_t b1 = 0; 2340 paddr_size_t s1 = 0; 2341 2342 static_assert(ARRAY_SIZE(secure_only) <= 2); 2343 if (ARRAY_SIZE(secure_only) == 2) { 2344 b1 = secure_only[1].paddr; 2345 s1 = secure_only[1].size; 2346 } 2347 virt_init_memory(static_memory_map, secure_only[0].paddr, 2348 secure_only[0].size, b1, s1); 2349 } 2350 2351 vaddr_t io_pa_or_va(struct io_pa_va *p, size_t len) 2352 { 2353 assert(p->pa); 2354 if (cpu_mmu_enabled()) { 2355 if (!p->va) 2356 p->va = (vaddr_t)phys_to_virt_io(p->pa, len); 2357 assert(p->va); 2358 return p->va; 2359 } 2360 return p->pa; 2361 } 2362 2363 vaddr_t io_pa_or_va_secure(struct io_pa_va *p, size_t len) 2364 { 2365 assert(p->pa); 2366 if (cpu_mmu_enabled()) { 2367 if (!p->va) 2368 p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_SEC, 2369 len); 2370 assert(p->va); 2371 return p->va; 2372 } 2373 return p->pa; 2374 } 2375 2376 vaddr_t io_pa_or_va_nsec(struct io_pa_va *p, size_t len) 2377 { 2378 assert(p->pa); 2379 if (cpu_mmu_enabled()) { 2380 if (!p->va) 2381 p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_NSEC, 2382 len); 2383 assert(p->va); 2384 return p->va; 2385 } 2386 return p->pa; 2387 } 2388 2389 #ifdef CFG_CORE_RESERVED_SHM 2390 static TEE_Result teecore_init_pub_ram(void) 2391 { 2392 vaddr_t s = 0; 2393 vaddr_t e = 0; 2394 2395 /* get virtual addr/size of NSec shared mem allocated from teecore */ 2396 core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e); 2397 2398 if (s >= e || s & SMALL_PAGE_MASK || e & SMALL_PAGE_MASK) 2399 panic("invalid PUB RAM"); 2400 2401 /* extra check: we could rely on core_mmu_get_mem_by_type() */ 2402 if (!tee_vbuf_is_non_sec(s, e - s)) 2403 panic("PUB RAM is not non-secure"); 2404 2405 #ifdef CFG_PL310 2406 /* Allocate statically the l2cc mutex */ 2407 tee_l2cc_store_mutex_boot_pa(virt_to_phys((void *)s)); 2408 s += sizeof(uint32_t); /* size of a pl310 mutex */ 2409 s = ROUNDUP(s, SMALL_PAGE_SIZE); /* keep required alignment */ 2410 #endif 2411 2412 default_nsec_shm_paddr = virt_to_phys((void *)s); 2413 default_nsec_shm_size = e - s; 2414 2415 return TEE_SUCCESS; 2416 } 2417 early_init(teecore_init_pub_ram); 2418 #endif /*CFG_CORE_RESERVED_SHM*/ 2419 2420 void core_mmu_init_ta_ram(void) 2421 { 2422 vaddr_t s = 0; 2423 vaddr_t e = 0; 2424 paddr_t ps = 0; 2425 size_t size = 0; 2426 2427 /* 2428 * Get virtual addr/size of RAM where TA are loaded/executedNSec 2429 * shared mem allocated from teecore. 2430 */ 2431 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) 2432 virt_get_ta_ram(&s, &e); 2433 else 2434 core_mmu_get_mem_by_type(MEM_AREA_TA_RAM, &s, &e); 2435 2436 ps = virt_to_phys((void *)s); 2437 size = e - s; 2438 2439 if (!ps || (ps & CORE_MMU_USER_CODE_MASK) || 2440 !size || (size & CORE_MMU_USER_CODE_MASK)) 2441 panic("invalid TA RAM"); 2442 2443 /* extra check: we could rely on core_mmu_get_mem_by_type() */ 2444 if (!tee_pbuf_is_sec(ps, size)) 2445 panic("TA RAM is not secure"); 2446 2447 if (!tee_mm_is_empty(&tee_mm_sec_ddr)) 2448 panic("TA RAM pool is not empty"); 2449 2450 /* remove previous config and init TA ddr memory pool */ 2451 tee_mm_final(&tee_mm_sec_ddr); 2452 tee_mm_init(&tee_mm_sec_ddr, ps, size, CORE_MMU_USER_CODE_SHIFT, 2453 TEE_MM_POOL_NO_FLAGS); 2454 } 2455