1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2016, 2022 Linaro Limited 4 * Copyright (c) 2014, STMicroelectronics International N.V. 5 * Copyright (c) 2022, Arm Limited and Contributors. All rights reserved. 6 */ 7 8 #include <assert.h> 9 #include <config.h> 10 #include <kernel/boot.h> 11 #include <kernel/dt.h> 12 #include <kernel/linker.h> 13 #include <kernel/panic.h> 14 #include <kernel/spinlock.h> 15 #include <kernel/tee_l2cc_mutex.h> 16 #include <kernel/tee_misc.h> 17 #include <kernel/tlb_helpers.h> 18 #include <kernel/user_mode_ctx.h> 19 #include <kernel/virtualization.h> 20 #include <libfdt.h> 21 #include <mm/core_memprot.h> 22 #include <mm/core_mmu.h> 23 #include <mm/mobj.h> 24 #include <mm/pgt_cache.h> 25 #include <mm/tee_pager.h> 26 #include <mm/vm.h> 27 #include <platform_config.h> 28 #include <string.h> 29 #include <trace.h> 30 #include <util.h> 31 32 #ifndef DEBUG_XLAT_TABLE 33 #define DEBUG_XLAT_TABLE 0 34 #endif 35 36 #define SHM_VASPACE_SIZE (1024 * 1024 * 32) 37 38 /* 39 * These variables are initialized before .bss is cleared. To avoid 40 * resetting them when .bss is cleared we're storing them in .data instead, 41 * even if they initially are zero. 42 */ 43 44 #ifdef CFG_CORE_RESERVED_SHM 45 /* Default NSec shared memory allocated from NSec world */ 46 unsigned long default_nsec_shm_size __nex_bss; 47 unsigned long default_nsec_shm_paddr __nex_bss; 48 #endif 49 50 static struct tee_mmap_region static_memory_map[CFG_MMAP_REGIONS 51 #ifdef CFG_CORE_ASLR 52 + 1 53 #endif 54 + 1] __nex_bss; 55 56 /* Define the platform's memory layout. */ 57 struct memaccess_area { 58 paddr_t paddr; 59 size_t size; 60 }; 61 62 #define MEMACCESS_AREA(a, s) { .paddr = a, .size = s } 63 64 static struct memaccess_area secure_only[] __nex_data = { 65 #ifdef TRUSTED_SRAM_BASE 66 MEMACCESS_AREA(TRUSTED_SRAM_BASE, TRUSTED_SRAM_SIZE), 67 #endif 68 MEMACCESS_AREA(TRUSTED_DRAM_BASE, TRUSTED_DRAM_SIZE), 69 }; 70 71 static struct memaccess_area nsec_shared[] __nex_data = { 72 #ifdef CFG_CORE_RESERVED_SHM 73 MEMACCESS_AREA(TEE_SHMEM_START, TEE_SHMEM_SIZE), 74 #endif 75 }; 76 77 #if defined(CFG_SECURE_DATA_PATH) 78 static const char *tz_sdp_match = "linaro,secure-heap"; 79 static struct memaccess_area sec_sdp; 80 #ifdef CFG_TEE_SDP_MEM_BASE 81 register_sdp_mem(CFG_TEE_SDP_MEM_BASE, CFG_TEE_SDP_MEM_SIZE); 82 #endif 83 #ifdef TEE_SDP_TEST_MEM_BASE 84 register_sdp_mem(TEE_SDP_TEST_MEM_BASE, TEE_SDP_TEST_MEM_SIZE); 85 #endif 86 #endif 87 88 #ifdef CFG_CORE_RWDATA_NOEXEC 89 register_phys_mem_ul(MEM_AREA_TEE_RAM_RO, TEE_RAM_START, 90 VCORE_UNPG_RX_PA - TEE_RAM_START); 91 register_phys_mem_ul(MEM_AREA_TEE_RAM_RX, VCORE_UNPG_RX_PA, 92 VCORE_UNPG_RX_SZ_UNSAFE); 93 register_phys_mem_ul(MEM_AREA_TEE_RAM_RO, VCORE_UNPG_RO_PA, 94 VCORE_UNPG_RO_SZ_UNSAFE); 95 96 #ifdef CFG_VIRTUALIZATION 97 register_phys_mem_ul(MEM_AREA_NEX_RAM_RO, VCORE_UNPG_RW_PA, 98 VCORE_UNPG_RW_SZ_UNSAFE); 99 register_phys_mem_ul(MEM_AREA_NEX_RAM_RW, VCORE_NEX_RW_PA, 100 VCORE_NEX_RW_SZ_UNSAFE); 101 #else 102 register_phys_mem_ul(MEM_AREA_TEE_RAM_RW, VCORE_UNPG_RW_PA, 103 VCORE_UNPG_RW_SZ_UNSAFE); 104 #endif 105 106 #ifdef CFG_WITH_PAGER 107 register_phys_mem_ul(MEM_AREA_INIT_RAM_RX, VCORE_INIT_RX_PA, 108 VCORE_INIT_RX_SZ_UNSAFE); 109 register_phys_mem_ul(MEM_AREA_INIT_RAM_RO, VCORE_INIT_RO_PA, 110 VCORE_INIT_RO_SZ_UNSAFE); 111 #endif /*CFG_WITH_PAGER*/ 112 #else /*!CFG_CORE_RWDATA_NOEXEC*/ 113 register_phys_mem(MEM_AREA_TEE_RAM, TEE_RAM_START, TEE_RAM_PH_SIZE); 114 #endif /*!CFG_CORE_RWDATA_NOEXEC*/ 115 116 #ifdef CFG_VIRTUALIZATION 117 register_phys_mem(MEM_AREA_SEC_RAM_OVERALL, TRUSTED_DRAM_BASE, 118 TRUSTED_DRAM_SIZE); 119 #endif 120 121 #if defined(CFG_CORE_SANITIZE_KADDRESS) && defined(CFG_WITH_PAGER) 122 /* Asan ram is part of MEM_AREA_TEE_RAM_RW when pager is disabled */ 123 register_phys_mem_ul(MEM_AREA_TEE_ASAN, ASAN_MAP_PA, ASAN_MAP_SZ); 124 #endif 125 126 #ifndef CFG_VIRTUALIZATION 127 /* Every guest will have own TA RAM if virtualization support is enabled */ 128 register_phys_mem(MEM_AREA_TA_RAM, TA_RAM_START, TA_RAM_SIZE); 129 #endif 130 #ifdef CFG_CORE_RESERVED_SHM 131 register_phys_mem(MEM_AREA_NSEC_SHM, TEE_SHMEM_START, TEE_SHMEM_SIZE); 132 #endif 133 134 static unsigned int mmu_spinlock; 135 136 static uint32_t mmu_lock(void) 137 { 138 return cpu_spin_lock_xsave(&mmu_spinlock); 139 } 140 141 static void mmu_unlock(uint32_t exceptions) 142 { 143 cpu_spin_unlock_xrestore(&mmu_spinlock, exceptions); 144 } 145 146 static struct tee_mmap_region *get_memory_map(void) 147 { 148 if (IS_ENABLED(CFG_VIRTUALIZATION)) { 149 struct tee_mmap_region *map = virt_get_memory_map(); 150 151 if (map) 152 return map; 153 } 154 155 return static_memory_map; 156 } 157 158 static bool _pbuf_intersects(struct memaccess_area *a, size_t alen, 159 paddr_t pa, size_t size) 160 { 161 size_t n; 162 163 for (n = 0; n < alen; n++) 164 if (core_is_buffer_intersect(pa, size, a[n].paddr, a[n].size)) 165 return true; 166 return false; 167 } 168 169 #define pbuf_intersects(a, pa, size) \ 170 _pbuf_intersects((a), ARRAY_SIZE(a), (pa), (size)) 171 172 static bool _pbuf_is_inside(struct memaccess_area *a, size_t alen, 173 paddr_t pa, size_t size) 174 { 175 size_t n; 176 177 for (n = 0; n < alen; n++) 178 if (core_is_buffer_inside(pa, size, a[n].paddr, a[n].size)) 179 return true; 180 return false; 181 } 182 183 #define pbuf_is_inside(a, pa, size) \ 184 _pbuf_is_inside((a), ARRAY_SIZE(a), (pa), (size)) 185 186 static bool pa_is_in_map(struct tee_mmap_region *map, paddr_t pa, size_t len) 187 { 188 paddr_t end_pa = 0; 189 190 if (!map) 191 return false; 192 193 if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa)) 194 return false; 195 196 return (pa >= map->pa && end_pa <= map->pa + map->size - 1); 197 } 198 199 static bool va_is_in_map(struct tee_mmap_region *map, vaddr_t va) 200 { 201 if (!map) 202 return false; 203 return (va >= map->va && va <= (map->va + map->size - 1)); 204 } 205 206 /* check if target buffer fits in a core default map area */ 207 static bool pbuf_inside_map_area(unsigned long p, size_t l, 208 struct tee_mmap_region *map) 209 { 210 return core_is_buffer_inside(p, l, map->pa, map->size); 211 } 212 213 static struct tee_mmap_region *find_map_by_type(enum teecore_memtypes type) 214 { 215 struct tee_mmap_region *map; 216 217 for (map = get_memory_map(); !core_mmap_is_end_of_table(map); map++) 218 if (map->type == type) 219 return map; 220 return NULL; 221 } 222 223 static struct tee_mmap_region * 224 find_map_by_type_and_pa(enum teecore_memtypes type, paddr_t pa, size_t len) 225 { 226 struct tee_mmap_region *map; 227 228 for (map = get_memory_map(); !core_mmap_is_end_of_table(map); map++) { 229 if (map->type != type) 230 continue; 231 if (pa_is_in_map(map, pa, len)) 232 return map; 233 } 234 return NULL; 235 } 236 237 static struct tee_mmap_region *find_map_by_va(void *va) 238 { 239 struct tee_mmap_region *map = get_memory_map(); 240 unsigned long a = (unsigned long)va; 241 242 while (!core_mmap_is_end_of_table(map)) { 243 if (a >= map->va && a <= (map->va - 1 + map->size)) 244 return map; 245 map++; 246 } 247 return NULL; 248 } 249 250 static struct tee_mmap_region *find_map_by_pa(unsigned long pa) 251 { 252 struct tee_mmap_region *map = get_memory_map(); 253 254 while (!core_mmap_is_end_of_table(map)) { 255 if (pa >= map->pa && pa <= (map->pa + map->size - 1)) 256 return map; 257 map++; 258 } 259 return NULL; 260 } 261 262 #if defined(CFG_SECURE_DATA_PATH) 263 static bool dtb_get_sdp_region(void) 264 { 265 void *fdt = NULL; 266 int node = 0; 267 int tmp_node = 0; 268 paddr_t tmp_addr = 0; 269 size_t tmp_size = 0; 270 271 if (!IS_ENABLED(CFG_EMBED_DTB)) 272 return false; 273 274 fdt = get_embedded_dt(); 275 if (!fdt) 276 panic("No DTB found"); 277 278 node = fdt_node_offset_by_compatible(fdt, -1, tz_sdp_match); 279 if (node < 0) { 280 DMSG("No %s compatible node found", tz_sdp_match); 281 return false; 282 } 283 tmp_node = node; 284 while (tmp_node >= 0) { 285 tmp_node = fdt_node_offset_by_compatible(fdt, tmp_node, 286 tz_sdp_match); 287 if (tmp_node >= 0) 288 DMSG("Ignore SDP pool node %s, supports only 1 node", 289 fdt_get_name(fdt, tmp_node, NULL)); 290 } 291 292 tmp_addr = _fdt_reg_base_address(fdt, node); 293 if (tmp_addr == DT_INFO_INVALID_REG) { 294 EMSG("%s: Unable to get base addr from DT", tz_sdp_match); 295 return false; 296 } 297 298 tmp_size = _fdt_reg_size(fdt, node); 299 if (tmp_size == DT_INFO_INVALID_REG_SIZE) { 300 EMSG("%s: Unable to get size of base addr from DT", 301 tz_sdp_match); 302 return false; 303 } 304 305 sec_sdp.paddr = tmp_addr; 306 sec_sdp.size = tmp_size; 307 308 return true; 309 } 310 #endif 311 312 #if defined(CFG_CORE_DYN_SHM) || defined(CFG_SECURE_DATA_PATH) 313 static bool pbuf_is_special_mem(paddr_t pbuf, size_t len, 314 const struct core_mmu_phys_mem *start, 315 const struct core_mmu_phys_mem *end) 316 { 317 const struct core_mmu_phys_mem *mem; 318 319 for (mem = start; mem < end; mem++) { 320 if (core_is_buffer_inside(pbuf, len, mem->addr, mem->size)) 321 return true; 322 } 323 324 return false; 325 } 326 #endif 327 328 #ifdef CFG_CORE_DYN_SHM 329 static void carve_out_phys_mem(struct core_mmu_phys_mem **mem, size_t *nelems, 330 paddr_t pa, size_t size) 331 { 332 struct core_mmu_phys_mem *m = *mem; 333 size_t n = 0; 334 335 while (true) { 336 if (n >= *nelems) { 337 DMSG("No need to carve out %#" PRIxPA " size %#zx", 338 pa, size); 339 return; 340 } 341 if (core_is_buffer_inside(pa, size, m[n].addr, m[n].size)) 342 break; 343 if (!core_is_buffer_outside(pa, size, m[n].addr, m[n].size)) 344 panic(); 345 n++; 346 } 347 348 if (pa == m[n].addr && size == m[n].size) { 349 /* Remove this entry */ 350 (*nelems)--; 351 memmove(m + n, m + n + 1, sizeof(*m) * (*nelems - n)); 352 m = nex_realloc(m, sizeof(*m) * *nelems); 353 if (!m) 354 panic(); 355 *mem = m; 356 } else if (pa == m[n].addr) { 357 m[n].addr += size; 358 m[n].size -= size; 359 } else if ((pa + size) == (m[n].addr + m[n].size)) { 360 m[n].size -= size; 361 } else { 362 /* Need to split the memory entry */ 363 m = nex_realloc(m, sizeof(*m) * (*nelems + 1)); 364 if (!m) 365 panic(); 366 *mem = m; 367 memmove(m + n + 1, m + n, sizeof(*m) * (*nelems - n)); 368 (*nelems)++; 369 m[n].size = pa - m[n].addr; 370 m[n + 1].size -= size + m[n].size; 371 m[n + 1].addr = pa + size; 372 } 373 } 374 375 static void check_phys_mem_is_outside(struct core_mmu_phys_mem *start, 376 size_t nelems, 377 struct tee_mmap_region *map) 378 { 379 size_t n; 380 381 for (n = 0; n < nelems; n++) { 382 if (!core_is_buffer_outside(start[n].addr, start[n].size, 383 map->pa, map->size)) { 384 EMSG("Non-sec mem (%#" PRIxPA ":%#" PRIxPASZ 385 ") overlaps map (type %d %#" PRIxPA ":%#zx)", 386 start[n].addr, start[n].size, 387 map->type, map->pa, map->size); 388 panic(); 389 } 390 } 391 } 392 393 static const struct core_mmu_phys_mem *discovered_nsec_ddr_start __nex_bss; 394 static size_t discovered_nsec_ddr_nelems __nex_bss; 395 396 static int cmp_pmem_by_addr(const void *a, const void *b) 397 { 398 const struct core_mmu_phys_mem *pmem_a = a; 399 const struct core_mmu_phys_mem *pmem_b = b; 400 401 return CMP_TRILEAN(pmem_a->addr, pmem_b->addr); 402 } 403 404 void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start, 405 size_t nelems) 406 { 407 struct core_mmu_phys_mem *m = start; 408 size_t num_elems = nelems; 409 struct tee_mmap_region *map = static_memory_map; 410 const struct core_mmu_phys_mem __maybe_unused *pmem; 411 412 assert(!discovered_nsec_ddr_start); 413 assert(m && num_elems); 414 415 qsort(m, num_elems, sizeof(*m), cmp_pmem_by_addr); 416 417 /* 418 * Non-secure shared memory and also secure data 419 * path memory are supposed to reside inside 420 * non-secure memory. Since NSEC_SHM and SDP_MEM 421 * are used for a specific purpose make holes for 422 * those memory in the normal non-secure memory. 423 * 424 * This has to be done since for instance QEMU 425 * isn't aware of which memory range in the 426 * non-secure memory is used for NSEC_SHM. 427 */ 428 429 #ifdef CFG_SECURE_DATA_PATH 430 if (dtb_get_sdp_region()) 431 carve_out_phys_mem(&m, &num_elems, sec_sdp.paddr, sec_sdp.size); 432 433 for (pmem = phys_sdp_mem_begin; pmem < phys_sdp_mem_end; pmem++) 434 carve_out_phys_mem(&m, &num_elems, pmem->addr, pmem->size); 435 #endif 436 437 carve_out_phys_mem(&m, &num_elems, TEE_RAM_START, TEE_RAM_PH_SIZE); 438 carve_out_phys_mem(&m, &num_elems, TA_RAM_START, TA_RAM_SIZE); 439 440 for (map = static_memory_map; !core_mmap_is_end_of_table(map); map++) { 441 switch (map->type) { 442 case MEM_AREA_NSEC_SHM: 443 carve_out_phys_mem(&m, &num_elems, map->pa, map->size); 444 break; 445 case MEM_AREA_EXT_DT: 446 case MEM_AREA_RES_VASPACE: 447 case MEM_AREA_SHM_VASPACE: 448 case MEM_AREA_TS_VASPACE: 449 case MEM_AREA_PAGER_VASPACE: 450 break; 451 default: 452 check_phys_mem_is_outside(m, num_elems, map); 453 } 454 } 455 456 discovered_nsec_ddr_start = m; 457 discovered_nsec_ddr_nelems = num_elems; 458 459 if (!core_mmu_check_end_pa(m[num_elems - 1].addr, 460 m[num_elems - 1].size)) 461 panic(); 462 } 463 464 static bool get_discovered_nsec_ddr(const struct core_mmu_phys_mem **start, 465 const struct core_mmu_phys_mem **end) 466 { 467 if (!discovered_nsec_ddr_start) 468 return false; 469 470 *start = discovered_nsec_ddr_start; 471 *end = discovered_nsec_ddr_start + discovered_nsec_ddr_nelems; 472 473 return true; 474 } 475 476 static bool pbuf_is_nsec_ddr(paddr_t pbuf, size_t len) 477 { 478 const struct core_mmu_phys_mem *start; 479 const struct core_mmu_phys_mem *end; 480 481 if (!get_discovered_nsec_ddr(&start, &end)) 482 return false; 483 484 return pbuf_is_special_mem(pbuf, len, start, end); 485 } 486 487 bool core_mmu_nsec_ddr_is_defined(void) 488 { 489 const struct core_mmu_phys_mem *start; 490 const struct core_mmu_phys_mem *end; 491 492 if (!get_discovered_nsec_ddr(&start, &end)) 493 return false; 494 495 return start != end; 496 } 497 #else 498 static bool pbuf_is_nsec_ddr(paddr_t pbuf __unused, size_t len __unused) 499 { 500 return false; 501 } 502 #endif /*CFG_CORE_DYN_SHM*/ 503 504 #define MSG_MEM_INSTERSECT(pa1, sz1, pa2, sz2) \ 505 EMSG("[%" PRIxPA " %" PRIx64 "] intersects [%" PRIxPA " %" PRIx64 "]", \ 506 pa1, (uint64_t)pa1 + (sz1), pa2, (uint64_t)pa2 + (sz2)) 507 508 #ifdef CFG_SECURE_DATA_PATH 509 static bool pbuf_is_sdp_mem(paddr_t pbuf, size_t len) 510 { 511 bool is_sdp_mem = false; 512 513 if (sec_sdp.size) 514 is_sdp_mem = core_is_buffer_inside(pbuf, len, sec_sdp.paddr, 515 sec_sdp.size); 516 517 if (!is_sdp_mem) 518 is_sdp_mem = pbuf_is_special_mem(pbuf, len, phys_sdp_mem_begin, 519 phys_sdp_mem_end); 520 521 return is_sdp_mem; 522 } 523 524 static struct mobj *core_sdp_mem_alloc_mobj(paddr_t pa, size_t size) 525 { 526 struct mobj *mobj = mobj_phys_alloc(pa, size, TEE_MATTR_MEM_TYPE_CACHED, 527 CORE_MEM_SDP_MEM); 528 529 if (!mobj) 530 panic("can't create SDP physical memory object"); 531 532 return mobj; 533 } 534 535 struct mobj **core_sdp_mem_create_mobjs(void) 536 { 537 const struct core_mmu_phys_mem *mem = NULL; 538 struct mobj **mobj_base = NULL; 539 struct mobj **mobj = NULL; 540 int cnt = phys_sdp_mem_end - phys_sdp_mem_begin; 541 542 if (sec_sdp.size) 543 cnt++; 544 545 /* SDP mobjs table must end with a NULL entry */ 546 mobj_base = calloc(cnt + 1, sizeof(struct mobj *)); 547 if (!mobj_base) 548 panic("Out of memory"); 549 550 mobj = mobj_base; 551 552 for (mem = phys_sdp_mem_begin; mem < phys_sdp_mem_end; mem++, mobj++) 553 *mobj = core_sdp_mem_alloc_mobj(mem->addr, mem->size); 554 555 if (sec_sdp.size) 556 *mobj = core_sdp_mem_alloc_mobj(sec_sdp.paddr, sec_sdp.size); 557 558 return mobj_base; 559 } 560 561 #else /* CFG_SECURE_DATA_PATH */ 562 static bool pbuf_is_sdp_mem(paddr_t pbuf __unused, size_t len __unused) 563 { 564 return false; 565 } 566 567 #endif /* CFG_SECURE_DATA_PATH */ 568 569 /* Check special memories comply with registered memories */ 570 static void verify_special_mem_areas(struct tee_mmap_region *mem_map, 571 size_t len, 572 const struct core_mmu_phys_mem *start, 573 const struct core_mmu_phys_mem *end, 574 const char *area_name __maybe_unused) 575 { 576 const struct core_mmu_phys_mem *mem; 577 const struct core_mmu_phys_mem *mem2; 578 struct tee_mmap_region *mmap; 579 size_t n; 580 581 if (start == end) { 582 DMSG("No %s memory area defined", area_name); 583 return; 584 } 585 586 for (mem = start; mem < end; mem++) 587 DMSG("%s memory [%" PRIxPA " %" PRIx64 "]", 588 area_name, mem->addr, (uint64_t)mem->addr + mem->size); 589 590 /* Check memories do not intersect each other */ 591 for (mem = start; mem + 1 < end; mem++) { 592 for (mem2 = mem + 1; mem2 < end; mem2++) { 593 if (core_is_buffer_intersect(mem2->addr, mem2->size, 594 mem->addr, mem->size)) { 595 MSG_MEM_INSTERSECT(mem2->addr, mem2->size, 596 mem->addr, mem->size); 597 panic("Special memory intersection"); 598 } 599 } 600 } 601 602 /* 603 * Check memories do not intersect any mapped memory. 604 * This is called before reserved VA space is loaded in mem_map. 605 */ 606 for (mem = start; mem < end; mem++) { 607 for (mmap = mem_map, n = 0; n < len; mmap++, n++) { 608 if (core_is_buffer_intersect(mem->addr, mem->size, 609 mmap->pa, mmap->size)) { 610 MSG_MEM_INSTERSECT(mem->addr, mem->size, 611 mmap->pa, mmap->size); 612 panic("Special memory intersection"); 613 } 614 } 615 } 616 } 617 618 static void add_phys_mem(struct tee_mmap_region *memory_map, size_t num_elems, 619 const struct core_mmu_phys_mem *mem, size_t *last) 620 { 621 size_t n = 0; 622 paddr_t pa; 623 paddr_size_t size; 624 625 /* 626 * If some ranges of memory of the same type do overlap 627 * each others they are coalesced into one entry. To help this 628 * added entries are sorted by increasing physical. 629 * 630 * Note that it's valid to have the same physical memory as several 631 * different memory types, for instance the same device memory 632 * mapped as both secure and non-secure. This will probably not 633 * happen often in practice. 634 */ 635 DMSG("%s type %s 0x%08" PRIxPA " size 0x%08" PRIxPASZ, 636 mem->name, teecore_memtype_name(mem->type), mem->addr, mem->size); 637 while (true) { 638 if (n >= (num_elems - 1)) { 639 EMSG("Out of entries (%zu) in memory_map", num_elems); 640 panic(); 641 } 642 if (n == *last) 643 break; 644 pa = memory_map[n].pa; 645 size = memory_map[n].size; 646 if (mem->type == memory_map[n].type && 647 ((pa <= (mem->addr + (mem->size - 1))) && 648 (mem->addr <= (pa + (size - 1))))) { 649 DMSG("Physical mem map overlaps 0x%" PRIxPA, mem->addr); 650 memory_map[n].pa = MIN(pa, mem->addr); 651 memory_map[n].size = MAX(size, mem->size) + 652 (pa - memory_map[n].pa); 653 return; 654 } 655 if (mem->type < memory_map[n].type || 656 (mem->type == memory_map[n].type && mem->addr < pa)) 657 break; /* found the spot where to insert this memory */ 658 n++; 659 } 660 661 memmove(memory_map + n + 1, memory_map + n, 662 sizeof(struct tee_mmap_region) * (*last - n)); 663 (*last)++; 664 memset(memory_map + n, 0, sizeof(memory_map[0])); 665 memory_map[n].type = mem->type; 666 memory_map[n].pa = mem->addr; 667 memory_map[n].size = mem->size; 668 } 669 670 static void add_va_space(struct tee_mmap_region *memory_map, size_t num_elems, 671 enum teecore_memtypes type, size_t size, size_t *last) 672 { 673 size_t n = 0; 674 675 DMSG("type %s size 0x%08zx", teecore_memtype_name(type), size); 676 while (true) { 677 if (n >= (num_elems - 1)) { 678 EMSG("Out of entries (%zu) in memory_map", num_elems); 679 panic(); 680 } 681 if (n == *last) 682 break; 683 if (type < memory_map[n].type) 684 break; 685 n++; 686 } 687 688 memmove(memory_map + n + 1, memory_map + n, 689 sizeof(struct tee_mmap_region) * (*last - n)); 690 (*last)++; 691 memset(memory_map + n, 0, sizeof(memory_map[0])); 692 memory_map[n].type = type; 693 memory_map[n].size = size; 694 } 695 696 uint32_t core_mmu_type_to_attr(enum teecore_memtypes t) 697 { 698 const uint32_t attr = TEE_MATTR_VALID_BLOCK; 699 const uint32_t tagged = TEE_MATTR_MEM_TYPE_TAGGED << 700 TEE_MATTR_MEM_TYPE_SHIFT; 701 const uint32_t cached = TEE_MATTR_MEM_TYPE_CACHED << 702 TEE_MATTR_MEM_TYPE_SHIFT; 703 const uint32_t noncache = TEE_MATTR_MEM_TYPE_DEV << 704 TEE_MATTR_MEM_TYPE_SHIFT; 705 706 switch (t) { 707 case MEM_AREA_TEE_RAM: 708 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | tagged; 709 case MEM_AREA_TEE_RAM_RX: 710 case MEM_AREA_INIT_RAM_RX: 711 case MEM_AREA_IDENTITY_MAP_RX: 712 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRX | tagged; 713 case MEM_AREA_TEE_RAM_RO: 714 case MEM_AREA_INIT_RAM_RO: 715 return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | tagged; 716 case MEM_AREA_TEE_RAM_RW: 717 case MEM_AREA_NEX_RAM_RO: /* This has to be r/w during init runtime */ 718 case MEM_AREA_NEX_RAM_RW: 719 case MEM_AREA_TEE_ASAN: 720 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged; 721 case MEM_AREA_TEE_COHERENT: 722 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | noncache; 723 case MEM_AREA_TA_RAM: 724 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged; 725 case MEM_AREA_NSEC_SHM: 726 return attr | TEE_MATTR_PRW | cached; 727 case MEM_AREA_EXT_DT: 728 /* 729 * If CFG_MAP_EXT_DT_SECURE is enabled map the external device 730 * tree as secure non-cached memory, otherwise, fall back to 731 * non-secure mapping. 732 */ 733 if (IS_ENABLED(CFG_MAP_EXT_DT_SECURE)) 734 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | 735 noncache; 736 fallthrough; 737 case MEM_AREA_IO_NSEC: 738 return attr | TEE_MATTR_PRW | noncache; 739 case MEM_AREA_IO_SEC: 740 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | noncache; 741 case MEM_AREA_RAM_NSEC: 742 return attr | TEE_MATTR_PRW | cached; 743 case MEM_AREA_RAM_SEC: 744 case MEM_AREA_SEC_RAM_OVERALL: 745 return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached; 746 case MEM_AREA_RES_VASPACE: 747 case MEM_AREA_SHM_VASPACE: 748 return 0; 749 case MEM_AREA_PAGER_VASPACE: 750 return TEE_MATTR_SECURE; 751 default: 752 panic("invalid type"); 753 } 754 } 755 756 static bool __maybe_unused map_is_tee_ram(const struct tee_mmap_region *mm) 757 { 758 switch (mm->type) { 759 case MEM_AREA_TEE_RAM: 760 case MEM_AREA_TEE_RAM_RX: 761 case MEM_AREA_TEE_RAM_RO: 762 case MEM_AREA_TEE_RAM_RW: 763 case MEM_AREA_INIT_RAM_RX: 764 case MEM_AREA_INIT_RAM_RO: 765 case MEM_AREA_NEX_RAM_RW: 766 case MEM_AREA_NEX_RAM_RO: 767 case MEM_AREA_TEE_ASAN: 768 return true; 769 default: 770 return false; 771 } 772 } 773 774 static bool __maybe_unused map_is_secure(const struct tee_mmap_region *mm) 775 { 776 return !!(core_mmu_type_to_attr(mm->type) & TEE_MATTR_SECURE); 777 } 778 779 static bool __maybe_unused map_is_pgdir(const struct tee_mmap_region *mm) 780 { 781 return mm->region_size == CORE_MMU_PGDIR_SIZE; 782 } 783 784 static int cmp_mmap_by_lower_va(const void *a, const void *b) 785 { 786 const struct tee_mmap_region *mm_a = a; 787 const struct tee_mmap_region *mm_b = b; 788 789 return CMP_TRILEAN(mm_a->va, mm_b->va); 790 } 791 792 static void dump_mmap_table(struct tee_mmap_region *memory_map) 793 { 794 struct tee_mmap_region *map; 795 796 for (map = memory_map; !core_mmap_is_end_of_table(map); map++) { 797 vaddr_t __maybe_unused vstart; 798 799 vstart = map->va + ((vaddr_t)map->pa & (map->region_size - 1)); 800 DMSG("type %-12s va 0x%08" PRIxVA "..0x%08" PRIxVA 801 " pa 0x%08" PRIxPA "..0x%08" PRIxPA " size 0x%08zx (%s)", 802 teecore_memtype_name(map->type), vstart, 803 vstart + map->size - 1, map->pa, 804 (paddr_t)(map->pa + map->size - 1), map->size, 805 map->region_size == SMALL_PAGE_SIZE ? "smallpg" : "pgdir"); 806 } 807 } 808 809 #if DEBUG_XLAT_TABLE 810 811 static void dump_xlat_table(vaddr_t va, unsigned int level) 812 { 813 struct core_mmu_table_info tbl_info; 814 unsigned int idx = 0; 815 paddr_t pa; 816 uint32_t attr; 817 818 core_mmu_find_table(NULL, va, level, &tbl_info); 819 va = tbl_info.va_base; 820 for (idx = 0; idx < tbl_info.num_entries; idx++) { 821 core_mmu_get_entry(&tbl_info, idx, &pa, &attr); 822 if (attr || level > CORE_MMU_BASE_TABLE_LEVEL) { 823 const char *security_bit = ""; 824 825 if (core_mmu_entry_have_security_bit(attr)) { 826 if (attr & TEE_MATTR_SECURE) 827 security_bit = "S"; 828 else 829 security_bit = "NS"; 830 } 831 832 if (attr & TEE_MATTR_TABLE) { 833 DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA 834 " TBL:0x%010" PRIxPA " %s", 835 level * 2, "", level, va, pa, 836 security_bit); 837 dump_xlat_table(va, level + 1); 838 } else if (attr) { 839 DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA 840 " PA:0x%010" PRIxPA " %s-%s-%s-%s", 841 level * 2, "", level, va, pa, 842 mattr_is_cached(attr) ? "MEM" : 843 "DEV", 844 attr & TEE_MATTR_PW ? "RW" : "RO", 845 attr & TEE_MATTR_PX ? "X " : "XN", 846 security_bit); 847 } else { 848 DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA 849 " INVALID\n", 850 level * 2, "", level, va); 851 } 852 } 853 va += BIT64(tbl_info.shift); 854 } 855 } 856 857 #else 858 859 static void dump_xlat_table(vaddr_t va __unused, int level __unused) 860 { 861 } 862 863 #endif 864 865 /* 866 * Reserves virtual memory space for pager usage. 867 * 868 * From the start of the first memory used by the link script + 869 * TEE_RAM_VA_SIZE should be covered, either with a direct mapping or empty 870 * mapping for pager usage. This adds translation tables as needed for the 871 * pager to operate. 872 */ 873 static void add_pager_vaspace(struct tee_mmap_region *mmap, size_t num_elems, 874 size_t *last) 875 { 876 paddr_t begin = 0; 877 paddr_t end = 0; 878 size_t size = 0; 879 size_t pos = 0; 880 size_t n = 0; 881 882 if (*last >= (num_elems - 1)) { 883 EMSG("Out of entries (%zu) in memory map", num_elems); 884 panic(); 885 } 886 887 for (n = 0; !core_mmap_is_end_of_table(mmap + n); n++) { 888 if (map_is_tee_ram(mmap + n)) { 889 if (!begin) 890 begin = mmap[n].pa; 891 pos = n + 1; 892 } 893 } 894 895 end = mmap[pos - 1].pa + mmap[pos - 1].size; 896 size = TEE_RAM_VA_SIZE - (end - begin); 897 if (!size) 898 return; 899 900 assert(pos <= *last); 901 memmove(mmap + pos + 1, mmap + pos, 902 sizeof(struct tee_mmap_region) * (*last - pos)); 903 (*last)++; 904 memset(mmap + pos, 0, sizeof(mmap[0])); 905 mmap[pos].type = MEM_AREA_PAGER_VASPACE; 906 mmap[pos].va = 0; 907 mmap[pos].size = size; 908 mmap[pos].region_size = SMALL_PAGE_SIZE; 909 mmap[pos].attr = core_mmu_type_to_attr(MEM_AREA_PAGER_VASPACE); 910 } 911 912 static void check_sec_nsec_mem_config(void) 913 { 914 size_t n = 0; 915 916 for (n = 0; n < ARRAY_SIZE(secure_only); n++) { 917 if (pbuf_intersects(nsec_shared, secure_only[n].paddr, 918 secure_only[n].size)) 919 panic("Invalid memory access config: sec/nsec"); 920 } 921 } 922 923 static size_t collect_mem_ranges(struct tee_mmap_region *memory_map, 924 size_t num_elems) 925 { 926 const struct core_mmu_phys_mem *mem = NULL; 927 size_t last = 0; 928 929 for (mem = phys_mem_map_begin; mem < phys_mem_map_end; mem++) { 930 struct core_mmu_phys_mem m = *mem; 931 932 /* Discard null size entries */ 933 if (!m.size) 934 continue; 935 936 /* Only unmapped virtual range may have a null phys addr */ 937 assert(m.addr || !core_mmu_type_to_attr(m.type)); 938 939 add_phys_mem(memory_map, num_elems, &m, &last); 940 } 941 942 if (IS_ENABLED(CFG_SECURE_DATA_PATH)) 943 verify_special_mem_areas(memory_map, num_elems, 944 phys_sdp_mem_begin, 945 phys_sdp_mem_end, "SDP"); 946 947 add_va_space(memory_map, num_elems, MEM_AREA_RES_VASPACE, 948 CFG_RESERVED_VASPACE_SIZE, &last); 949 950 add_va_space(memory_map, num_elems, MEM_AREA_SHM_VASPACE, 951 SHM_VASPACE_SIZE, &last); 952 953 memory_map[last].type = MEM_AREA_END; 954 955 return last; 956 } 957 958 static void assign_mem_granularity(struct tee_mmap_region *memory_map) 959 { 960 struct tee_mmap_region *map = NULL; 961 962 /* 963 * Assign region sizes, note that MEM_AREA_TEE_RAM always uses 964 * SMALL_PAGE_SIZE. 965 */ 966 for (map = memory_map; !core_mmap_is_end_of_table(map); map++) { 967 paddr_t mask = map->pa | map->size; 968 969 if (!(mask & CORE_MMU_PGDIR_MASK)) 970 map->region_size = CORE_MMU_PGDIR_SIZE; 971 else if (!(mask & SMALL_PAGE_MASK)) 972 map->region_size = SMALL_PAGE_SIZE; 973 else 974 panic("Impossible memory alignment"); 975 976 if (map_is_tee_ram(map)) 977 map->region_size = SMALL_PAGE_SIZE; 978 } 979 } 980 981 static bool place_tee_ram_at_top(paddr_t paddr) 982 { 983 return paddr > BIT64(core_mmu_get_va_width()) / 2; 984 } 985 986 /* 987 * MMU arch driver shall override this function if it helps 988 * optimizing the memory footprint of the address translation tables. 989 */ 990 bool __weak core_mmu_prefer_tee_ram_at_top(paddr_t paddr) 991 { 992 return place_tee_ram_at_top(paddr); 993 } 994 995 static bool assign_mem_va_dir(vaddr_t tee_ram_va, 996 struct tee_mmap_region *memory_map, 997 bool tee_ram_at_top) 998 { 999 struct tee_mmap_region *map = NULL; 1000 vaddr_t va = 0; 1001 bool va_is_secure = true; 1002 1003 /* Clear eventual previous assignments */ 1004 for (map = memory_map; !core_mmap_is_end_of_table(map); map++) 1005 map->va = 0; 1006 1007 /* 1008 * TEE RAM regions are always aligned with region_size. 1009 * 1010 * Note that MEM_AREA_PAGER_VASPACE also counts as TEE RAM here 1011 * since it handles virtual memory which covers the part of the ELF 1012 * that cannot fit directly into memory. 1013 */ 1014 va = tee_ram_va; 1015 for (map = memory_map; !core_mmap_is_end_of_table(map); map++) { 1016 if (map_is_tee_ram(map) || 1017 map->type == MEM_AREA_PAGER_VASPACE) { 1018 assert(!(va & (map->region_size - 1))); 1019 assert(!(map->size & (map->region_size - 1))); 1020 map->va = va; 1021 if (ADD_OVERFLOW(va, map->size, &va)) 1022 return false; 1023 if (va >= BIT64(core_mmu_get_va_width())) 1024 return false; 1025 } 1026 } 1027 1028 if (tee_ram_at_top) { 1029 /* 1030 * Map non-tee ram regions at addresses lower than the tee 1031 * ram region. 1032 */ 1033 va = tee_ram_va; 1034 for (map = memory_map; !core_mmap_is_end_of_table(map); map++) { 1035 map->attr = core_mmu_type_to_attr(map->type); 1036 if (map->va) 1037 continue; 1038 1039 if (!IS_ENABLED(CFG_WITH_LPAE) && 1040 va_is_secure != map_is_secure(map)) { 1041 va_is_secure = !va_is_secure; 1042 va = ROUNDDOWN(va, CORE_MMU_PGDIR_SIZE); 1043 } 1044 1045 if (SUB_OVERFLOW(va, map->size, &va)) 1046 return false; 1047 va = ROUNDDOWN(va, map->region_size); 1048 /* 1049 * Make sure that va is aligned with pa for 1050 * efficient pgdir mapping. Basically pa & 1051 * pgdir_mask should be == va & pgdir_mask 1052 */ 1053 if (map->size > 2 * CORE_MMU_PGDIR_SIZE) { 1054 if (SUB_OVERFLOW(va, CORE_MMU_PGDIR_SIZE, &va)) 1055 return false; 1056 va += (map->pa - va) & CORE_MMU_PGDIR_MASK; 1057 } 1058 map->va = va; 1059 } 1060 } else { 1061 /* 1062 * Map non-tee ram regions at addresses higher than the tee 1063 * ram region. 1064 */ 1065 for (map = memory_map; !core_mmap_is_end_of_table(map); map++) { 1066 map->attr = core_mmu_type_to_attr(map->type); 1067 if (map->va) 1068 continue; 1069 1070 if (!IS_ENABLED(CFG_WITH_LPAE) && 1071 va_is_secure != map_is_secure(map)) { 1072 va_is_secure = !va_is_secure; 1073 if (ROUNDUP_OVERFLOW(va, CORE_MMU_PGDIR_SIZE, 1074 &va)) 1075 return false; 1076 } 1077 1078 if (ROUNDUP_OVERFLOW(va, map->region_size, &va)) 1079 return false; 1080 /* 1081 * Make sure that va is aligned with pa for 1082 * efficient pgdir mapping. Basically pa & 1083 * pgdir_mask should be == va & pgdir_mask 1084 */ 1085 if (map->size > 2 * CORE_MMU_PGDIR_SIZE) { 1086 vaddr_t offs = (map->pa - va) & 1087 CORE_MMU_PGDIR_MASK; 1088 1089 if (ADD_OVERFLOW(va, offs, &va)) 1090 return false; 1091 } 1092 1093 map->va = va; 1094 if (ADD_OVERFLOW(va, map->size, &va)) 1095 return false; 1096 if (va >= BIT64(core_mmu_get_va_width())) 1097 return false; 1098 } 1099 } 1100 1101 return true; 1102 } 1103 1104 static bool assign_mem_va(vaddr_t tee_ram_va, 1105 struct tee_mmap_region *memory_map) 1106 { 1107 bool tee_ram_at_top = place_tee_ram_at_top(tee_ram_va); 1108 1109 /* 1110 * Check that we're not overlapping with the user VA range. 1111 */ 1112 if (IS_ENABLED(CFG_WITH_LPAE)) { 1113 /* 1114 * User VA range is supposed to be defined after these 1115 * mappings have been established. 1116 */ 1117 assert(!core_mmu_user_va_range_is_defined()); 1118 } else { 1119 vaddr_t user_va_base = 0; 1120 size_t user_va_size = 0; 1121 1122 assert(core_mmu_user_va_range_is_defined()); 1123 core_mmu_get_user_va_range(&user_va_base, &user_va_size); 1124 if (tee_ram_va < (user_va_base + user_va_size)) 1125 return false; 1126 } 1127 1128 if (IS_ENABLED(CFG_WITH_PAGER)) { 1129 bool prefered_dir = core_mmu_prefer_tee_ram_at_top(tee_ram_va); 1130 1131 /* Try whole mapping covered by a single base xlat entry */ 1132 if (prefered_dir != tee_ram_at_top && 1133 assign_mem_va_dir(tee_ram_va, memory_map, prefered_dir)) 1134 return true; 1135 } 1136 1137 return assign_mem_va_dir(tee_ram_va, memory_map, tee_ram_at_top); 1138 } 1139 1140 static int cmp_init_mem_map(const void *a, const void *b) 1141 { 1142 const struct tee_mmap_region *mm_a = a; 1143 const struct tee_mmap_region *mm_b = b; 1144 int rc = 0; 1145 1146 rc = CMP_TRILEAN(mm_a->region_size, mm_b->region_size); 1147 if (!rc) 1148 rc = CMP_TRILEAN(mm_a->pa, mm_b->pa); 1149 /* 1150 * 32bit MMU descriptors cannot mix secure and non-secure mapping in 1151 * the same level2 table. Hence sort secure mapping from non-secure 1152 * mapping. 1153 */ 1154 if (!rc && !IS_ENABLED(CFG_WITH_LPAE)) 1155 rc = CMP_TRILEAN(map_is_secure(mm_a), map_is_secure(mm_b)); 1156 1157 return rc; 1158 } 1159 1160 static bool mem_map_add_id_map(struct tee_mmap_region *memory_map, 1161 size_t num_elems, size_t *last, 1162 vaddr_t id_map_start, vaddr_t id_map_end) 1163 { 1164 struct tee_mmap_region *map = NULL; 1165 vaddr_t start = ROUNDDOWN(id_map_start, SMALL_PAGE_SIZE); 1166 vaddr_t end = ROUNDUP(id_map_end, SMALL_PAGE_SIZE); 1167 size_t len = end - start; 1168 1169 if (*last >= num_elems - 1) { 1170 EMSG("Out of entries (%zu) in memory map", num_elems); 1171 panic(); 1172 } 1173 1174 for (map = memory_map; !core_mmap_is_end_of_table(map); map++) 1175 if (core_is_buffer_intersect(map->va, map->size, start, len)) 1176 return false; 1177 1178 *map = (struct tee_mmap_region){ 1179 .type = MEM_AREA_IDENTITY_MAP_RX, 1180 /* 1181 * Could use CORE_MMU_PGDIR_SIZE to potentially save a 1182 * translation table, at the increased risk of clashes with 1183 * the rest of the memory map. 1184 */ 1185 .region_size = SMALL_PAGE_SIZE, 1186 .pa = start, 1187 .va = start, 1188 .size = len, 1189 .attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX), 1190 }; 1191 1192 (*last)++; 1193 1194 return true; 1195 } 1196 1197 static unsigned long init_mem_map(struct tee_mmap_region *memory_map, 1198 size_t num_elems, unsigned long seed) 1199 { 1200 /* 1201 * @id_map_start and @id_map_end describes a physical memory range 1202 * that must be mapped Read-Only eXecutable at identical virtual 1203 * addresses. 1204 */ 1205 vaddr_t id_map_start = (vaddr_t)__identity_map_init_start; 1206 vaddr_t id_map_end = (vaddr_t)__identity_map_init_end; 1207 unsigned long offs = 0; 1208 size_t last = 0; 1209 1210 last = collect_mem_ranges(memory_map, num_elems); 1211 assign_mem_granularity(memory_map); 1212 1213 /* 1214 * To ease mapping and lower use of xlat tables, sort mapping 1215 * description moving small-page regions after the pgdir regions. 1216 */ 1217 qsort(memory_map, last, sizeof(struct tee_mmap_region), 1218 cmp_init_mem_map); 1219 1220 add_pager_vaspace(memory_map, num_elems, &last); 1221 if (IS_ENABLED(CFG_CORE_ASLR) && seed) { 1222 vaddr_t base_addr = TEE_RAM_START + seed; 1223 const unsigned int va_width = core_mmu_get_va_width(); 1224 const vaddr_t va_mask = GENMASK_64(va_width - 1, 1225 SMALL_PAGE_SHIFT); 1226 vaddr_t ba = base_addr; 1227 size_t n = 0; 1228 1229 for (n = 0; n < 3; n++) { 1230 if (n) 1231 ba = base_addr ^ BIT64(va_width - n); 1232 ba &= va_mask; 1233 if (assign_mem_va(ba, memory_map) && 1234 mem_map_add_id_map(memory_map, num_elems, &last, 1235 id_map_start, id_map_end)) { 1236 offs = ba - TEE_RAM_START; 1237 DMSG("Mapping core at %#"PRIxVA" offs %#lx", 1238 ba, offs); 1239 goto out; 1240 } else { 1241 DMSG("Failed to map core at %#"PRIxVA, ba); 1242 } 1243 } 1244 EMSG("Failed to map core with seed %#lx", seed); 1245 } 1246 1247 if (!assign_mem_va(TEE_RAM_START, memory_map)) 1248 panic(); 1249 1250 out: 1251 qsort(memory_map, last, sizeof(struct tee_mmap_region), 1252 cmp_mmap_by_lower_va); 1253 1254 dump_mmap_table(memory_map); 1255 1256 return offs; 1257 } 1258 1259 static void check_mem_map(struct tee_mmap_region *map) 1260 { 1261 struct tee_mmap_region *m = NULL; 1262 1263 for (m = map; !core_mmap_is_end_of_table(m); m++) { 1264 switch (m->type) { 1265 case MEM_AREA_TEE_RAM: 1266 case MEM_AREA_TEE_RAM_RX: 1267 case MEM_AREA_TEE_RAM_RO: 1268 case MEM_AREA_TEE_RAM_RW: 1269 case MEM_AREA_INIT_RAM_RX: 1270 case MEM_AREA_INIT_RAM_RO: 1271 case MEM_AREA_NEX_RAM_RW: 1272 case MEM_AREA_NEX_RAM_RO: 1273 case MEM_AREA_IDENTITY_MAP_RX: 1274 if (!pbuf_is_inside(secure_only, m->pa, m->size)) 1275 panic("TEE_RAM can't fit in secure_only"); 1276 break; 1277 case MEM_AREA_TA_RAM: 1278 if (!pbuf_is_inside(secure_only, m->pa, m->size)) 1279 panic("TA_RAM can't fit in secure_only"); 1280 break; 1281 case MEM_AREA_NSEC_SHM: 1282 if (!pbuf_is_inside(nsec_shared, m->pa, m->size)) 1283 panic("NS_SHM can't fit in nsec_shared"); 1284 break; 1285 case MEM_AREA_SEC_RAM_OVERALL: 1286 case MEM_AREA_TEE_COHERENT: 1287 case MEM_AREA_TEE_ASAN: 1288 case MEM_AREA_IO_SEC: 1289 case MEM_AREA_IO_NSEC: 1290 case MEM_AREA_EXT_DT: 1291 case MEM_AREA_RAM_SEC: 1292 case MEM_AREA_RAM_NSEC: 1293 case MEM_AREA_RES_VASPACE: 1294 case MEM_AREA_SHM_VASPACE: 1295 case MEM_AREA_PAGER_VASPACE: 1296 break; 1297 default: 1298 EMSG("Uhandled memtype %d", m->type); 1299 panic(); 1300 } 1301 } 1302 } 1303 1304 static struct tee_mmap_region *get_tmp_mmap(void) 1305 { 1306 struct tee_mmap_region *tmp_mmap = (void *)__heap1_start; 1307 1308 #ifdef CFG_WITH_PAGER 1309 if (__heap1_end - __heap1_start < (ptrdiff_t)sizeof(static_memory_map)) 1310 tmp_mmap = (void *)__heap2_start; 1311 #endif 1312 1313 memset(tmp_mmap, 0, sizeof(static_memory_map)); 1314 1315 return tmp_mmap; 1316 } 1317 1318 /* 1319 * core_init_mmu_map() - init tee core default memory mapping 1320 * 1321 * This routine sets the static default TEE core mapping. If @seed is > 0 1322 * and configured with CFG_CORE_ASLR it will map tee core at a location 1323 * based on the seed and return the offset from the link address. 1324 * 1325 * If an error happened: core_init_mmu_map is expected to panic. 1326 * 1327 * Note: this function is weak just to make it possible to exclude it from 1328 * the unpaged area. 1329 */ 1330 void __weak core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg) 1331 { 1332 #ifndef CFG_VIRTUALIZATION 1333 vaddr_t start = ROUNDDOWN((vaddr_t)__nozi_start, SMALL_PAGE_SIZE); 1334 #else 1335 vaddr_t start = ROUNDDOWN((vaddr_t)__vcore_nex_rw_start, 1336 SMALL_PAGE_SIZE); 1337 #endif 1338 vaddr_t len = ROUNDUP((vaddr_t)__nozi_end, SMALL_PAGE_SIZE) - start; 1339 struct tee_mmap_region *tmp_mmap = get_tmp_mmap(); 1340 unsigned long offs = 0; 1341 1342 check_sec_nsec_mem_config(); 1343 1344 /* 1345 * Add a entry covering the translation tables which will be 1346 * involved in some virt_to_phys() and phys_to_virt() conversions. 1347 */ 1348 static_memory_map[0] = (struct tee_mmap_region){ 1349 .type = MEM_AREA_TEE_RAM, 1350 .region_size = SMALL_PAGE_SIZE, 1351 .pa = start, 1352 .va = start, 1353 .size = len, 1354 .attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX), 1355 }; 1356 1357 COMPILE_TIME_ASSERT(CFG_MMAP_REGIONS >= 13); 1358 offs = init_mem_map(tmp_mmap, ARRAY_SIZE(static_memory_map), seed); 1359 1360 check_mem_map(tmp_mmap); 1361 core_init_mmu(tmp_mmap); 1362 dump_xlat_table(0x0, CORE_MMU_BASE_TABLE_LEVEL); 1363 core_init_mmu_regs(cfg); 1364 cfg->load_offset = offs; 1365 memcpy(static_memory_map, tmp_mmap, sizeof(static_memory_map)); 1366 } 1367 1368 bool core_mmu_mattr_is_ok(uint32_t mattr) 1369 { 1370 /* 1371 * Keep in sync with core_mmu_lpae.c:mattr_to_desc and 1372 * core_mmu_v7.c:mattr_to_texcb 1373 */ 1374 1375 switch ((mattr >> TEE_MATTR_MEM_TYPE_SHIFT) & TEE_MATTR_MEM_TYPE_MASK) { 1376 case TEE_MATTR_MEM_TYPE_DEV: 1377 case TEE_MATTR_MEM_TYPE_STRONGLY_O: 1378 case TEE_MATTR_MEM_TYPE_CACHED: 1379 case TEE_MATTR_MEM_TYPE_TAGGED: 1380 return true; 1381 default: 1382 return false; 1383 } 1384 } 1385 1386 /* 1387 * test attributes of target physical buffer 1388 * 1389 * Flags: pbuf_is(SECURE, NOT_SECURE, RAM, IOMEM, KEYVAULT). 1390 * 1391 */ 1392 bool core_pbuf_is(uint32_t attr, paddr_t pbuf, size_t len) 1393 { 1394 struct tee_mmap_region *map; 1395 1396 /* Empty buffers complies with anything */ 1397 if (len == 0) 1398 return true; 1399 1400 switch (attr) { 1401 case CORE_MEM_SEC: 1402 return pbuf_is_inside(secure_only, pbuf, len); 1403 case CORE_MEM_NON_SEC: 1404 return pbuf_is_inside(nsec_shared, pbuf, len) || 1405 pbuf_is_nsec_ddr(pbuf, len); 1406 case CORE_MEM_TEE_RAM: 1407 return core_is_buffer_inside(pbuf, len, TEE_RAM_START, 1408 TEE_RAM_PH_SIZE); 1409 case CORE_MEM_TA_RAM: 1410 return core_is_buffer_inside(pbuf, len, TA_RAM_START, 1411 TA_RAM_SIZE); 1412 #ifdef CFG_CORE_RESERVED_SHM 1413 case CORE_MEM_NSEC_SHM: 1414 return core_is_buffer_inside(pbuf, len, TEE_SHMEM_START, 1415 TEE_SHMEM_SIZE); 1416 #endif 1417 case CORE_MEM_SDP_MEM: 1418 return pbuf_is_sdp_mem(pbuf, len); 1419 case CORE_MEM_CACHED: 1420 map = find_map_by_pa(pbuf); 1421 if (!map || !pbuf_inside_map_area(pbuf, len, map)) 1422 return false; 1423 return mattr_is_cached(map->attr); 1424 default: 1425 return false; 1426 } 1427 } 1428 1429 /* test attributes of target virtual buffer (in core mapping) */ 1430 bool core_vbuf_is(uint32_t attr, const void *vbuf, size_t len) 1431 { 1432 paddr_t p; 1433 1434 /* Empty buffers complies with anything */ 1435 if (len == 0) 1436 return true; 1437 1438 p = virt_to_phys((void *)vbuf); 1439 if (!p) 1440 return false; 1441 1442 return core_pbuf_is(attr, p, len); 1443 } 1444 1445 /* core_va2pa - teecore exported service */ 1446 static int __maybe_unused core_va2pa_helper(void *va, paddr_t *pa) 1447 { 1448 struct tee_mmap_region *map; 1449 1450 map = find_map_by_va(va); 1451 if (!va_is_in_map(map, (vaddr_t)va)) 1452 return -1; 1453 1454 /* 1455 * We can calculate PA for static map. Virtual address ranges 1456 * reserved to core dynamic mapping return a 'match' (return 0;) 1457 * together with an invalid null physical address. 1458 */ 1459 if (map->pa) 1460 *pa = map->pa + (vaddr_t)va - map->va; 1461 else 1462 *pa = 0; 1463 1464 return 0; 1465 } 1466 1467 static void *map_pa2va(struct tee_mmap_region *map, paddr_t pa, size_t len) 1468 { 1469 if (!pa_is_in_map(map, pa, len)) 1470 return NULL; 1471 1472 return (void *)(vaddr_t)(map->va + pa - map->pa); 1473 } 1474 1475 /* 1476 * teecore gets some memory area definitions 1477 */ 1478 void core_mmu_get_mem_by_type(unsigned int type, vaddr_t *s, vaddr_t *e) 1479 { 1480 struct tee_mmap_region *map = find_map_by_type(type); 1481 1482 if (map) { 1483 *s = map->va; 1484 *e = map->va + map->size; 1485 } else { 1486 *s = 0; 1487 *e = 0; 1488 } 1489 } 1490 1491 enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa) 1492 { 1493 struct tee_mmap_region *map = find_map_by_pa(pa); 1494 1495 if (!map) 1496 return MEM_AREA_MAXTYPE; 1497 return map->type; 1498 } 1499 1500 void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned int idx, 1501 paddr_t pa, uint32_t attr) 1502 { 1503 assert(idx < tbl_info->num_entries); 1504 core_mmu_set_entry_primitive(tbl_info->table, tbl_info->level, 1505 idx, pa, attr); 1506 } 1507 1508 void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned int idx, 1509 paddr_t *pa, uint32_t *attr) 1510 { 1511 assert(idx < tbl_info->num_entries); 1512 core_mmu_get_entry_primitive(tbl_info->table, tbl_info->level, 1513 idx, pa, attr); 1514 } 1515 1516 static void clear_region(struct core_mmu_table_info *tbl_info, 1517 struct tee_mmap_region *region) 1518 { 1519 unsigned int end = 0; 1520 unsigned int idx = 0; 1521 1522 /* va, len and pa should be block aligned */ 1523 assert(!core_mmu_get_block_offset(tbl_info, region->va)); 1524 assert(!core_mmu_get_block_offset(tbl_info, region->size)); 1525 assert(!core_mmu_get_block_offset(tbl_info, region->pa)); 1526 1527 idx = core_mmu_va2idx(tbl_info, region->va); 1528 end = core_mmu_va2idx(tbl_info, region->va + region->size); 1529 1530 while (idx < end) { 1531 core_mmu_set_entry(tbl_info, idx, 0, 0); 1532 idx++; 1533 } 1534 } 1535 1536 static void set_region(struct core_mmu_table_info *tbl_info, 1537 struct tee_mmap_region *region) 1538 { 1539 unsigned int end; 1540 unsigned int idx; 1541 paddr_t pa; 1542 1543 /* va, len and pa should be block aligned */ 1544 assert(!core_mmu_get_block_offset(tbl_info, region->va)); 1545 assert(!core_mmu_get_block_offset(tbl_info, region->size)); 1546 assert(!core_mmu_get_block_offset(tbl_info, region->pa)); 1547 1548 idx = core_mmu_va2idx(tbl_info, region->va); 1549 end = core_mmu_va2idx(tbl_info, region->va + region->size); 1550 pa = region->pa; 1551 1552 while (idx < end) { 1553 core_mmu_set_entry(tbl_info, idx, pa, region->attr); 1554 idx++; 1555 pa += BIT64(tbl_info->shift); 1556 } 1557 } 1558 1559 static void set_pg_region(struct core_mmu_table_info *dir_info, 1560 struct vm_region *region, struct pgt **pgt, 1561 struct core_mmu_table_info *pg_info) 1562 { 1563 struct tee_mmap_region r = { 1564 .va = region->va, 1565 .size = region->size, 1566 .attr = region->attr, 1567 }; 1568 vaddr_t end = r.va + r.size; 1569 uint32_t pgt_attr = (r.attr & TEE_MATTR_SECURE) | TEE_MATTR_TABLE; 1570 1571 while (r.va < end) { 1572 if (!pg_info->table || 1573 r.va >= (pg_info->va_base + CORE_MMU_PGDIR_SIZE)) { 1574 /* 1575 * We're assigning a new translation table. 1576 */ 1577 unsigned int idx; 1578 1579 /* Virtual addresses must grow */ 1580 assert(r.va > pg_info->va_base); 1581 1582 idx = core_mmu_va2idx(dir_info, r.va); 1583 pg_info->va_base = core_mmu_idx2va(dir_info, idx); 1584 1585 /* 1586 * Advance pgt to va_base, note that we may need to 1587 * skip multiple page tables if there are large 1588 * holes in the vm map. 1589 */ 1590 while ((*pgt)->vabase < pg_info->va_base) { 1591 *pgt = SLIST_NEXT(*pgt, link); 1592 /* We should have allocated enough */ 1593 assert(*pgt); 1594 } 1595 assert((*pgt)->vabase == pg_info->va_base); 1596 pg_info->table = (*pgt)->tbl; 1597 1598 core_mmu_set_entry(dir_info, idx, 1599 virt_to_phys(pg_info->table), 1600 pgt_attr); 1601 } 1602 1603 r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base), 1604 end - r.va); 1605 1606 if (!mobj_is_paged(region->mobj)) { 1607 size_t granule = BIT(pg_info->shift); 1608 size_t offset = r.va - region->va + region->offset; 1609 1610 r.size = MIN(r.size, 1611 mobj_get_phys_granule(region->mobj)); 1612 r.size = ROUNDUP(r.size, SMALL_PAGE_SIZE); 1613 1614 if (mobj_get_pa(region->mobj, offset, granule, 1615 &r.pa) != TEE_SUCCESS) 1616 panic("Failed to get PA of unpaged mobj"); 1617 set_region(pg_info, &r); 1618 } 1619 r.va += r.size; 1620 } 1621 } 1622 1623 static bool can_map_at_level(paddr_t paddr, vaddr_t vaddr, 1624 size_t size_left, paddr_t block_size, 1625 struct tee_mmap_region *mm __maybe_unused) 1626 { 1627 /* VA and PA are aligned to block size at current level */ 1628 if ((vaddr | paddr) & (block_size - 1)) 1629 return false; 1630 1631 /* Remainder fits into block at current level */ 1632 if (size_left < block_size) 1633 return false; 1634 1635 #ifdef CFG_WITH_PAGER 1636 /* 1637 * If pager is enabled, we need to map tee ram 1638 * regions with small pages only 1639 */ 1640 if (map_is_tee_ram(mm) && block_size != SMALL_PAGE_SIZE) 1641 return false; 1642 #endif 1643 1644 return true; 1645 } 1646 1647 void core_mmu_map_region(struct mmu_partition *prtn, struct tee_mmap_region *mm) 1648 { 1649 struct core_mmu_table_info tbl_info; 1650 unsigned int idx; 1651 vaddr_t vaddr = mm->va; 1652 paddr_t paddr = mm->pa; 1653 ssize_t size_left = mm->size; 1654 unsigned int level; 1655 bool table_found; 1656 uint32_t old_attr; 1657 1658 assert(!((vaddr | paddr) & SMALL_PAGE_MASK)); 1659 1660 while (size_left > 0) { 1661 level = CORE_MMU_BASE_TABLE_LEVEL; 1662 1663 while (true) { 1664 paddr_t block_size = 0; 1665 1666 assert(level <= CORE_MMU_PGDIR_LEVEL); 1667 1668 table_found = core_mmu_find_table(prtn, vaddr, level, 1669 &tbl_info); 1670 if (!table_found) 1671 panic("can't find table for mapping"); 1672 1673 block_size = BIT64(tbl_info.shift); 1674 1675 idx = core_mmu_va2idx(&tbl_info, vaddr); 1676 if (!can_map_at_level(paddr, vaddr, size_left, 1677 block_size, mm)) { 1678 bool secure = mm->attr & TEE_MATTR_SECURE; 1679 1680 /* 1681 * This part of the region can't be mapped at 1682 * this level. Need to go deeper. 1683 */ 1684 if (!core_mmu_entry_to_finer_grained(&tbl_info, 1685 idx, 1686 secure)) 1687 panic("Can't divide MMU entry"); 1688 level++; 1689 continue; 1690 } 1691 1692 /* We can map part of the region at current level */ 1693 core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr); 1694 if (old_attr) 1695 panic("Page is already mapped"); 1696 1697 core_mmu_set_entry(&tbl_info, idx, paddr, mm->attr); 1698 paddr += block_size; 1699 vaddr += block_size; 1700 size_left -= block_size; 1701 1702 break; 1703 } 1704 } 1705 } 1706 1707 TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages, 1708 enum teecore_memtypes memtype) 1709 { 1710 TEE_Result ret; 1711 struct core_mmu_table_info tbl_info; 1712 struct tee_mmap_region *mm; 1713 unsigned int idx; 1714 uint32_t old_attr; 1715 uint32_t exceptions; 1716 vaddr_t vaddr = vstart; 1717 size_t i; 1718 bool secure; 1719 1720 assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX)); 1721 1722 secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE; 1723 1724 if (vaddr & SMALL_PAGE_MASK) 1725 return TEE_ERROR_BAD_PARAMETERS; 1726 1727 exceptions = mmu_lock(); 1728 1729 mm = find_map_by_va((void *)vaddr); 1730 if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1)) 1731 panic("VA does not belong to any known mm region"); 1732 1733 if (!core_mmu_is_dynamic_vaspace(mm)) 1734 panic("Trying to map into static region"); 1735 1736 for (i = 0; i < num_pages; i++) { 1737 if (pages[i] & SMALL_PAGE_MASK) { 1738 ret = TEE_ERROR_BAD_PARAMETERS; 1739 goto err; 1740 } 1741 1742 while (true) { 1743 if (!core_mmu_find_table(NULL, vaddr, UINT_MAX, 1744 &tbl_info)) 1745 panic("Can't find pagetable for vaddr "); 1746 1747 idx = core_mmu_va2idx(&tbl_info, vaddr); 1748 if (tbl_info.shift == SMALL_PAGE_SHIFT) 1749 break; 1750 1751 /* This is supertable. Need to divide it. */ 1752 if (!core_mmu_entry_to_finer_grained(&tbl_info, idx, 1753 secure)) 1754 panic("Failed to spread pgdir on small tables"); 1755 } 1756 1757 core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr); 1758 if (old_attr) 1759 panic("Page is already mapped"); 1760 1761 core_mmu_set_entry(&tbl_info, idx, pages[i], 1762 core_mmu_type_to_attr(memtype)); 1763 vaddr += SMALL_PAGE_SIZE; 1764 } 1765 1766 /* 1767 * Make sure all the changes to translation tables are visible 1768 * before returning. TLB doesn't need to be invalidated as we are 1769 * guaranteed that there's no valid mapping in this range. 1770 */ 1771 core_mmu_table_write_barrier(); 1772 mmu_unlock(exceptions); 1773 1774 return TEE_SUCCESS; 1775 err: 1776 mmu_unlock(exceptions); 1777 1778 if (i) 1779 core_mmu_unmap_pages(vstart, i); 1780 1781 return ret; 1782 } 1783 1784 TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart, 1785 size_t num_pages, 1786 enum teecore_memtypes memtype) 1787 { 1788 struct core_mmu_table_info tbl_info = { }; 1789 struct tee_mmap_region *mm = NULL; 1790 unsigned int idx = 0; 1791 uint32_t old_attr = 0; 1792 uint32_t exceptions = 0; 1793 vaddr_t vaddr = vstart; 1794 paddr_t paddr = pstart; 1795 size_t i = 0; 1796 bool secure = false; 1797 1798 assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX)); 1799 1800 secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE; 1801 1802 if ((vaddr | paddr) & SMALL_PAGE_MASK) 1803 return TEE_ERROR_BAD_PARAMETERS; 1804 1805 exceptions = mmu_lock(); 1806 1807 mm = find_map_by_va((void *)vaddr); 1808 if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1)) 1809 panic("VA does not belong to any known mm region"); 1810 1811 if (!core_mmu_is_dynamic_vaspace(mm)) 1812 panic("Trying to map into static region"); 1813 1814 for (i = 0; i < num_pages; i++) { 1815 while (true) { 1816 if (!core_mmu_find_table(NULL, vaddr, UINT_MAX, 1817 &tbl_info)) 1818 panic("Can't find pagetable for vaddr "); 1819 1820 idx = core_mmu_va2idx(&tbl_info, vaddr); 1821 if (tbl_info.shift == SMALL_PAGE_SHIFT) 1822 break; 1823 1824 /* This is supertable. Need to divide it. */ 1825 if (!core_mmu_entry_to_finer_grained(&tbl_info, idx, 1826 secure)) 1827 panic("Failed to spread pgdir on small tables"); 1828 } 1829 1830 core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr); 1831 if (old_attr) 1832 panic("Page is already mapped"); 1833 1834 core_mmu_set_entry(&tbl_info, idx, paddr, 1835 core_mmu_type_to_attr(memtype)); 1836 paddr += SMALL_PAGE_SIZE; 1837 vaddr += SMALL_PAGE_SIZE; 1838 } 1839 1840 /* 1841 * Make sure all the changes to translation tables are visible 1842 * before returning. TLB doesn't need to be invalidated as we are 1843 * guaranteed that there's no valid mapping in this range. 1844 */ 1845 core_mmu_table_write_barrier(); 1846 mmu_unlock(exceptions); 1847 1848 return TEE_SUCCESS; 1849 } 1850 1851 void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages) 1852 { 1853 struct core_mmu_table_info tbl_info; 1854 struct tee_mmap_region *mm; 1855 size_t i; 1856 unsigned int idx; 1857 uint32_t exceptions; 1858 1859 exceptions = mmu_lock(); 1860 1861 mm = find_map_by_va((void *)vstart); 1862 if (!mm || !va_is_in_map(mm, vstart + num_pages * SMALL_PAGE_SIZE - 1)) 1863 panic("VA does not belong to any known mm region"); 1864 1865 if (!core_mmu_is_dynamic_vaspace(mm)) 1866 panic("Trying to unmap static region"); 1867 1868 for (i = 0; i < num_pages; i++, vstart += SMALL_PAGE_SIZE) { 1869 if (!core_mmu_find_table(NULL, vstart, UINT_MAX, &tbl_info)) 1870 panic("Can't find pagetable"); 1871 1872 if (tbl_info.shift != SMALL_PAGE_SHIFT) 1873 panic("Invalid pagetable level"); 1874 1875 idx = core_mmu_va2idx(&tbl_info, vstart); 1876 core_mmu_set_entry(&tbl_info, idx, 0, 0); 1877 } 1878 tlbi_all(); 1879 1880 mmu_unlock(exceptions); 1881 } 1882 1883 void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info, 1884 struct user_mode_ctx *uctx) 1885 { 1886 struct core_mmu_table_info pg_info = { }; 1887 struct pgt_cache *pgt_cache = &thread_get_tsd()->pgt_cache; 1888 struct pgt *pgt = NULL; 1889 struct vm_region *r = NULL; 1890 1891 if (TAILQ_EMPTY(&uctx->vm_info.regions)) 1892 return; /* Nothing to map */ 1893 1894 /* 1895 * Allocate all page tables in advance. 1896 */ 1897 pgt_alloc(pgt_cache, uctx->ts_ctx, &uctx->vm_info); 1898 pgt = SLIST_FIRST(pgt_cache); 1899 1900 core_mmu_set_info_table(&pg_info, dir_info->level + 1, 0, NULL); 1901 1902 TAILQ_FOREACH(r, &uctx->vm_info.regions, link) 1903 set_pg_region(dir_info, r, &pgt, &pg_info); 1904 } 1905 1906 TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr, 1907 size_t len) 1908 { 1909 struct core_mmu_table_info tbl_info = { }; 1910 struct tee_mmap_region *res_map = NULL; 1911 struct tee_mmap_region *map = NULL; 1912 paddr_t pa = virt_to_phys(addr); 1913 size_t granule = 0; 1914 ptrdiff_t i = 0; 1915 paddr_t p = 0; 1916 size_t l = 0; 1917 1918 map = find_map_by_type_and_pa(type, pa, len); 1919 if (!map) 1920 return TEE_ERROR_GENERIC; 1921 1922 res_map = find_map_by_type(MEM_AREA_RES_VASPACE); 1923 if (!res_map) 1924 return TEE_ERROR_GENERIC; 1925 if (!core_mmu_find_table(NULL, res_map->va, UINT_MAX, &tbl_info)) 1926 return TEE_ERROR_GENERIC; 1927 granule = BIT(tbl_info.shift); 1928 1929 if (map < static_memory_map || 1930 map >= static_memory_map + ARRAY_SIZE(static_memory_map)) 1931 return TEE_ERROR_GENERIC; 1932 i = map - static_memory_map; 1933 1934 /* Check that we have a full match */ 1935 p = ROUNDDOWN(pa, granule); 1936 l = ROUNDUP(len + pa - p, granule); 1937 if (map->pa != p || map->size != l) 1938 return TEE_ERROR_GENERIC; 1939 1940 clear_region(&tbl_info, map); 1941 tlbi_all(); 1942 1943 /* If possible remove the va range from res_map */ 1944 if (res_map->va - map->size == map->va) { 1945 res_map->va -= map->size; 1946 res_map->size += map->size; 1947 } 1948 1949 /* Remove the entry. */ 1950 memmove(map, map + 1, 1951 (ARRAY_SIZE(static_memory_map) - i - 1) * sizeof(*map)); 1952 1953 /* Clear the last new entry in case it was used */ 1954 memset(static_memory_map + ARRAY_SIZE(static_memory_map) - 1, 1955 0, sizeof(*map)); 1956 1957 return TEE_SUCCESS; 1958 } 1959 1960 struct tee_mmap_region * 1961 core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len) 1962 { 1963 struct tee_mmap_region *map = NULL; 1964 struct tee_mmap_region *map_found = NULL; 1965 1966 if (!len) 1967 return NULL; 1968 1969 for (map = get_memory_map(); !core_mmap_is_end_of_table(map); map++) { 1970 if (map->type != type) 1971 continue; 1972 1973 if (map_found) 1974 return NULL; 1975 1976 map_found = map; 1977 } 1978 1979 if (!map_found || map_found->size < len) 1980 return NULL; 1981 1982 return map_found; 1983 } 1984 1985 void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len) 1986 { 1987 struct core_mmu_table_info tbl_info; 1988 struct tee_mmap_region *map; 1989 size_t n; 1990 size_t granule; 1991 paddr_t p; 1992 size_t l; 1993 1994 if (!len) 1995 return NULL; 1996 1997 if (!core_mmu_check_end_pa(addr, len)) 1998 return NULL; 1999 2000 /* Check if the memory is already mapped */ 2001 map = find_map_by_type_and_pa(type, addr, len); 2002 if (map && pbuf_inside_map_area(addr, len, map)) 2003 return (void *)(vaddr_t)(map->va + addr - map->pa); 2004 2005 /* Find the reserved va space used for late mappings */ 2006 map = find_map_by_type(MEM_AREA_RES_VASPACE); 2007 if (!map) 2008 return NULL; 2009 2010 if (!core_mmu_find_table(NULL, map->va, UINT_MAX, &tbl_info)) 2011 return NULL; 2012 2013 granule = BIT64(tbl_info.shift); 2014 p = ROUNDDOWN(addr, granule); 2015 l = ROUNDUP(len + addr - p, granule); 2016 2017 /* Ban overflowing virtual addresses */ 2018 if (map->size < l) 2019 return NULL; 2020 2021 /* 2022 * Something is wrong, we can't fit the va range into the selected 2023 * table. The reserved va range is possibly missaligned with 2024 * granule. 2025 */ 2026 if (core_mmu_va2idx(&tbl_info, map->va + len) >= tbl_info.num_entries) 2027 return NULL; 2028 2029 /* Find end of the memory map */ 2030 n = 0; 2031 while (!core_mmap_is_end_of_table(static_memory_map + n)) 2032 n++; 2033 2034 if (n < (ARRAY_SIZE(static_memory_map) - 1)) { 2035 /* There's room for another entry */ 2036 static_memory_map[n].va = map->va; 2037 static_memory_map[n].size = l; 2038 static_memory_map[n + 1].type = MEM_AREA_END; 2039 map->va += l; 2040 map->size -= l; 2041 map = static_memory_map + n; 2042 } else { 2043 /* 2044 * There isn't room for another entry, steal the reserved 2045 * entry as it's not useful for anything else any longer. 2046 */ 2047 map->size = l; 2048 } 2049 map->type = type; 2050 map->region_size = granule; 2051 map->attr = core_mmu_type_to_attr(type); 2052 map->pa = p; 2053 2054 set_region(&tbl_info, map); 2055 2056 /* Make sure the new entry is visible before continuing. */ 2057 core_mmu_table_write_barrier(); 2058 2059 return (void *)(vaddr_t)(map->va + addr - map->pa); 2060 } 2061 2062 #ifdef CFG_WITH_PAGER 2063 static vaddr_t get_linear_map_end(void) 2064 { 2065 /* this is synced with the generic linker file kern.ld.S */ 2066 return (vaddr_t)__heap2_end; 2067 } 2068 #endif 2069 2070 #if defined(CFG_TEE_CORE_DEBUG) 2071 static void check_pa_matches_va(void *va, paddr_t pa) 2072 { 2073 TEE_Result res = TEE_ERROR_GENERIC; 2074 vaddr_t v = (vaddr_t)va; 2075 paddr_t p = 0; 2076 struct core_mmu_table_info ti __maybe_unused = { }; 2077 2078 if (core_mmu_user_va_range_is_defined()) { 2079 vaddr_t user_va_base = 0; 2080 size_t user_va_size = 0; 2081 2082 core_mmu_get_user_va_range(&user_va_base, &user_va_size); 2083 if (v >= user_va_base && 2084 v <= (user_va_base - 1 + user_va_size)) { 2085 if (!core_mmu_user_mapping_is_active()) { 2086 if (pa) 2087 panic("issue in linear address space"); 2088 return; 2089 } 2090 2091 res = vm_va2pa(to_user_mode_ctx(thread_get_tsd()->ctx), 2092 va, &p); 2093 if (res == TEE_ERROR_NOT_SUPPORTED) 2094 return; 2095 if (res == TEE_SUCCESS && pa != p) 2096 panic("bad pa"); 2097 if (res != TEE_SUCCESS && pa) 2098 panic("false pa"); 2099 return; 2100 } 2101 } 2102 #ifdef CFG_WITH_PAGER 2103 if (is_unpaged(va)) { 2104 if (v - boot_mmu_config.load_offset != pa) 2105 panic("issue in linear address space"); 2106 return; 2107 } 2108 2109 if (tee_pager_get_table_info(v, &ti)) { 2110 uint32_t a; 2111 2112 /* 2113 * Lookups in the page table managed by the pager is 2114 * dangerous for addresses in the paged area as those pages 2115 * changes all the time. But some ranges are safe, 2116 * rw-locked areas when the page is populated for instance. 2117 */ 2118 core_mmu_get_entry(&ti, core_mmu_va2idx(&ti, v), &p, &a); 2119 if (a & TEE_MATTR_VALID_BLOCK) { 2120 paddr_t mask = BIT64(ti.shift) - 1; 2121 2122 p |= v & mask; 2123 if (pa != p) 2124 panic(); 2125 } else { 2126 if (pa) 2127 panic(); 2128 } 2129 return; 2130 } 2131 #endif 2132 2133 if (!core_va2pa_helper(va, &p)) { 2134 /* Verfiy only the static mapping (case non null phys addr) */ 2135 if (p && pa != p) { 2136 DMSG("va %p maps 0x%" PRIxPA ", expect 0x%" PRIxPA, 2137 va, p, pa); 2138 panic(); 2139 } 2140 } else { 2141 if (pa) { 2142 DMSG("va %p unmapped, expect 0x%" PRIxPA, va, pa); 2143 panic(); 2144 } 2145 } 2146 } 2147 #else 2148 static void check_pa_matches_va(void *va __unused, paddr_t pa __unused) 2149 { 2150 } 2151 #endif 2152 2153 paddr_t virt_to_phys(void *va) 2154 { 2155 paddr_t pa = 0; 2156 2157 if (!arch_va2pa_helper(va, &pa)) 2158 pa = 0; 2159 check_pa_matches_va(va, pa); 2160 return pa; 2161 } 2162 2163 #if defined(CFG_TEE_CORE_DEBUG) 2164 static void check_va_matches_pa(paddr_t pa, void *va) 2165 { 2166 paddr_t p = 0; 2167 2168 if (!va) 2169 return; 2170 2171 p = virt_to_phys(va); 2172 if (p != pa) { 2173 DMSG("va %p maps 0x%" PRIxPA " expect 0x%" PRIxPA, va, p, pa); 2174 panic(); 2175 } 2176 } 2177 #else 2178 static void check_va_matches_pa(paddr_t pa __unused, void *va __unused) 2179 { 2180 } 2181 #endif 2182 2183 static void *phys_to_virt_ts_vaspace(paddr_t pa, size_t len) 2184 { 2185 if (!core_mmu_user_mapping_is_active()) 2186 return NULL; 2187 2188 return vm_pa2va(to_user_mode_ctx(thread_get_tsd()->ctx), pa, len); 2189 } 2190 2191 #ifdef CFG_WITH_PAGER 2192 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len) 2193 { 2194 paddr_t end_pa = 0; 2195 2196 if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa)) 2197 return NULL; 2198 2199 if (pa >= TEE_LOAD_ADDR && pa < get_linear_map_end()) { 2200 if (end_pa > get_linear_map_end()) 2201 return NULL; 2202 return (void *)(vaddr_t)(pa + boot_mmu_config.load_offset); 2203 } 2204 2205 return tee_pager_phys_to_virt(pa, len); 2206 } 2207 #else 2208 static void *phys_to_virt_tee_ram(paddr_t pa, size_t len) 2209 { 2210 struct tee_mmap_region *mmap = NULL; 2211 2212 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM, pa, len); 2213 if (!mmap) 2214 mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RW, pa, len); 2215 if (!mmap) 2216 mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RO, pa, len); 2217 if (!mmap) 2218 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RW, pa, len); 2219 if (!mmap) 2220 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RO, pa, len); 2221 if (!mmap) 2222 mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RX, pa, len); 2223 /* 2224 * Note that MEM_AREA_INIT_RAM_RO and MEM_AREA_INIT_RAM_RX are only 2225 * used with pager and not needed here. 2226 */ 2227 return map_pa2va(mmap, pa, len); 2228 } 2229 #endif 2230 2231 void *phys_to_virt(paddr_t pa, enum teecore_memtypes m, size_t len) 2232 { 2233 void *va = NULL; 2234 2235 switch (m) { 2236 case MEM_AREA_TS_VASPACE: 2237 va = phys_to_virt_ts_vaspace(pa, len); 2238 break; 2239 case MEM_AREA_TEE_RAM: 2240 case MEM_AREA_TEE_RAM_RX: 2241 case MEM_AREA_TEE_RAM_RO: 2242 case MEM_AREA_TEE_RAM_RW: 2243 case MEM_AREA_NEX_RAM_RO: 2244 case MEM_AREA_NEX_RAM_RW: 2245 va = phys_to_virt_tee_ram(pa, len); 2246 break; 2247 case MEM_AREA_SHM_VASPACE: 2248 /* Find VA from PA in dynamic SHM is not yet supported */ 2249 va = NULL; 2250 break; 2251 default: 2252 va = map_pa2va(find_map_by_type_and_pa(m, pa, len), pa, len); 2253 } 2254 if (m != MEM_AREA_SEC_RAM_OVERALL) 2255 check_va_matches_pa(pa, va); 2256 return va; 2257 } 2258 2259 void *phys_to_virt_io(paddr_t pa, size_t len) 2260 { 2261 struct tee_mmap_region *map = NULL; 2262 void *va = NULL; 2263 2264 map = find_map_by_type_and_pa(MEM_AREA_IO_SEC, pa, len); 2265 if (!map) 2266 map = find_map_by_type_and_pa(MEM_AREA_IO_NSEC, pa, len); 2267 if (!map) 2268 return NULL; 2269 va = map_pa2va(map, pa, len); 2270 check_va_matches_pa(pa, va); 2271 return va; 2272 } 2273 2274 vaddr_t core_mmu_get_va(paddr_t pa, enum teecore_memtypes type, size_t len) 2275 { 2276 if (cpu_mmu_enabled()) 2277 return (vaddr_t)phys_to_virt(pa, type, len); 2278 2279 return (vaddr_t)pa; 2280 } 2281 2282 #ifdef CFG_WITH_PAGER 2283 bool is_unpaged(void *va) 2284 { 2285 vaddr_t v = (vaddr_t)va; 2286 2287 return v >= VCORE_START_VA && v < get_linear_map_end(); 2288 } 2289 #else 2290 bool is_unpaged(void *va __unused) 2291 { 2292 return true; 2293 } 2294 #endif 2295 2296 void core_mmu_init_virtualization(void) 2297 { 2298 virt_init_memory(static_memory_map); 2299 } 2300 2301 vaddr_t io_pa_or_va(struct io_pa_va *p, size_t len) 2302 { 2303 assert(p->pa); 2304 if (cpu_mmu_enabled()) { 2305 if (!p->va) 2306 p->va = (vaddr_t)phys_to_virt_io(p->pa, len); 2307 assert(p->va); 2308 return p->va; 2309 } 2310 return p->pa; 2311 } 2312 2313 vaddr_t io_pa_or_va_secure(struct io_pa_va *p, size_t len) 2314 { 2315 assert(p->pa); 2316 if (cpu_mmu_enabled()) { 2317 if (!p->va) 2318 p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_SEC, 2319 len); 2320 assert(p->va); 2321 return p->va; 2322 } 2323 return p->pa; 2324 } 2325 2326 vaddr_t io_pa_or_va_nsec(struct io_pa_va *p, size_t len) 2327 { 2328 assert(p->pa); 2329 if (cpu_mmu_enabled()) { 2330 if (!p->va) 2331 p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_NSEC, 2332 len); 2333 assert(p->va); 2334 return p->va; 2335 } 2336 return p->pa; 2337 } 2338 2339 #ifdef CFG_CORE_RESERVED_SHM 2340 static TEE_Result teecore_init_pub_ram(void) 2341 { 2342 vaddr_t s = 0; 2343 vaddr_t e = 0; 2344 2345 /* get virtual addr/size of NSec shared mem allocated from teecore */ 2346 core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e); 2347 2348 if (s >= e || s & SMALL_PAGE_MASK || e & SMALL_PAGE_MASK) 2349 panic("invalid PUB RAM"); 2350 2351 /* extra check: we could rely on core_mmu_get_mem_by_type() */ 2352 if (!tee_vbuf_is_non_sec(s, e - s)) 2353 panic("PUB RAM is not non-secure"); 2354 2355 #ifdef CFG_PL310 2356 /* Allocate statically the l2cc mutex */ 2357 tee_l2cc_store_mutex_boot_pa(virt_to_phys((void *)s)); 2358 s += sizeof(uint32_t); /* size of a pl310 mutex */ 2359 s = ROUNDUP(s, SMALL_PAGE_SIZE); /* keep required alignment */ 2360 #endif 2361 2362 default_nsec_shm_paddr = virt_to_phys((void *)s); 2363 default_nsec_shm_size = e - s; 2364 2365 return TEE_SUCCESS; 2366 } 2367 early_init(teecore_init_pub_ram); 2368 #endif /*CFG_CORE_RESERVED_SHM*/ 2369 2370 void core_mmu_init_ta_ram(void) 2371 { 2372 vaddr_t s = 0; 2373 vaddr_t e = 0; 2374 paddr_t ps = 0; 2375 size_t size = 0; 2376 2377 /* 2378 * Get virtual addr/size of RAM where TA are loaded/executedNSec 2379 * shared mem allocated from teecore. 2380 */ 2381 if (IS_ENABLED(CFG_VIRTUALIZATION)) 2382 virt_get_ta_ram(&s, &e); 2383 else 2384 core_mmu_get_mem_by_type(MEM_AREA_TA_RAM, &s, &e); 2385 2386 ps = virt_to_phys((void *)s); 2387 size = e - s; 2388 2389 if (!ps || (ps & CORE_MMU_USER_CODE_MASK) || 2390 !size || (size & CORE_MMU_USER_CODE_MASK)) 2391 panic("invalid TA RAM"); 2392 2393 /* extra check: we could rely on core_mmu_get_mem_by_type() */ 2394 if (!tee_pbuf_is_sec(ps, size)) 2395 panic("TA RAM is not secure"); 2396 2397 if (!tee_mm_is_empty(&tee_mm_sec_ddr)) 2398 panic("TA RAM pool is not empty"); 2399 2400 /* remove previous config and init TA ddr memory pool */ 2401 tee_mm_final(&tee_mm_sec_ddr); 2402 tee_mm_init(&tee_mm_sec_ddr, ps, size, CORE_MMU_USER_CODE_SHIFT, 2403 TEE_MM_POOL_NO_FLAGS); 2404 } 2405