// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016, 2022 Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <assert.h>
#include <config.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_l2cc_mutex.h>
#include <kernel/tee_misc.h>
#include <kernel/tlb_helpers.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/pgt_cache.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <platform_config.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#ifndef DEBUG_XLAT_TABLE
#define DEBUG_XLAT_TABLE 0
#endif

#define SHM_VASPACE_SIZE	(1024 * 1024 * 32)

/*
 * These variables are initialized before .bss is cleared. To avoid
 * resetting them when .bss is cleared we're storing them in .data instead,
 * even if they initially are zero.
 */

#ifdef CFG_CORE_RESERVED_SHM
/* Default NSec shared memory allocated from NSec world */
unsigned long default_nsec_shm_size __nex_bss;
unsigned long default_nsec_shm_paddr __nex_bss;
#endif

static struct tee_mmap_region static_memory_map[CFG_MMAP_REGIONS
#ifdef CFG_CORE_ASLR
						+ 1
#endif
						+ 1] __nex_bss;

/* Define the platform's memory layout. */
struct memaccess_area {
	paddr_t paddr;
	size_t size;
};

#define MEMACCESS_AREA(a, s) { .paddr = a, .size = s }

static struct memaccess_area secure_only[] __nex_data = {
#ifdef TRUSTED_SRAM_BASE
	MEMACCESS_AREA(TRUSTED_SRAM_BASE, TRUSTED_SRAM_SIZE),
#endif
	MEMACCESS_AREA(TRUSTED_DRAM_BASE, TRUSTED_DRAM_SIZE),
};

static struct memaccess_area nsec_shared[] __nex_data = {
#ifdef CFG_CORE_RESERVED_SHM
	MEMACCESS_AREA(TEE_SHMEM_START, TEE_SHMEM_SIZE),
#endif
};

#if defined(CFG_SECURE_DATA_PATH)
#ifdef CFG_TEE_SDP_MEM_BASE
register_sdp_mem(CFG_TEE_SDP_MEM_BASE, CFG_TEE_SDP_MEM_SIZE);
#endif
#ifdef TEE_SDP_TEST_MEM_BASE
register_sdp_mem(TEE_SDP_TEST_MEM_BASE, TEE_SDP_TEST_MEM_SIZE);
#endif
#endif

#ifdef CFG_CORE_RWDATA_NOEXEC
register_phys_mem_ul(MEM_AREA_TEE_RAM_RO, TEE_RAM_START,
		     VCORE_UNPG_RX_PA - TEE_RAM_START);
register_phys_mem_ul(MEM_AREA_TEE_RAM_RX, VCORE_UNPG_RX_PA,
		     VCORE_UNPG_RX_SZ_UNSAFE);
register_phys_mem_ul(MEM_AREA_TEE_RAM_RO, VCORE_UNPG_RO_PA,
		     VCORE_UNPG_RO_SZ_UNSAFE);

#ifdef CFG_VIRTUALIZATION
register_phys_mem_ul(MEM_AREA_NEX_RAM_RO, VCORE_UNPG_RW_PA,
		     VCORE_UNPG_RW_SZ_UNSAFE);
register_phys_mem_ul(MEM_AREA_NEX_RAM_RW, VCORE_NEX_RW_PA,
		     VCORE_NEX_RW_SZ_UNSAFE);
#else
register_phys_mem_ul(MEM_AREA_TEE_RAM_RW, VCORE_UNPG_RW_PA,
		     VCORE_UNPG_RW_SZ_UNSAFE);
#endif

#ifdef CFG_WITH_PAGER
register_phys_mem_ul(MEM_AREA_INIT_RAM_RX, VCORE_INIT_RX_PA,
		     VCORE_INIT_RX_SZ_UNSAFE);
register_phys_mem_ul(MEM_AREA_INIT_RAM_RO, VCORE_INIT_RO_PA,
		     VCORE_INIT_RO_SZ_UNSAFE);
#endif /*CFG_WITH_PAGER*/
#else /*!CFG_CORE_RWDATA_NOEXEC*/
register_phys_mem(MEM_AREA_TEE_RAM, TEE_RAM_START, TEE_RAM_PH_SIZE);
#endif /*!CFG_CORE_RWDATA_NOEXEC*/

#ifdef CFG_VIRTUALIZATION
register_phys_mem(MEM_AREA_SEC_RAM_OVERALL, TRUSTED_DRAM_BASE,
		  TRUSTED_DRAM_SIZE);
#endif

#if defined(CFG_CORE_SANITIZE_KADDRESS) && defined(CFG_WITH_PAGER)
/* Asan RAM is part of MEM_AREA_TEE_RAM_RW when pager is disabled */
register_phys_mem_ul(MEM_AREA_TEE_ASAN, ASAN_MAP_PA, ASAN_MAP_SZ);
#endif

#ifndef CFG_VIRTUALIZATION
/* Every guest will have its own TA RAM if virtualization support is enabled */
register_phys_mem(MEM_AREA_TA_RAM, TA_RAM_START, TA_RAM_SIZE);
#endif
#ifdef CFG_CORE_RESERVED_SHM
register_phys_mem(MEM_AREA_NSEC_SHM, TEE_SHMEM_START, TEE_SHMEM_SIZE);
#endif

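/* Spinlock protecting the core translation tables against concurrent updates */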
static unsigned int mmu_spinlock;

static uint32_t mmu_lock(void)
{
	return cpu_spin_lock_xsave(&mmu_spinlock);
}

static void mmu_unlock(uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&mmu_spinlock, exceptions);
}

static struct tee_mmap_region *get_memory_map(void)
{
	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
		struct tee_mmap_region *map = virt_get_memory_map();

		if (map)
			return map;
	}

	return static_memory_map;
}

static bool _pbuf_intersects(struct memaccess_area *a, size_t alen,
			     paddr_t pa, size_t size)
{
	size_t n;

	for (n = 0; n < alen; n++)
		if (core_is_buffer_intersect(pa, size, a[n].paddr, a[n].size))
			return true;
	return false;
}

#define pbuf_intersects(a, pa, size) \
	_pbuf_intersects((a), ARRAY_SIZE(a), (pa), (size))

static bool _pbuf_is_inside(struct memaccess_area *a, size_t alen,
			    paddr_t pa, size_t size)
{
	size_t n;

	for (n = 0; n < alen; n++)
		if (core_is_buffer_inside(pa, size, a[n].paddr, a[n].size))
			return true;
	return false;
}

#define pbuf_is_inside(a, pa, size) \
	_pbuf_is_inside((a), ARRAY_SIZE(a), (pa), (size))

static bool pa_is_in_map(struct tee_mmap_region *map, paddr_t pa, size_t len)
{
	paddr_t end_pa = 0;

	if (!map)
		return false;

	if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa))
		return false;

	return (pa >= map->pa && end_pa <= map->pa + map->size - 1);
}

static bool va_is_in_map(struct tee_mmap_region *map, vaddr_t va)
{
	if (!map)
		return false;
	return (va >= map->va && va <= (map->va + map->size - 1));
}

/* check if target buffer fits in a core default map area */
static bool pbuf_inside_map_area(unsigned long p, size_t l,
				 struct tee_mmap_region *map)
{
	return core_is_buffer_inside(p, l, map->pa, map->size);
}

static struct tee_mmap_region *find_map_by_type(enum teecore_memtypes type)
{
	struct tee_mmap_region *map;

	for (map = get_memory_map(); !core_mmap_is_end_of_table(map); map++)
		if (map->type == type)
			return map;
	return NULL;
}

static struct tee_mmap_region *
find_map_by_type_and_pa(enum teecore_memtypes type, paddr_t pa, size_t len)
{
	struct tee_mmap_region *map;

	for (map = get_memory_map(); !core_mmap_is_end_of_table(map); map++) {
		if (map->type != type)
			continue;
		if (pa_is_in_map(map, pa, len))
			return map;
	}
	return NULL;
}

static struct tee_mmap_region *find_map_by_va(void *va)
{
	struct tee_mmap_region *map = get_memory_map();
	unsigned long a = (unsigned long)va;

	while (!core_mmap_is_end_of_table(map)) {
		if (a >= map->va && a <= (map->va - 1 + map->size))
			return map;
		map++;
	}
	return NULL;
}

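/* Find the memory map entry covering physical address @pa, if any */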
static struct tee_mmap_region *find_map_by_pa(unsigned long pa)
{
	struct tee_mmap_region *map = get_memory_map();

	while (!core_mmap_is_end_of_table(map)) {
		if (pa >= map->pa && pa <= (map->pa + map->size - 1))
			return map;
		map++;
	}
	return NULL;
}

#if defined(CFG_CORE_DYN_SHM) || defined(CFG_SECURE_DATA_PATH)
static bool pbuf_is_special_mem(paddr_t pbuf, size_t len,
				const struct core_mmu_phys_mem *start,
				const struct core_mmu_phys_mem *end)
{
	const struct core_mmu_phys_mem *mem;

	for (mem = start; mem < end; mem++) {
		if (core_is_buffer_inside(pbuf, len, mem->addr, mem->size))
			return true;
	}

	return false;
}
#endif

#ifdef CFG_CORE_DYN_SHM
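/*
 * Carve the range [pa, pa + size) out of the array of physical memory
 * ranges at @mem: the matching entry is removed, shrunk or split as
 * needed. Panics if the range only partially overlaps an entry.
 */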
static void carve_out_phys_mem(struct core_mmu_phys_mem **mem, size_t *nelems,
			       paddr_t pa, size_t size)
{
	struct core_mmu_phys_mem *m = *mem;
	size_t n = 0;

	while (true) {
		if (n >= *nelems) {
			DMSG("No need to carve out %#" PRIxPA " size %#zx",
			     pa, size);
			return;
		}
		if (core_is_buffer_inside(pa, size, m[n].addr, m[n].size))
			break;
		if (!core_is_buffer_outside(pa, size, m[n].addr, m[n].size))
			panic();
		n++;
	}

	if (pa == m[n].addr && size == m[n].size) {
		/* Remove this entry */
		(*nelems)--;
		memmove(m + n, m + n + 1, sizeof(*m) * (*nelems - n));
		m = nex_realloc(m, sizeof(*m) * *nelems);
		if (!m)
			panic();
		*mem = m;
	} else if (pa == m[n].addr) {
		m[n].addr += size;
		m[n].size -= size;
	} else if ((pa + size) == (m[n].addr + m[n].size)) {
		m[n].size -= size;
	} else {
		/* Need to split the memory entry */
		m = nex_realloc(m, sizeof(*m) * (*nelems + 1));
		if (!m)
			panic();
		*mem = m;
		memmove(m + n + 1, m + n, sizeof(*m) * (*nelems - n));
		(*nelems)++;
		m[n].size = pa - m[n].addr;
		m[n + 1].size -= size + m[n].size;
		m[n + 1].addr = pa + size;
	}
}

static void check_phys_mem_is_outside(struct core_mmu_phys_mem *start,
				      size_t nelems,
				      struct tee_mmap_region *map)
{
	size_t n;

	for (n = 0; n < nelems; n++) {
		if (!core_is_buffer_outside(start[n].addr, start[n].size,
					    map->pa, map->size)) {
			EMSG("Non-sec mem (%#" PRIxPA ":%#" PRIxPASZ
			     ") overlaps map (type %d %#" PRIxPA ":%#zx)",
			     start[n].addr, start[n].size,
			     map->type, map->pa, map->size);
			panic();
		}
	}
}

static const struct core_mmu_phys_mem *discovered_nsec_ddr_start __nex_bss;
static size_t discovered_nsec_ddr_nelems __nex_bss;

static int cmp_pmem_by_addr(const void *a, const void *b)
{
	const struct core_mmu_phys_mem *pmem_a = a;
	const struct core_mmu_phys_mem *pmem_b = b;

	return CMP_TRILEAN(pmem_a->addr, pmem_b->addr);
}

void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
				      size_t nelems)
{
	struct core_mmu_phys_mem *m = start;
	size_t num_elems = nelems;
	struct tee_mmap_region *map = static_memory_map;
	const struct core_mmu_phys_mem __maybe_unused *pmem;

	assert(!discovered_nsec_ddr_start);
	assert(m && num_elems);

	qsort(m, num_elems, sizeof(*m), cmp_pmem_by_addr);

	/*
	 * Non-secure shared memory and also secure data path memory are
	 * supposed to reside inside non-secure memory. Since NSEC_SHM and
	 * SDP_MEM are used for a specific purpose, make holes for those
	 * memories in the normal non-secure memory.
	 *
	 * This has to be done since for instance QEMU isn't aware of which
	 * memory range in the non-secure memory is used for NSEC_SHM.
	 */

#ifdef CFG_SECURE_DATA_PATH
	for (pmem = phys_sdp_mem_begin; pmem < phys_sdp_mem_end; pmem++)
		carve_out_phys_mem(&m, &num_elems, pmem->addr, pmem->size);
#endif

	carve_out_phys_mem(&m, &num_elems, TEE_RAM_START, TEE_RAM_PH_SIZE);
	carve_out_phys_mem(&m, &num_elems, TA_RAM_START, TA_RAM_SIZE);

	for (map = static_memory_map; !core_mmap_is_end_of_table(map); map++) {
		switch (map->type) {
		case MEM_AREA_NSEC_SHM:
			carve_out_phys_mem(&m, &num_elems, map->pa, map->size);
			break;
		case MEM_AREA_EXT_DT:
		case MEM_AREA_RES_VASPACE:
		case MEM_AREA_SHM_VASPACE:
		case MEM_AREA_TS_VASPACE:
		case MEM_AREA_PAGER_VASPACE:
			break;
		default:
			check_phys_mem_is_outside(m, num_elems, map);
		}
	}

	discovered_nsec_ddr_start = m;
	discovered_nsec_ddr_nelems = num_elems;

	if (!core_mmu_check_end_pa(m[num_elems - 1].addr,
				   m[num_elems - 1].size))
		panic();
}

static bool get_discovered_nsec_ddr(const struct core_mmu_phys_mem **start,
				    const struct core_mmu_phys_mem **end)
{
	if (!discovered_nsec_ddr_start)
		return false;

	*start = discovered_nsec_ddr_start;
	*end = discovered_nsec_ddr_start + discovered_nsec_ddr_nelems;

	return true;
}

static bool pbuf_is_nsec_ddr(paddr_t pbuf, size_t len)
{
	const struct core_mmu_phys_mem *start;
	const struct core_mmu_phys_mem *end;

	if (!get_discovered_nsec_ddr(&start, &end))
		return false;

	return pbuf_is_special_mem(pbuf, len, start, end);
}

bool core_mmu_nsec_ddr_is_defined(void)
{
	const struct core_mmu_phys_mem *start;
	const struct core_mmu_phys_mem *end;

	if (!get_discovered_nsec_ddr(&start, &end))
		return false;

	return start != end;
}
#else
static bool pbuf_is_nsec_ddr(paddr_t pbuf __unused, size_t len __unused)
{
	return false;
}
#endif /*CFG_CORE_DYN_SHM*/

#define MSG_MEM_INTERSECT(pa1, sz1, pa2, sz2) \
	EMSG("[%" PRIxPA " %" PRIx64 "] intersects [%" PRIxPA " %" PRIx64 "]", \
	     pa1, (uint64_t)pa1 + (sz1), pa2, (uint64_t)pa2 + (sz2))

#ifdef CFG_SECURE_DATA_PATH
static bool pbuf_is_sdp_mem(paddr_t pbuf, size_t len)
{
	return pbuf_is_special_mem(pbuf, len, phys_sdp_mem_begin,
				   phys_sdp_mem_end);
}

struct mobj **core_sdp_mem_create_mobjs(void)
{
	const struct core_mmu_phys_mem *mem;
	struct mobj **mobj_base;
	struct mobj **mobj;
	int cnt = phys_sdp_mem_end - phys_sdp_mem_begin;

	/* SDP mobjs table must end with a NULL entry */
	mobj_base = calloc(cnt + 1, sizeof(struct mobj *));
	if (!mobj_base)
		panic("Out of memory");

	for (mem = phys_sdp_mem_begin, mobj = mobj_base;
	     mem < phys_sdp_mem_end; mem++, mobj++) {
		*mobj = mobj_phys_alloc(mem->addr, mem->size,
					TEE_MATTR_MEM_TYPE_CACHED,
					CORE_MEM_SDP_MEM);
		if (!*mobj)
			panic("can't create SDP physical memory object");
	}
	return mobj_base;
}

#else /* CFG_SECURE_DATA_PATH */
static bool pbuf_is_sdp_mem(paddr_t pbuf __unused, size_t len __unused)
{
	return false;
}

#endif /* CFG_SECURE_DATA_PATH */

/* Check special memories comply with registered memories */
static void verify_special_mem_areas(struct tee_mmap_region *mem_map,
				     size_t len,
				     const struct core_mmu_phys_mem *start,
				     const struct core_mmu_phys_mem *end,
				     const char *area_name __maybe_unused)
{
	const struct core_mmu_phys_mem *mem;
	const struct core_mmu_phys_mem *mem2;
	struct tee_mmap_region *mmap;
	size_t n;

	if (start == end) {
		DMSG("No %s memory area defined", area_name);
		return;
	}

	for (mem = start; mem < end; mem++)
		DMSG("%s memory [%" PRIxPA " %" PRIx64 "]",
		     area_name, mem->addr, (uint64_t)mem->addr + mem->size);

	/* Check memories do not intersect each other */
	for (mem = start; mem + 1 < end; mem++) {
		for (mem2 = mem + 1; mem2 < end; mem2++) {
			if (core_is_buffer_intersect(mem2->addr, mem2->size,
						     mem->addr, mem->size)) {
				MSG_MEM_INTERSECT(mem2->addr, mem2->size,
						  mem->addr, mem->size);
				panic("Special memory intersection");
			}
		}
	}

	/*
	 * Check memories do not intersect any mapped memory.
	 * This is called before reserved VA space is loaded in mem_map.
	 */
	for (mem = start; mem < end; mem++) {
		for (mmap = mem_map, n = 0; n < len; mmap++, n++) {
			if (core_is_buffer_intersect(mem->addr, mem->size,
						     mmap->pa, mmap->size)) {
				MSG_MEM_INTERSECT(mem->addr, mem->size,
						  mmap->pa, mmap->size);
				panic("Special memory intersection");
			}
		}
	}
}

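/*
 * Insert a physical memory range into @memory_map, keeping the array
 * sorted by type and by ascending physical address. Overlapping ranges
 * of the same type are coalesced into a single entry.
 */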
static void add_phys_mem(struct tee_mmap_region *memory_map, size_t num_elems,
			 const struct core_mmu_phys_mem *mem, size_t *last)
{
	size_t n = 0;
	paddr_t pa;
	paddr_size_t size;

	/*
	 * If some ranges of memory of the same type do overlap each other
	 * they are coalesced into one entry. To help this, added entries
	 * are sorted by increasing physical address.
	 *
	 * Note that it's valid to have the same physical memory as several
	 * different memory types, for instance the same device memory
	 * mapped as both secure and non-secure. This will probably not
	 * happen often in practice.
	 */
	DMSG("%s type %s 0x%08" PRIxPA " size 0x%08" PRIxPASZ,
	     mem->name, teecore_memtype_name(mem->type), mem->addr, mem->size);
	while (true) {
		if (n >= (num_elems - 1)) {
			EMSG("Out of entries (%zu) in memory_map", num_elems);
			panic();
		}
		if (n == *last)
			break;
		pa = memory_map[n].pa;
		size = memory_map[n].size;
		if (mem->type == memory_map[n].type &&
		    ((pa <= (mem->addr + (mem->size - 1))) &&
		     (mem->addr <= (pa + (size - 1))))) {
			DMSG("Physical mem map overlaps 0x%" PRIxPA,
			     mem->addr);
			memory_map[n].pa = MIN(pa, mem->addr);
			memory_map[n].size = MAX(size, mem->size) +
					     (pa - memory_map[n].pa);
			return;
		}
		if (mem->type < memory_map[n].type ||
		    (mem->type == memory_map[n].type && mem->addr < pa))
			break; /* found the spot where to insert this memory */
		n++;
	}

	memmove(memory_map + n + 1, memory_map + n,
		sizeof(struct tee_mmap_region) * (*last - n));
	(*last)++;
	memset(memory_map + n, 0, sizeof(memory_map[0]));
	memory_map[n].type = mem->type;
	memory_map[n].pa = mem->addr;
	memory_map[n].size = mem->size;
}

static void add_va_space(struct tee_mmap_region *memory_map, size_t num_elems,
			 enum teecore_memtypes type, size_t size, size_t *last)
{
	size_t n = 0;

	DMSG("type %s size 0x%08zx", teecore_memtype_name(type), size);
	while (true) {
		if (n >= (num_elems - 1)) {
			EMSG("Out of entries (%zu) in memory_map", num_elems);
			panic();
		}
		if (n == *last)
			break;
		if (type < memory_map[n].type)
			break;
		n++;
	}

	memmove(memory_map + n + 1, memory_map + n,
		sizeof(struct tee_mmap_region) * (*last - n));
	(*last)++;
	memset(memory_map + n, 0, sizeof(memory_map[0]));
	memory_map[n].type = type;
	memory_map[n].size = size;
}

uint32_t core_mmu_type_to_attr(enum teecore_memtypes t)
{
	const uint32_t attr = TEE_MATTR_VALID_BLOCK;
	const uint32_t cached = TEE_MATTR_MEM_TYPE_CACHED <<
				TEE_MATTR_MEM_TYPE_SHIFT;
	const uint32_t noncache = TEE_MATTR_MEM_TYPE_DEV <<
				  TEE_MATTR_MEM_TYPE_SHIFT;

	switch (t) {
	case MEM_AREA_TEE_RAM:
		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | cached;
	case MEM_AREA_TEE_RAM_RX:
	case MEM_AREA_INIT_RAM_RX:
	case MEM_AREA_IDENTITY_MAP_RX:
		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRX | cached;
	case MEM_AREA_TEE_RAM_RO:
	case MEM_AREA_INIT_RAM_RO:
		return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | cached;
	case MEM_AREA_TEE_RAM_RW:
	case MEM_AREA_NEX_RAM_RO: /* This has to be r/w during init runtime */
	case MEM_AREA_NEX_RAM_RW:
	case MEM_AREA_TEE_ASAN:
		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached;
	case MEM_AREA_TEE_COHERENT:
		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | noncache;
	case MEM_AREA_TA_RAM:
		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached;
	case MEM_AREA_NSEC_SHM:
		return attr | TEE_MATTR_PRW | cached;
	case MEM_AREA_EXT_DT:
	case MEM_AREA_IO_NSEC:
		return attr | TEE_MATTR_PRW | noncache;
	case MEM_AREA_IO_SEC:
		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | noncache;
	case MEM_AREA_RAM_NSEC:
		return attr | TEE_MATTR_PRW | cached;
	case MEM_AREA_RAM_SEC:
	case MEM_AREA_SEC_RAM_OVERALL:
		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached;
	case MEM_AREA_RES_VASPACE:
	case MEM_AREA_SHM_VASPACE:
		return 0;
	case MEM_AREA_PAGER_VASPACE:
		return TEE_MATTR_SECURE;
	default:
		panic("invalid type");
	}
}

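/* Return true for memory types that live in the TEE core's own RAM */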
static bool __maybe_unused map_is_tee_ram(const struct tee_mmap_region *mm)
{
	switch (mm->type) {
	case MEM_AREA_TEE_RAM:
	case MEM_AREA_TEE_RAM_RX:
	case MEM_AREA_TEE_RAM_RO:
	case MEM_AREA_TEE_RAM_RW:
	case MEM_AREA_INIT_RAM_RX:
	case MEM_AREA_INIT_RAM_RO:
	case MEM_AREA_NEX_RAM_RW:
	case MEM_AREA_NEX_RAM_RO:
	case MEM_AREA_TEE_ASAN:
		return true;
	default:
		return false;
	}
}

static bool __maybe_unused map_is_secure(const struct tee_mmap_region *mm)
{
	return !!(core_mmu_type_to_attr(mm->type) & TEE_MATTR_SECURE);
}

static bool __maybe_unused map_is_pgdir(const struct tee_mmap_region *mm)
{
	return mm->region_size == CORE_MMU_PGDIR_SIZE;
}

static int cmp_mmap_by_lower_va(const void *a, const void *b)
{
	const struct tee_mmap_region *mm_a = a;
	const struct tee_mmap_region *mm_b = b;

	return CMP_TRILEAN(mm_a->va, mm_b->va);
}

static void dump_mmap_table(struct tee_mmap_region *memory_map)
{
	struct tee_mmap_region *map;

	for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
		vaddr_t __maybe_unused vstart;

		vstart = map->va + ((vaddr_t)map->pa & (map->region_size - 1));
		DMSG("type %-12s va 0x%08" PRIxVA "..0x%08" PRIxVA
		     " pa 0x%08" PRIxPA "..0x%08" PRIxPA " size 0x%08zx (%s)",
		     teecore_memtype_name(map->type), vstart,
		     vstart + map->size - 1, map->pa,
		     (paddr_t)(map->pa + map->size - 1), map->size,
		     map->region_size == SMALL_PAGE_SIZE ? "smallpg" : "pgdir");
	}
}

#if DEBUG_XLAT_TABLE

static void dump_xlat_table(vaddr_t va, unsigned int level)
{
	struct core_mmu_table_info tbl_info;
	unsigned int idx = 0;
	paddr_t pa;
	uint32_t attr;

	core_mmu_find_table(NULL, va, level, &tbl_info);
	va = tbl_info.va_base;
	for (idx = 0; idx < tbl_info.num_entries; idx++) {
		core_mmu_get_entry(&tbl_info, idx, &pa, &attr);
		if (attr || level > CORE_MMU_BASE_TABLE_LEVEL) {
			const char *security_bit = "";

			if (core_mmu_entry_have_security_bit(attr)) {
				if (attr & TEE_MATTR_SECURE)
					security_bit = "S";
				else
					security_bit = "NS";
			}

			if (attr & TEE_MATTR_TABLE) {
				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
					 " TBL:0x%010" PRIxPA " %s",
					 level * 2, "", level, va, pa,
					 security_bit);
				dump_xlat_table(va, level + 1);
			} else if (attr) {
				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
					 " PA:0x%010" PRIxPA " %s-%s-%s-%s",
					 level * 2, "", level, va, pa,
					 mattr_is_cached(attr) ? "MEM" : "DEV",
					 attr & TEE_MATTR_PW ? "RW" : "RO",
					 attr & TEE_MATTR_PX ? "X " : "XN",
					 security_bit);
			} else {
				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
					 " INVALID\n",
					 level * 2, "", level, va);
			}
		}
		va += BIT64(tbl_info.shift);
	}
}

#else

static void dump_xlat_table(vaddr_t va __unused, unsigned int level __unused)
{
}

#endif

/*
 * Reserves virtual memory space for pager usage.
 *
 * The virtual address space from the start of the first memory region used
 * by the link script up to TEE_RAM_VA_SIZE should be covered, either by a
 * direct mapping or by an empty mapping for pager usage. This adds
 * translation tables as needed for the pager to operate.
 */
static void add_pager_vaspace(struct tee_mmap_region *mmap, size_t num_elems,
			      size_t *last)
{
	paddr_t begin = 0;
	paddr_t end = 0;
	size_t size = 0;
	size_t pos = 0;
	size_t n = 0;

	if (*last >= (num_elems - 1)) {
		EMSG("Out of entries (%zu) in memory map", num_elems);
		panic();
	}

	for (n = 0; !core_mmap_is_end_of_table(mmap + n); n++) {
		if (map_is_tee_ram(mmap + n)) {
			if (!begin)
				begin = mmap[n].pa;
			pos = n + 1;
		}
	}

	end = mmap[pos - 1].pa + mmap[pos - 1].size;
	size = TEE_RAM_VA_SIZE - (end - begin);
	if (!size)
		return;

	assert(pos <= *last);
	memmove(mmap + pos + 1, mmap + pos,
		sizeof(struct tee_mmap_region) * (*last - pos));
	(*last)++;
	memset(mmap + pos, 0, sizeof(mmap[0]));
	mmap[pos].type = MEM_AREA_PAGER_VASPACE;
	mmap[pos].va = 0;
	mmap[pos].size = size;
	mmap[pos].region_size = SMALL_PAGE_SIZE;
	mmap[pos].attr = core_mmu_type_to_attr(MEM_AREA_PAGER_VASPACE);
}

static void check_sec_nsec_mem_config(void)
{
	size_t n = 0;

	for (n = 0; n < ARRAY_SIZE(secure_only); n++) {
		if (pbuf_intersects(nsec_shared, secure_only[n].paddr,
				    secure_only[n].size))
			panic("Invalid memory access config: sec/nsec");
	}
}

static size_t collect_mem_ranges(struct tee_mmap_region *memory_map,
				 size_t num_elems)
{
	const struct core_mmu_phys_mem *mem = NULL;
	size_t last = 0;

	for (mem = phys_mem_map_begin; mem < phys_mem_map_end; mem++) {
		struct core_mmu_phys_mem m = *mem;

		/* Discard null size entries */
		if (!m.size)
			continue;

		/* Only unmapped virtual range may have a null phys addr */
		assert(m.addr || !core_mmu_type_to_attr(m.type));

		add_phys_mem(memory_map, num_elems, &m, &last);
	}

	if (IS_ENABLED(CFG_SECURE_DATA_PATH))
		verify_special_mem_areas(memory_map, num_elems,
					 phys_sdp_mem_begin,
					 phys_sdp_mem_end, "SDP");

	add_va_space(memory_map, num_elems, MEM_AREA_RES_VASPACE,
		     CFG_RESERVED_VASPACE_SIZE, &last);

	add_va_space(memory_map, num_elems, MEM_AREA_SHM_VASPACE,
		     SHM_VASPACE_SIZE, &last);

	memory_map[last].type = MEM_AREA_END;

	return last;
}

static void assign_mem_granularity(struct tee_mmap_region *memory_map)
{
	struct tee_mmap_region *map = NULL;

	/*
	 * Assign region sizes, note that MEM_AREA_TEE_RAM always uses
	 * SMALL_PAGE_SIZE.
	 */
	for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
		paddr_t mask = map->pa | map->size;

		if (!(mask & CORE_MMU_PGDIR_MASK))
			map->region_size = CORE_MMU_PGDIR_SIZE;
		else if (!(mask & SMALL_PAGE_MASK))
			map->region_size = SMALL_PAGE_SIZE;
		else
			panic("Impossible memory alignment");

		if (map_is_tee_ram(map))
			map->region_size = SMALL_PAGE_SIZE;
	}
}

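/*
 * Assign virtual addresses to all regions in @memory_map. TEE RAM (and
 * the pager VA space) is placed at @tee_ram_va; the remaining regions are
 * laid out below or above it depending on core_mmu_place_tee_ram_at_top().
 * Returns false if the layout does not fit within the virtual address
 * width.
 */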
static bool assign_mem_va(vaddr_t tee_ram_va,
			  struct tee_mmap_region *memory_map)
{
	struct tee_mmap_region *map = NULL;
	vaddr_t va = tee_ram_va;
	bool va_is_secure = true;

	/*
	 * Check that we're not overlapping with the user VA range.
	 */
	if (IS_ENABLED(CFG_WITH_LPAE)) {
		/*
		 * User VA range is supposed to be defined after these
		 * mappings have been established.
		 */
		assert(!core_mmu_user_va_range_is_defined());
	} else {
		vaddr_t user_va_base = 0;
		size_t user_va_size = 0;

		assert(core_mmu_user_va_range_is_defined());
		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
		if (tee_ram_va < (user_va_base + user_va_size))
			return false;
	}

	/* Clear any previous assignments */
	for (map = memory_map; !core_mmap_is_end_of_table(map); map++)
		map->va = 0;

	/*
	 * TEE RAM regions are always aligned with region_size.
	 *
	 * Note that MEM_AREA_PAGER_VASPACE also counts as TEE RAM here
	 * since it handles virtual memory which covers the part of the ELF
	 * that cannot fit directly into memory.
	 */
	for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
		if (map_is_tee_ram(map) ||
		    map->type == MEM_AREA_PAGER_VASPACE) {
			assert(!(va & (map->region_size - 1)));
			assert(!(map->size & (map->region_size - 1)));
			map->va = va;
			if (ADD_OVERFLOW(va, map->size, &va))
				return false;
			if (va >= BIT64(core_mmu_get_va_width()))
				return false;
		}
	}

	if (core_mmu_place_tee_ram_at_top(tee_ram_va)) {
		/*
		 * Map non-tee ram regions at addresses lower than the tee
		 * ram region.
		 */
		va = tee_ram_va;
		for (map = memory_map; !core_mmap_is_end_of_table(map);
		     map++) {
			map->attr = core_mmu_type_to_attr(map->type);
			if (map->va)
				continue;

			if (!IS_ENABLED(CFG_WITH_LPAE) &&
			    va_is_secure != map_is_secure(map)) {
				va_is_secure = !va_is_secure;
				va = ROUNDDOWN(va, CORE_MMU_PGDIR_SIZE);
			}

			if (SUB_OVERFLOW(va, map->size, &va))
				return false;
			va = ROUNDDOWN(va, map->region_size);
			/*
			 * Make sure that va is aligned with pa for
			 * efficient pgdir mapping. Basically pa &
			 * pgdir_mask should be == va & pgdir_mask
			 */
			if (map->size > 2 * CORE_MMU_PGDIR_SIZE) {
				if (SUB_OVERFLOW(va, CORE_MMU_PGDIR_SIZE, &va))
					return false;
				va += (map->pa - va) & CORE_MMU_PGDIR_MASK;
			}
			map->va = va;
		}
	} else {
		/*
		 * Map non-tee ram regions at addresses higher than the tee
		 * ram region.
		 */
		for (map = memory_map; !core_mmap_is_end_of_table(map);
		     map++) {
			map->attr = core_mmu_type_to_attr(map->type);
			if (map->va)
				continue;

			if (!IS_ENABLED(CFG_WITH_LPAE) &&
			    va_is_secure != map_is_secure(map)) {
				va_is_secure = !va_is_secure;
				if (ROUNDUP_OVERFLOW(va, CORE_MMU_PGDIR_SIZE,
						     &va))
					return false;
			}

			if (ROUNDUP_OVERFLOW(va, map->region_size, &va))
				return false;
			/*
			 * Make sure that va is aligned with pa for
			 * efficient pgdir mapping. Basically pa &
			 * pgdir_mask should be == va & pgdir_mask
			 */
			if (map->size > 2 * CORE_MMU_PGDIR_SIZE) {
				vaddr_t offs = (map->pa - va) &
					       CORE_MMU_PGDIR_MASK;

				if (ADD_OVERFLOW(va, offs, &va))
					return false;
			}

			map->va = va;
			if (ADD_OVERFLOW(va, map->size, &va))
				return false;
			if (va >= BIT64(core_mmu_get_va_width()))
				return false;
		}
	}

	return true;
}

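/* qsort() callback sorting regions by granularity, then PA, then security */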
static int cmp_init_mem_map(const void *a, const void *b)
{
	const struct tee_mmap_region *mm_a = a;
	const struct tee_mmap_region *mm_b = b;
	int rc = 0;

	rc = CMP_TRILEAN(mm_a->region_size, mm_b->region_size);
	if (!rc)
		rc = CMP_TRILEAN(mm_a->pa, mm_b->pa);
	/*
	 * 32bit MMU descriptors cannot mix secure and non-secure mappings
	 * in the same level 2 table. Hence sort secure mappings apart from
	 * non-secure mappings.
	 */
	if (!rc && !IS_ENABLED(CFG_WITH_LPAE))
		rc = CMP_TRILEAN(map_is_secure(mm_a), map_is_secure(mm_b));

	return rc;
}

static bool mem_map_add_id_map(struct tee_mmap_region *memory_map,
			       size_t num_elems, size_t *last,
			       vaddr_t id_map_start, vaddr_t id_map_end)
{
	struct tee_mmap_region *map = NULL;
	vaddr_t start = ROUNDDOWN(id_map_start, SMALL_PAGE_SIZE);
	vaddr_t end = ROUNDUP(id_map_end, SMALL_PAGE_SIZE);
	size_t len = end - start;

	if (*last >= num_elems - 1) {
		EMSG("Out of entries (%zu) in memory map", num_elems);
		panic();
	}

	for (map = memory_map; !core_mmap_is_end_of_table(map); map++)
		if (core_is_buffer_intersect(map->va, map->size, start, len))
			return false;

	*map = (struct tee_mmap_region){
		.type = MEM_AREA_IDENTITY_MAP_RX,
		/*
		 * Could use CORE_MMU_PGDIR_SIZE to potentially save a
		 * translation table, at the increased risk of clashes with
		 * the rest of the memory map.
		 */
		.region_size = SMALL_PAGE_SIZE,
		.pa = start,
		.va = start,
		.size = len,
		.attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX),
	};

	(*last)++;

	return true;
}

static unsigned long init_mem_map(struct tee_mmap_region *memory_map,
				  size_t num_elems, unsigned long seed)
{
	/*
	 * @id_map_start and @id_map_end describe a physical memory range
	 * that must be mapped Read-Only eXecutable at identical virtual
	 * addresses.
	 */
	vaddr_t id_map_start = (vaddr_t)__identity_map_init_start;
	vaddr_t id_map_end = (vaddr_t)__identity_map_init_end;
	unsigned long offs = 0;
	size_t last = 0;

	last = collect_mem_ranges(memory_map, num_elems);
	assign_mem_granularity(memory_map);

	/*
	 * To ease mapping and lower the use of xlat tables, sort the
	 * mapping descriptors by mapping granularity, keeping small-page
	 * regions apart from the pgdir-mapped regions.
	 */
	qsort(memory_map, last, sizeof(struct tee_mmap_region),
	      cmp_init_mem_map);

	add_pager_vaspace(memory_map, num_elems, &last);

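	/*
	 * With ASLR enabled, first try the base address derived from the
	 * seed; if that fails, retry with one of the top virtual address
	 * bits flipped before falling back to the default TEE_RAM_START
	 * mapping below.
	 */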
	if (IS_ENABLED(CFG_CORE_ASLR) && seed) {
		vaddr_t base_addr = TEE_RAM_START + seed;
		const unsigned int va_width = core_mmu_get_va_width();
		const vaddr_t va_mask = GENMASK_64(va_width - 1,
						   SMALL_PAGE_SHIFT);
		vaddr_t ba = base_addr;
		size_t n = 0;

		for (n = 0; n < 3; n++) {
			if (n)
				ba = base_addr ^ BIT64(va_width - n);
			ba &= va_mask;
			if (assign_mem_va(ba, memory_map) &&
			    mem_map_add_id_map(memory_map, num_elems, &last,
					       id_map_start, id_map_end)) {
				offs = ba - TEE_RAM_START;
				DMSG("Mapping core at %#"PRIxVA" offs %#lx",
				     ba, offs);
				goto out;
			} else {
				DMSG("Failed to map core at %#"PRIxVA, ba);
			}
		}
		EMSG("Failed to map core with seed %#lx", seed);
	}

	if (!assign_mem_va(TEE_RAM_START, memory_map))
		panic();

out:
	qsort(memory_map, last, sizeof(struct tee_mmap_region),
	      cmp_mmap_by_lower_va);

	dump_mmap_table(memory_map);

	return offs;
}

static void check_mem_map(struct tee_mmap_region *map)
{
	struct tee_mmap_region *m = NULL;

	for (m = map; !core_mmap_is_end_of_table(m); m++) {
		switch (m->type) {
		case MEM_AREA_TEE_RAM:
		case MEM_AREA_TEE_RAM_RX:
		case MEM_AREA_TEE_RAM_RO:
		case MEM_AREA_TEE_RAM_RW:
		case MEM_AREA_INIT_RAM_RX:
		case MEM_AREA_INIT_RAM_RO:
		case MEM_AREA_NEX_RAM_RW:
		case MEM_AREA_NEX_RAM_RO:
		case MEM_AREA_IDENTITY_MAP_RX:
			if (!pbuf_is_inside(secure_only, m->pa, m->size))
				panic("TEE_RAM can't fit in secure_only");
			break;
		case MEM_AREA_TA_RAM:
			if (!pbuf_is_inside(secure_only, m->pa, m->size))
				panic("TA_RAM can't fit in secure_only");
			break;
		case MEM_AREA_NSEC_SHM:
			if (!pbuf_is_inside(nsec_shared, m->pa, m->size))
				panic("NS_SHM can't fit in nsec_shared");
			break;
		case MEM_AREA_SEC_RAM_OVERALL:
		case MEM_AREA_TEE_COHERENT:
		case MEM_AREA_TEE_ASAN:
		case MEM_AREA_IO_SEC:
		case MEM_AREA_IO_NSEC:
		case MEM_AREA_EXT_DT:
		case MEM_AREA_RAM_SEC:
		case MEM_AREA_RAM_NSEC:
		case MEM_AREA_RES_VASPACE:
		case MEM_AREA_SHM_VASPACE:
		case MEM_AREA_PAGER_VASPACE:
			break;
		default:
			EMSG("Unhandled memtype %d", m->type);
			panic();
		}
	}
}

static struct tee_mmap_region *get_tmp_mmap(void)
{
	struct tee_mmap_region *tmp_mmap = (void *)__heap1_start;

#ifdef CFG_WITH_PAGER
	if (__heap1_end - __heap1_start < (ptrdiff_t)sizeof(static_memory_map))
		tmp_mmap = (void *)__heap2_start;
#endif

	memset(tmp_mmap, 0, sizeof(static_memory_map));

	return tmp_mmap;
}

/*
 * core_init_mmu_map() - init tee core default memory mapping
 *
 * This routine sets the static default TEE core mapping. If @seed is > 0
 * and CFG_CORE_ASLR is enabled, it maps the TEE core at a location based
 * on the seed and returns the offset from the link address.
 *
 * If an error occurs, core_init_mmu_map() is expected to panic.
 *
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg)
{
#ifndef CFG_VIRTUALIZATION
	vaddr_t start = ROUNDDOWN((vaddr_t)__nozi_start, SMALL_PAGE_SIZE);
#else
	vaddr_t start = ROUNDDOWN((vaddr_t)__vcore_nex_rw_start,
				  SMALL_PAGE_SIZE);
#endif
	vaddr_t len = ROUNDUP((vaddr_t)__nozi_end, SMALL_PAGE_SIZE) - start;
	struct tee_mmap_region *tmp_mmap = get_tmp_mmap();
	unsigned long offs = 0;

	check_sec_nsec_mem_config();

	/*
	 * Add an entry covering the translation tables which will be
	 * involved in some virt_to_phys() and phys_to_virt() conversions.
	 */
	static_memory_map[0] = (struct tee_mmap_region){
		.type = MEM_AREA_TEE_RAM,
		.region_size = SMALL_PAGE_SIZE,
		.pa = start,
		.va = start,
		.size = len,
		.attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX),
	};

	COMPILE_TIME_ASSERT(CFG_MMAP_REGIONS >= 13);
	offs = init_mem_map(tmp_mmap, ARRAY_SIZE(static_memory_map), seed);

	check_mem_map(tmp_mmap);
	core_init_mmu(tmp_mmap);
	dump_xlat_table(0x0, CORE_MMU_BASE_TABLE_LEVEL);
	core_init_mmu_regs(cfg);
	cfg->load_offset = offs;
	memcpy(static_memory_map, tmp_mmap, sizeof(static_memory_map));
}

bool core_mmu_mattr_is_ok(uint32_t mattr)
{
	/*
	 * Keep in sync with core_mmu_lpae.c:mattr_to_desc and
	 * core_mmu_v7.c:mattr_to_texcb
	 */

	switch ((mattr >> TEE_MATTR_MEM_TYPE_SHIFT) & TEE_MATTR_MEM_TYPE_MASK) {
	case TEE_MATTR_MEM_TYPE_DEV:
	case TEE_MATTR_MEM_TYPE_CACHED:
		return true;
	default:
		return false;
	}
}

/*
 * test attributes of target physical buffer
 *
 * Flags: pbuf_is(SECURE, NOT_SECURE, RAM, IOMEM, KEYVAULT).
 */
bool core_pbuf_is(uint32_t attr, paddr_t pbuf, size_t len)
{
	struct tee_mmap_region *map;

	/* Empty buffers comply with anything */
	if (len == 0)
		return true;

	switch (attr) {
	case CORE_MEM_SEC:
		return pbuf_is_inside(secure_only, pbuf, len);
	case CORE_MEM_NON_SEC:
		return pbuf_is_inside(nsec_shared, pbuf, len) ||
			pbuf_is_nsec_ddr(pbuf, len);
	case CORE_MEM_TEE_RAM:
		return core_is_buffer_inside(pbuf, len, TEE_RAM_START,
					     TEE_RAM_PH_SIZE);
	case CORE_MEM_TA_RAM:
		return core_is_buffer_inside(pbuf, len, TA_RAM_START,
					     TA_RAM_SIZE);
#ifdef CFG_CORE_RESERVED_SHM
	case CORE_MEM_NSEC_SHM:
		return core_is_buffer_inside(pbuf, len, TEE_SHMEM_START,
					     TEE_SHMEM_SIZE);
#endif
	case CORE_MEM_SDP_MEM:
		return pbuf_is_sdp_mem(pbuf, len);
	case CORE_MEM_CACHED:
		map = find_map_by_pa(pbuf);
		if (!map || !pbuf_inside_map_area(pbuf, len, map))
			return false;
		return mattr_is_cached(map->attr);
	default:
		return false;
	}
}

/* test attributes of target virtual buffer (in core mapping) */
bool core_vbuf_is(uint32_t attr, const void *vbuf, size_t len)
{
	paddr_t p;

	/* Empty buffers comply with anything */
	if (len == 0)
		return true;

	p = virt_to_phys((void *)vbuf);
	if (!p)
		return false;

	return core_pbuf_is(attr, p, len);
}

/* core_va2pa - teecore exported service */
static int __maybe_unused core_va2pa_helper(void *va, paddr_t *pa)
{
	struct tee_mmap_region *map;

	map = find_map_by_va(va);
	if (!va_is_in_map(map, (vaddr_t)va))
		return -1;

	/*
	 * We can calculate the PA only for the static map. Virtual address
	 * ranges reserved for core dynamic mappings return a 'match'
	 * (return 0) together with an invalid (null) physical address.
	 */
	if (map->pa)
		*pa = map->pa + (vaddr_t)va - map->va;
	else
		*pa = 0;

	return 0;
}

static void *map_pa2va(struct tee_mmap_region *map, paddr_t pa, size_t len)
{
	if (!pa_is_in_map(map, pa, len))
		return NULL;

	return (void *)(vaddr_t)(map->va + pa - map->pa);
}

/*
 * teecore gets some memory area definitions
 */
void core_mmu_get_mem_by_type(unsigned int type, vaddr_t *s, vaddr_t *e)
{
	struct tee_mmap_region *map = find_map_by_type(type);

	if (map) {
		*s = map->va;
		*e = map->va + map->size;
	} else {
		*s = 0;
		*e = 0;
	}
}

enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa)
{
	struct tee_mmap_region *map = find_map_by_pa(pa);

	if (!map)
		return MEM_AREA_MAXTYPE;
	return map->type;
}

void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned int idx,
			paddr_t pa, uint32_t attr)
{
	assert(idx < tbl_info->num_entries);
	core_mmu_set_entry_primitive(tbl_info->table, tbl_info->level,
				     idx, pa, attr);
}

void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned int idx,
			paddr_t *pa, uint32_t *attr)
{
	assert(idx < tbl_info->num_entries);
	core_mmu_get_entry_primitive(tbl_info->table, tbl_info->level,
				     idx, pa, attr);
}

static void clear_region(struct core_mmu_table_info *tbl_info,
			 struct tee_mmap_region *region)
{
	unsigned int end = 0;
	unsigned int idx = 0;

	/* va, len and pa should be block aligned */
	assert(!core_mmu_get_block_offset(tbl_info, region->va));
	assert(!core_mmu_get_block_offset(tbl_info, region->size));
	assert(!core_mmu_get_block_offset(tbl_info, region->pa));

	idx = core_mmu_va2idx(tbl_info, region->va);
	end = core_mmu_va2idx(tbl_info, region->va + region->size);

	while (idx < end) {
		core_mmu_set_entry(tbl_info, idx, 0, 0);
		idx++;
	}
}

static void set_region(struct core_mmu_table_info *tbl_info,
		       struct tee_mmap_region *region)
{
	unsigned int end;
	unsigned int idx;
	paddr_t pa;

	/* va, len and pa should be block aligned */
	assert(!core_mmu_get_block_offset(tbl_info, region->va));
	assert(!core_mmu_get_block_offset(tbl_info, region->size));
	assert(!core_mmu_get_block_offset(tbl_info, region->pa));

	idx = core_mmu_va2idx(tbl_info, region->va);
	end = core_mmu_va2idx(tbl_info, region->va + region->size);
	pa = region->pa;

	while (idx < end) {
		core_mmu_set_entry(tbl_info, idx, pa, region->attr);
		idx++;
		pa += BIT64(tbl_info->shift);
	}
}

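/*
 * Populate the page tables for one user VM region: advance to a new
 * translation table each time r.va crosses a pgdir boundary and map the
 * backing mobj pages, except for those that are demand-paged.
 */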
static void set_pg_region(struct core_mmu_table_info *dir_info,
			  struct vm_region *region, struct pgt **pgt,
			  struct core_mmu_table_info *pg_info)
{
	struct tee_mmap_region r = {
		.va = region->va,
		.size = region->size,
		.attr = region->attr,
	};
	vaddr_t end = r.va + r.size;
	uint32_t pgt_attr = (r.attr & TEE_MATTR_SECURE) | TEE_MATTR_TABLE;

	while (r.va < end) {
		if (!pg_info->table ||
		    r.va >= (pg_info->va_base + CORE_MMU_PGDIR_SIZE)) {
			/*
			 * We're assigning a new translation table.
			 */
			unsigned int idx;

			/* Virtual addresses must grow */
			assert(r.va > pg_info->va_base);

			idx = core_mmu_va2idx(dir_info, r.va);
			pg_info->va_base = core_mmu_idx2va(dir_info, idx);

#ifdef CFG_PAGED_USER_TA
			/*
			 * Advance pgt to va_base, note that we may need to
			 * skip multiple page tables if there are large
			 * holes in the vm map.
			 */
			while ((*pgt)->vabase < pg_info->va_base) {
				*pgt = SLIST_NEXT(*pgt, link);
				/* We should have allocated enough */
				assert(*pgt);
			}
			assert((*pgt)->vabase == pg_info->va_base);
			pg_info->table = (*pgt)->tbl;
#else
			assert(*pgt); /* We should have allocated enough */
			pg_info->table = (*pgt)->tbl;
			*pgt = SLIST_NEXT(*pgt, link);
#endif

			core_mmu_set_entry(dir_info, idx,
					   virt_to_phys(pg_info->table),
					   pgt_attr);
		}

		r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base),
			     end - r.va);

		if (!mobj_is_paged(region->mobj)) {
			size_t granule = BIT(pg_info->shift);
			size_t offset = r.va - region->va + region->offset;

			r.size = MIN(r.size,
				     mobj_get_phys_granule(region->mobj));
			r.size = ROUNDUP(r.size, SMALL_PAGE_SIZE);

			if (mobj_get_pa(region->mobj, offset, granule,
					&r.pa) != TEE_SUCCESS)
				panic("Failed to get PA of unpaged mobj");
			set_region(pg_info, &r);
		}
		r.va += r.size;
	}
}

static bool can_map_at_level(paddr_t paddr, vaddr_t vaddr,
			     size_t size_left, paddr_t block_size,
			     struct tee_mmap_region *mm __maybe_unused)
{
	/* VA and PA are aligned to block size at current level */
	if ((vaddr | paddr) & (block_size - 1))
		return false;

	/* Remainder fits into block at current level */
	if (size_left < block_size)
		return false;

#ifdef CFG_WITH_PAGER
	/*
	 * If pager is enabled, we need to map tee ram
	 * regions with small pages only
	 */
	if (map_is_tee_ram(mm) && block_size != SMALL_PAGE_SIZE)
		return false;
#endif

	return true;
}

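/*
 * Map region @mm into partition @prtn by walking the translation tables
 * from the base level down, using the largest block size that alignment
 * and remaining size allow and splitting entries into finer-grained
 * tables when needed.
 */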
void core_mmu_map_region(struct mmu_partition *prtn, struct tee_mmap_region *mm)
{
	struct core_mmu_table_info tbl_info;
	unsigned int idx;
	vaddr_t vaddr = mm->va;
	paddr_t paddr = mm->pa;
	ssize_t size_left = mm->size;
	unsigned int level;
	bool table_found;
	uint32_t old_attr;

	assert(!((vaddr | paddr) & SMALL_PAGE_MASK));

	while (size_left > 0) {
		level = CORE_MMU_BASE_TABLE_LEVEL;

		while (true) {
			paddr_t block_size = 0;

			assert(level <= CORE_MMU_PGDIR_LEVEL);

			table_found = core_mmu_find_table(prtn, vaddr, level,
							  &tbl_info);
			if (!table_found)
				panic("can't find table for mapping");

			block_size = BIT64(tbl_info.shift);

			idx = core_mmu_va2idx(&tbl_info, vaddr);
			if (!can_map_at_level(paddr, vaddr, size_left,
					      block_size, mm)) {
				bool secure = mm->attr & TEE_MATTR_SECURE;

				/*
				 * This part of the region can't be mapped at
				 * this level. Need to go deeper.
				 */
				if (!core_mmu_entry_to_finer_grained(&tbl_info,
								     idx,
								     secure))
					panic("Can't divide MMU entry");
				level++;
				continue;
			}

			/* We can map part of the region at current level */
			core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
			if (old_attr)
				panic("Page is already mapped");

			core_mmu_set_entry(&tbl_info, idx, paddr, mm->attr);
			paddr += block_size;
			vaddr += block_size;
			size_left -= block_size;

			break;
		}
	}
}

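/*
 * Map @num_pages individual physical pages from @pages at the virtual
 * range starting at @vstart. The range must fall within a dynamic VA
 * region; pgdir entries are split into page tables as needed.
 */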
TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages,
			      enum teecore_memtypes memtype)
{
	TEE_Result ret;
	struct core_mmu_table_info tbl_info;
	struct tee_mmap_region *mm;
	unsigned int idx;
	uint32_t old_attr;
	uint32_t exceptions;
	vaddr_t vaddr = vstart;
	size_t i;
	bool secure;

	assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX));

	secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE;

	if (vaddr & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	exceptions = mmu_lock();

	mm = find_map_by_va((void *)vaddr);
	if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1))
		panic("VA does not belong to any known mm region");

	if (!core_mmu_is_dynamic_vaspace(mm))
		panic("Trying to map into static region");

	for (i = 0; i < num_pages; i++) {
		if (pages[i] & SMALL_PAGE_MASK) {
			ret = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		while (true) {
			if (!core_mmu_find_table(NULL, vaddr, UINT_MAX,
						 &tbl_info))
				panic("Can't find pagetable for vaddr");

			idx = core_mmu_va2idx(&tbl_info, vaddr);
			if (tbl_info.shift == SMALL_PAGE_SHIFT)
				break;

			/* This is a supertable. Need to divide it. */
			if (!core_mmu_entry_to_finer_grained(&tbl_info, idx,
							     secure))
				panic("Failed to spread pgdir on small tables");
		}

		core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
		if (old_attr)
			panic("Page is already mapped");

		core_mmu_set_entry(&tbl_info, idx, pages[i],
				   core_mmu_type_to_attr(memtype));
		vaddr += SMALL_PAGE_SIZE;
	}

	/*
	 * Make sure all the changes to translation tables are visible
	 * before returning. TLB doesn't need to be invalidated as we are
	 * guaranteed that there's no valid mapping in this range.
	 */
	core_mmu_table_write_barrier();
	mmu_unlock(exceptions);

	return TEE_SUCCESS;
err:
	mmu_unlock(exceptions);

	if (i)
		core_mmu_unmap_pages(vstart, i);

	return ret;
}

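/*
 * Like core_mmu_map_pages(), but for a physically contiguous range
 * starting at @pstart.
 */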
TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart,
					 size_t num_pages,
					 enum teecore_memtypes memtype)
{
	struct core_mmu_table_info tbl_info = { };
	struct tee_mmap_region *mm = NULL;
	unsigned int idx = 0;
	uint32_t old_attr = 0;
	uint32_t exceptions = 0;
	vaddr_t vaddr = vstart;
	paddr_t paddr = pstart;
	size_t i = 0;
	bool secure = false;

	assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX));

	secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE;

	if ((vaddr | paddr) & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	exceptions = mmu_lock();

	mm = find_map_by_va((void *)vaddr);
	if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1))
		panic("VA does not belong to any known mm region");

	if (!core_mmu_is_dynamic_vaspace(mm))
		panic("Trying to map into static region");

	for (i = 0; i < num_pages; i++) {
		while (true) {
			if (!core_mmu_find_table(NULL, vaddr, UINT_MAX,
						 &tbl_info))
				panic("Can't find pagetable for vaddr");

			idx = core_mmu_va2idx(&tbl_info, vaddr);
			if (tbl_info.shift == SMALL_PAGE_SHIFT)
				break;

			/* This is a supertable. Need to divide it. */
			if (!core_mmu_entry_to_finer_grained(&tbl_info, idx,
							     secure))
				panic("Failed to spread pgdir on small tables");
		}

		core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
		if (old_attr)
			panic("Page is already mapped");

		core_mmu_set_entry(&tbl_info, idx, paddr,
				   core_mmu_type_to_attr(memtype));
		paddr += SMALL_PAGE_SIZE;
		vaddr += SMALL_PAGE_SIZE;
	}

	/*
	 * Make sure all the changes to translation tables are visible
	 * before returning. TLB doesn't need to be invalidated as we are
	 * guaranteed that there's no valid mapping in this range.
	 */
	core_mmu_table_write_barrier();
	mmu_unlock(exceptions);

	return TEE_SUCCESS;
}

void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages)
{
	struct core_mmu_table_info tbl_info;
	struct tee_mmap_region *mm;
	size_t i;
	unsigned int idx;
	uint32_t exceptions;

	exceptions = mmu_lock();

	mm = find_map_by_va((void *)vstart);
	if (!mm || !va_is_in_map(mm, vstart + num_pages * SMALL_PAGE_SIZE - 1))
		panic("VA does not belong to any known mm region");

	if (!core_mmu_is_dynamic_vaspace(mm))
		panic("Trying to unmap static region");

	for (i = 0; i < num_pages; i++, vstart += SMALL_PAGE_SIZE) {
		if (!core_mmu_find_table(NULL, vstart, UINT_MAX, &tbl_info))
			panic("Can't find pagetable");

		if (tbl_info.shift != SMALL_PAGE_SHIFT)
			panic("Invalid pagetable level");

		idx = core_mmu_va2idx(&tbl_info, vstart);
		core_mmu_set_entry(&tbl_info, idx, 0, 0);
	}
	tlbi_all();

	mmu_unlock(exceptions);
}

void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
				struct user_mode_ctx *uctx)
{
	struct core_mmu_table_info pg_info = { };
	struct pgt_cache *pgt_cache = &thread_get_tsd()->pgt_cache;
	struct pgt *pgt = NULL;
	struct vm_region *r = NULL;
	struct vm_region *r_last = NULL;

	/* Find the first and last valid entry */
	r = TAILQ_FIRST(&uctx->vm_info.regions);
	if (!r)
		return; /* Nothing to map */
	r_last = TAILQ_LAST(&uctx->vm_info.regions, vm_region_head);

	/*
	 * Allocate all page tables in advance.
	 */
	pgt_alloc(pgt_cache, uctx->ts_ctx, r->va,
		  r_last->va + r_last->size - 1);
	pgt = SLIST_FIRST(pgt_cache);

	core_mmu_set_info_table(&pg_info, dir_info->level + 1, 0, NULL);

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
		set_pg_region(dir_info, r, &pgt, &pg_info);
}

TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
				   size_t len)
{
	struct core_mmu_table_info tbl_info = { };
	struct tee_mmap_region *res_map = NULL;
	struct tee_mmap_region *map = NULL;
	paddr_t pa = virt_to_phys(addr);
	size_t granule = 0;
	ptrdiff_t i = 0;
	paddr_t p = 0;
	size_t l = 0;

	map = find_map_by_type_and_pa(type, pa, len);
	if (!map)
		return TEE_ERROR_GENERIC;

	res_map = find_map_by_type(MEM_AREA_RES_VASPACE);
	if (!res_map)
		return TEE_ERROR_GENERIC;
	if (!core_mmu_find_table(NULL, res_map->va, UINT_MAX, &tbl_info))
		return TEE_ERROR_GENERIC;
	granule = BIT(tbl_info.shift);

	if (map < static_memory_map ||
	    map >= static_memory_map + ARRAY_SIZE(static_memory_map))
		return TEE_ERROR_GENERIC;
	i = map - static_memory_map;

	/* Check that we have a full match */
	p = ROUNDDOWN(pa, granule);
	l = ROUNDUP(len + pa - p, granule);
	if (map->pa != p || map->size != l)
		return TEE_ERROR_GENERIC;

	clear_region(&tbl_info, map);
	tlbi_all();

	/* If possible remove the va range from res_map */
	if (res_map->va - map->size == map->va) {
		res_map->va -= map->size;
		res_map->size += map->size;
	}

	/* Remove the entry. */
	memmove(map, map + 1,
		(ARRAY_SIZE(static_memory_map) - i - 1) * sizeof(*map));

	/* Clear the last entry in case it was used */
	memset(static_memory_map + ARRAY_SIZE(static_memory_map) - 1,
	       0, sizeof(*map));

	return TEE_SUCCESS;
}

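/*
 * Return the only mapping of @type that is at least @len bytes large, or
 * NULL if there is no such mapping or if there is more than one.
 */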
struct tee_mmap_region *
core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len)
{
	struct tee_mmap_region *map = NULL;
	struct tee_mmap_region *map_found = NULL;

	if (!len)
		return NULL;

	for (map = get_memory_map(); !core_mmap_is_end_of_table(map); map++) {
		if (map->type != type)
			continue;

		if (map_found)
			return NULL;

		map_found = map;
	}

	if (!map_found || map_found->size < len)
		return NULL;

	return map_found;
}

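/*
 * Map the physical range [addr, addr + len) with memory type @type late
 * in boot, carving the VA out of the reserved MEM_AREA_RES_VASPACE region
 * if it isn't already mapped. For example (hypothetical addresses), a
 * driver could map a 4 KiB secure device region and get its VA with:
 *
 *   vaddr_t va = (vaddr_t)core_mmu_add_mapping(MEM_AREA_IO_SEC,
 *                                              0xf0000000, 0x1000);
 */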
void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len)
{
	struct core_mmu_table_info tbl_info;
	struct tee_mmap_region *map;
	size_t n;
	size_t granule;
	paddr_t p;
	size_t l;

	if (!len)
		return NULL;

	if (!core_mmu_check_end_pa(addr, len))
		return NULL;

	/* Check if the memory is already mapped */
	map = find_map_by_type_and_pa(type, addr, len);
	if (map && pbuf_inside_map_area(addr, len, map))
		return (void *)(vaddr_t)(map->va + addr - map->pa);

	/* Find the reserved va space used for late mappings */
	map = find_map_by_type(MEM_AREA_RES_VASPACE);
	if (!map)
		return NULL;

	if (!core_mmu_find_table(NULL, map->va, UINT_MAX, &tbl_info))
		return NULL;

	granule = BIT64(tbl_info.shift);
	p = ROUNDDOWN(addr, granule);
	l = ROUNDUP(len + addr - p, granule);

	/* Ban overflowing virtual addresses */
	if (map->size < l)
		return NULL;

	/*
	 * Something is wrong, we can't fit the va range into the selected
	 * table. The reserved va range is possibly misaligned with the
	 * granule.
	 */
	if (core_mmu_va2idx(&tbl_info, map->va + len) >= tbl_info.num_entries)
		return NULL;

	/* Find end of the memory map */
	n = 0;
	while (!core_mmap_is_end_of_table(static_memory_map + n))
		n++;

	if (n < (ARRAY_SIZE(static_memory_map) - 1)) {
		/* There's room for another entry */
		static_memory_map[n].va = map->va;
		static_memory_map[n].size = l;
		static_memory_map[n + 1].type = MEM_AREA_END;
		map->va += l;
		map->size -= l;
		map = static_memory_map + n;
	} else {
		/*
		 * There isn't room for another entry, steal the reserved
		 * entry as it's not useful for anything else any longer.
		 */
		map->size = l;
	}
	map->type = type;
	map->region_size = granule;
	map->attr = core_mmu_type_to_attr(type);
	map->pa = p;

	set_region(&tbl_info, map);

	/* Make sure the new entry is visible before continuing. */
	core_mmu_table_write_barrier();

	return (void *)(vaddr_t)(map->va + addr - map->pa);
}

#ifdef CFG_WITH_PAGER
static vaddr_t get_linear_map_end(void)
{
	/* this is synced with the generic linker file kern.ld.S */
	return (vaddr_t)__heap2_end;
}
#endif

#if defined(CFG_TEE_CORE_DEBUG)
static void check_pa_matches_va(void *va, paddr_t pa)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	vaddr_t v = (vaddr_t)va;
	paddr_t p = 0;
	struct core_mmu_table_info ti __maybe_unused = { };

	if (core_mmu_user_va_range_is_defined()) {
		vaddr_t user_va_base = 0;
		size_t user_va_size = 0;

		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
		if (v >= user_va_base &&
		    v <= (user_va_base - 1 + user_va_size)) {
			if (!core_mmu_user_mapping_is_active()) {
				if (pa)
					panic("issue in linear address space");
				return;
			}

			res = vm_va2pa(to_user_mode_ctx(thread_get_tsd()->ctx),
				       va, &p);
			if (res == TEE_ERROR_NOT_SUPPORTED)
				return;
			if (res == TEE_SUCCESS && pa != p)
				panic("bad pa");
			if (res != TEE_SUCCESS && pa)
				panic("false pa");
			return;
		}
	}
#ifdef CFG_WITH_PAGER
	if (is_unpaged(va)) {
		if (v - boot_mmu_config.load_offset != pa)
			panic("issue in linear address space");
		return;
	}

	if (tee_pager_get_table_info(v, &ti)) {
		uint32_t a;

		/*
		 * Lookups in the page table managed by the pager are
		 * dangerous for addresses in the paged area as those pages
		 * change all the time. But some ranges are safe, for
		 * instance rw-locked areas when the page is populated.
		 */
		core_mmu_get_entry(&ti, core_mmu_va2idx(&ti, v), &p, &a);
		if (a & TEE_MATTR_VALID_BLOCK) {
			paddr_t mask = BIT64(ti.shift) - 1;

			p |= v & mask;
			if (pa != p)
				panic();
		} else {
			if (pa)
				panic();
		}
		return;
	}
#endif

	if (!core_va2pa_helper(va, &p)) {
		/* Verify only the static mapping (the non-null PA case) */
		if (p && pa != p) {
			DMSG("va %p maps 0x%" PRIxPA ", expect 0x%" PRIxPA,
			     va, p, pa);
			panic();
		}
	} else {
		if (pa) {
			DMSG("va %p unmapped, expect 0x%" PRIxPA, va, pa);
			panic();
		}
	}
}
#else
static void check_pa_matches_va(void *va __unused, paddr_t pa __unused)
{
}
#endif

paddr_t virt_to_phys(void *va)
{
	paddr_t pa = 0;

	if (!arch_va2pa_helper(va, &pa))
		pa = 0;
	check_pa_matches_va(va, pa);
	return pa;
}

#if defined(CFG_TEE_CORE_DEBUG)
static void check_va_matches_pa(paddr_t pa, void *va)
{
	paddr_t p = 0;

	if (!va)
		return;

	p = virt_to_phys(va);
	if (p != pa) {
		DMSG("va %p maps 0x%" PRIxPA " expect 0x%" PRIxPA, va, p, pa);
		panic();
	}
}
#else
static void check_va_matches_pa(paddr_t pa __unused, void *va __unused)
{
}
#endif

static void *phys_to_virt_ts_vaspace(paddr_t pa, size_t len)
{
	if (!core_mmu_user_mapping_is_active())
		return NULL;

	return vm_pa2va(to_user_mode_ctx(thread_get_tsd()->ctx), pa, len);
}

#ifdef CFG_WITH_PAGER
static void *phys_to_virt_tee_ram(paddr_t pa, size_t len)
{
	paddr_t end_pa = 0;

	if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa))
		return NULL;

	if (pa >= TEE_LOAD_ADDR && pa < get_linear_map_end()) {
		if (end_pa > get_linear_map_end())
			return NULL;
		return (void *)(vaddr_t)(pa + boot_mmu_config.load_offset);
	}

	return tee_pager_phys_to_virt(pa, len);
}
#else
static void *phys_to_virt_tee_ram(paddr_t pa, size_t len)
{
	struct tee_mmap_region *mmap = NULL;

	mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM, pa, len);
	if (!mmap)
		mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RW, pa, len);
	if (!mmap)
		mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RO, pa, len);
	if (!mmap)
		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RW, pa, len);
	if (!mmap)
		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RO, pa, len);
	if (!mmap)
		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RX, pa, len);

	/*
	 * Note that MEM_AREA_INIT_RAM_RO and MEM_AREA_INIT_RAM_RX are only
	 * used with pager and not needed here.
	 */
	return map_pa2va(mmap, pa, len);
}
#endif

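/*
 * Translate physical address @pa of memory type @m to the core virtual
 * address it is mapped at, or NULL if it isn't mapped. The whole @len
 * bytes must fit within the mapping.
 */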
static void *phys_to_virt_ts_vaspace(paddr_t pa, size_t len)
{
	if (!core_mmu_user_mapping_is_active())
		return NULL;

	return vm_pa2va(to_user_mode_ctx(thread_get_tsd()->ctx), pa, len);
}

#ifdef CFG_WITH_PAGER
static void *phys_to_virt_tee_ram(paddr_t pa, size_t len)
{
	paddr_t end_pa = 0;

	if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa))
		return NULL;

	if (pa >= TEE_LOAD_ADDR && pa < get_linear_map_end()) {
		if (end_pa > get_linear_map_end())
			return NULL;
		return (void *)(vaddr_t)(pa + boot_mmu_config.load_offset);
	}

	return tee_pager_phys_to_virt(pa, len);
}
#else
static void *phys_to_virt_tee_ram(paddr_t pa, size_t len)
{
	struct tee_mmap_region *mmap = NULL;

	mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM, pa, len);
	if (!mmap)
		mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RW, pa, len);
	if (!mmap)
		mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RO, pa, len);
	if (!mmap)
		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RW, pa, len);
	if (!mmap)
		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RO, pa, len);
	if (!mmap)
		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RX, pa, len);
	/*
	 * Note that MEM_AREA_INIT_RAM_RO and MEM_AREA_INIT_RAM_RX are only
	 * used with the pager and thus not needed here.
	 */
	return map_pa2va(mmap, pa, len);
}
#endif

void *phys_to_virt(paddr_t pa, enum teecore_memtypes m, size_t len)
{
	void *va = NULL;

	switch (m) {
	case MEM_AREA_TS_VASPACE:
		va = phys_to_virt_ts_vaspace(pa, len);
		break;
	case MEM_AREA_TEE_RAM:
	case MEM_AREA_TEE_RAM_RX:
	case MEM_AREA_TEE_RAM_RO:
	case MEM_AREA_TEE_RAM_RW:
	case MEM_AREA_NEX_RAM_RO:
	case MEM_AREA_NEX_RAM_RW:
		va = phys_to_virt_tee_ram(pa, len);
		break;
	case MEM_AREA_SHM_VASPACE:
		/* Finding a VA from a PA in dynamic SHM is not yet supported */
		va = NULL;
		break;
	default:
		va = map_pa2va(find_map_by_type_and_pa(m, pa, len), pa, len);
	}
	if (m != MEM_AREA_SEC_RAM_OVERALL)
		check_va_matches_pa(pa, va);
	return va;
}

void *phys_to_virt_io(paddr_t pa, size_t len)
{
	struct tee_mmap_region *map = NULL;
	void *va = NULL;

	map = find_map_by_type_and_pa(MEM_AREA_IO_SEC, pa, len);
	if (!map)
		map = find_map_by_type_and_pa(MEM_AREA_IO_NSEC, pa, len);
	if (!map)
		return NULL;
	va = map_pa2va(map, pa, len);
	check_va_matches_pa(pa, va);
	return va;
}

vaddr_t core_mmu_get_va(paddr_t pa, enum teecore_memtypes type, size_t len)
{
	if (cpu_mmu_enabled())
		return (vaddr_t)phys_to_virt(pa, type, len);

	return (vaddr_t)pa;
}

#ifdef CFG_WITH_PAGER
bool is_unpaged(void *va)
{
	vaddr_t v = (vaddr_t)va;

	return v >= VCORE_START_VA && v < get_linear_map_end();
}
#else
bool is_unpaged(void *va __unused)
{
	return true;
}
#endif

void core_mmu_init_virtualization(void)
{
	virt_init_memory(static_memory_map);
}

vaddr_t io_pa_or_va(struct io_pa_va *p, size_t len)
{
	assert(p->pa);
	if (cpu_mmu_enabled()) {
		if (!p->va)
			p->va = (vaddr_t)phys_to_virt_io(p->pa, len);
		assert(p->va);
		return p->va;
	}
	return p->pa;
}

vaddr_t io_pa_or_va_secure(struct io_pa_va *p, size_t len)
{
	assert(p->pa);
	if (cpu_mmu_enabled()) {
		if (!p->va)
			p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_SEC,
						      len);
		assert(p->va);
		return p->va;
	}
	return p->pa;
}

vaddr_t io_pa_or_va_nsec(struct io_pa_va *p, size_t len)
{
	assert(p->pa);
	if (cpu_mmu_enabled()) {
		if (!p->va)
			p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_NSEC,
						      len);
		assert(p->va);
		return p->va;
	}
	return p->pa;
}
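/*
 * Usage sketch (illustrative only): drivers typically keep a static
 * struct io_pa_va holding the physical base and resolve the virtual
 * address lazily through one of the helpers above, so the same code
 * works both before and after the MMU is enabled. EXAMPLE_UART_BASE
 * and EXAMPLE_UART_SIZE are hypothetical constants.
 *
 *	static struct io_pa_va uart_base = { .pa = EXAMPLE_UART_BASE };
 *
 *	vaddr_t base = io_pa_or_va_secure(&uart_base, EXAMPLE_UART_SIZE);
 *
 *	io_write32(base + 0x10, 1);
 */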
#ifdef CFG_CORE_RESERVED_SHM
static TEE_Result teecore_init_pub_ram(void)
{
	vaddr_t s = 0;
	vaddr_t e = 0;

	/* Get virtual addr/size of NSec shared mem allocated from teecore */
	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e);

	if (s >= e || s & SMALL_PAGE_MASK || e & SMALL_PAGE_MASK)
		panic("invalid PUB RAM");

	/* Extra check: we could rely on core_mmu_get_mem_by_type() */
	if (!tee_vbuf_is_non_sec(s, e - s))
		panic("PUB RAM is not non-secure");

#ifdef CFG_PL310
	/* Allocate the l2cc mutex statically */
	tee_l2cc_store_mutex_boot_pa(virt_to_phys((void *)s));
	s += sizeof(uint32_t);			/* size of a pl310 mutex */
	s = ROUNDUP(s, SMALL_PAGE_SIZE);	/* keep required alignment */
#endif

	default_nsec_shm_paddr = virt_to_phys((void *)s);
	default_nsec_shm_size = e - s;

	return TEE_SUCCESS;
}
early_init(teecore_init_pub_ram);
#endif /*CFG_CORE_RESERVED_SHM*/

void core_mmu_init_ta_ram(void)
{
	vaddr_t s = 0;
	vaddr_t e = 0;
	paddr_t ps = 0;
	size_t size = 0;

	/*
	 * Get virtual addr/size of the RAM where TAs are loaded/executed:
	 * with virtualization it's the per-guest TA RAM, otherwise the
	 * statically mapped MEM_AREA_TA_RAM.
	 */
	if (IS_ENABLED(CFG_VIRTUALIZATION))
		virt_get_ta_ram(&s, &e);
	else
		core_mmu_get_mem_by_type(MEM_AREA_TA_RAM, &s, &e);

	ps = virt_to_phys((void *)s);
	size = e - s;

	if (!ps || (ps & CORE_MMU_USER_CODE_MASK) ||
	    !size || (size & CORE_MMU_USER_CODE_MASK))
		panic("invalid TA RAM");

	/* Extra check: we could rely on core_mmu_get_mem_by_type() */
	if (!tee_pbuf_is_sec(ps, size))
		panic("TA RAM is not secure");

	if (!tee_mm_is_empty(&tee_mm_sec_ddr))
		panic("TA RAM pool is not empty");

	/* Remove previous config and init TA ddr memory pool */
	tee_mm_final(&tee_mm_sec_ddr);
	tee_mm_init(&tee_mm_sec_ddr, ps, size, CORE_MMU_USER_CODE_SHIFT,
		    TEE_MM_POOL_NO_FLAGS);
}
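/*
 * Usage sketch (illustrative only): once core_mmu_init_ta_ram() has
 * initialized tee_mm_sec_ddr, physical TA memory is handed out through
 * the tee_mm allocator, roughly as below; num_bytes is a hypothetical
 * size.
 *
 *	tee_mm_entry_t *mm = tee_mm_alloc(&tee_mm_sec_ddr, num_bytes);
 *
 *	if (!mm)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *	... tee_mm_get_smem(mm) is the physical base of the allocation ...
 *	tee_mm_free(mm);
 */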