Lines Matching refs:map
125 struct tee_mmap_region *old = mem_map->map; in heap_realloc_memory_map()
133 mem_map->map = m; in heap_realloc_memory_map()
140 struct tee_mmap_region *old = mem_map->map; in boot_mem_realloc_memory_map()
146 mem_map->map = m; in boot_mem_realloc_memory_map()
190 struct memory_map *map = virt_get_memory_map(); in get_memory_map() local
192 if (map) in get_memory_map()
193 return map; in get_memory_map()
227 static bool pa_is_in_map(struct tee_mmap_region *map, paddr_t pa, size_t len) in pa_is_in_map() argument
231 if (!map) in pa_is_in_map()
237 return (pa >= map->pa && end_pa <= map->pa + map->size - 1); in pa_is_in_map()
240 static bool va_is_in_map(struct tee_mmap_region *map, vaddr_t va) in va_is_in_map() argument
242 if (!map) in va_is_in_map()
244 return (va >= map->va && va <= (map->va + map->size - 1)); in va_is_in_map()
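
Both containment checks above reduce to a closed-interval comparison against the region start and its inclusive end. A minimal standalone sketch, assuming a simplified stand-in for struct tee_mmap_region with only the fields the fragments use; the end_pa computation and the zero-length guard are assumptions, since the listing only shows the NULL checks and the return expressions:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t paddr_t;    /* assumed equivalents of the kernel typedefs */
    typedef uintptr_t vaddr_t;

    /* Simplified stand-in for struct tee_mmap_region; only the fields used
     * by the two checks are kept, and size is assumed to be non-zero. */
    struct mmap_region {
        paddr_t pa;
        vaddr_t va;
        size_t size;
    };

    /* Shape of pa_is_in_map(): [pa, pa + len - 1] must lie entirely inside
     * [map->pa, map->pa + map->size - 1]. */
    static bool pa_in_region(const struct mmap_region *map, paddr_t pa, size_t len)
    {
        paddr_t end_pa = 0;

        if (!map || !len)
            return false;
        end_pa = pa + len - 1;
        if (end_pa < pa)    /* assumed wrap-around guard */
            return false;
        return pa >= map->pa && end_pa <= map->pa + map->size - 1;
    }

    /* Shape of va_is_in_map(): a single address against the inclusive end. */
    static bool va_in_region(const struct mmap_region *map, vaddr_t va)
    {
        if (!map)
            return false;
        return va >= map->va && va <= map->va + map->size - 1;
    }
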
249 struct tee_mmap_region *map) in pbuf_inside_map_area() argument
251 return core_is_buffer_inside(p, l, map->pa, map->size); in pbuf_inside_map_area()
255 TEE_Result (*fn)(struct tee_mmap_region *map, in core_mmu_for_each_map() argument
263 res = fn(mem_map->map + n, ptr); in core_mmu_for_each_map()
277 if (mem_map->map[n].type == type) in find_map_by_type()
278 return mem_map->map + n; in find_map_by_type()
290 if (mem_map->map[n].type != type) in find_map_by_type_and_pa()
292 if (pa_is_in_map(mem_map->map + n, pa, len)) in find_map_by_type_and_pa()
293 return mem_map->map + n; in find_map_by_type_and_pa()
305 if (a >= mem_map->map[n].va && in find_map_by_va()
306 a <= (mem_map->map[n].va - 1 + mem_map->map[n].size)) in find_map_by_va()
307 return mem_map->map + n; in find_map_by_va()
320 if ((mem_map->map[n].attr & TEE_MATTR_VALID_BLOCK) && in find_map_by_pa()
321 pa >= mem_map->map[n].pa && in find_map_by_pa()
322 pa <= (mem_map->map[n].pa - 1 + mem_map->map[n].size)) in find_map_by_pa()
323 return mem_map->map + n; in find_map_by_pa()
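
find_map_by_type(), find_map_by_type_and_pa(), find_map_by_va() and find_map_by_pa() all walk mem_map->map linearly and return a pointer into the array on the first hit. A hedged sketch of that lookup pattern over simplified types; TEE_MATTR_VALID_BLOCK is replaced by a placeholder bit:

    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t paddr_t;
    typedef uintptr_t vaddr_t;

    #define MATTR_VALID_BLOCK 0x1    /* placeholder for TEE_MATTR_VALID_BLOCK */

    struct mmap_region {
        unsigned int type;
        uint32_t attr;
        paddr_t pa;
        vaddr_t va;
        size_t size;
    };

    struct memory_map {
        struct mmap_region *map;
        size_t count;
    };

    /* First region of a given type, as in find_map_by_type(). */
    static struct mmap_region *find_by_type(struct memory_map *mem_map,
                                            unsigned int type)
    {
        size_t n = 0;

        for (n = 0; n < mem_map->count; n++)
            if (mem_map->map[n].type == type)
                return mem_map->map + n;
        return NULL;
    }

    /* Region covering a physical address, as in find_map_by_pa(): only valid
     * blocks count, and the end is written pa - 1 + size so a region touching
     * the top of the address space does not overflow. */
    static struct mmap_region *find_by_pa(struct memory_map *mem_map, paddr_t pa)
    {
        size_t n = 0;

        for (n = 0; n < mem_map->count; n++)
            if ((mem_map->map[n].attr & MATTR_VALID_BLOCK) &&
                pa >= mem_map->map[n].pa &&
                pa <= mem_map->map[n].pa - 1 + mem_map->map[n].size)
                return mem_map->map + n;
        return NULL;
    }
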
448 struct tee_mmap_region *map) in check_phys_mem_is_outside() argument
454 map->pa, map->size)) { in check_phys_mem_is_outside()
458 map->type, map->pa, map->size); in check_phys_mem_is_outside()
514 switch (mem_map->map[n].type) { in core_mmu_set_discovered_nsec_ddr()
516 carve_out_phys_mem(&m, &num_elems, mem_map->map[n].pa, in core_mmu_set_discovered_nsec_ddr()
517 mem_map->map[n].size); in core_mmu_set_discovered_nsec_ddr()
531 mem_map->map + n); in core_mmu_set_discovered_nsec_ddr()
708 mem_map->map[n].type == MEM_AREA_SEC_RAM_OVERALL) in verify_special_mem_areas()
712 mem_map->map[n].pa, in verify_special_mem_areas()
713 mem_map->map[n].size)) { in verify_special_mem_areas()
715 mem_map->map[n].pa, in verify_special_mem_areas()
716 mem_map->map[n].size); in verify_special_mem_areas()
779 if (mmaps_are_mergeable(mem_map->map + n, &m0)) { in add_phys_mem()
780 merge_mmaps(mem_map->map + n, &m0); in add_phys_mem()
786 mmaps_are_mergeable(mem_map->map + n, in add_phys_mem()
787 mem_map->map + n + 1)) { in add_phys_mem()
788 merge_mmaps(mem_map->map + n, in add_phys_mem()
789 mem_map->map + n + 1); in add_phys_mem()
790 rem_array_elem(mem_map->map, mem_map->count, in add_phys_mem()
791 sizeof(*mem_map->map), n + 1); in add_phys_mem()
794 if (n > 0 && mmaps_are_mergeable(mem_map->map + n - 1, in add_phys_mem()
795 mem_map->map + n)) { in add_phys_mem()
796 merge_mmaps(mem_map->map + n - 1, in add_phys_mem()
797 mem_map->map + n); in add_phys_mem()
798 rem_array_elem(mem_map->map, mem_map->count, in add_phys_mem()
799 sizeof(*mem_map->map), n); in add_phys_mem()
804 if (mem_type < mem_map->map[n].type || in add_phys_mem()
805 (mem_type == mem_map->map[n].type && in add_phys_mem()
806 mem_addr < mem_map->map[n].pa)) in add_phys_mem()
811 ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map), in add_phys_mem()
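
add_phys_mem() keeps the array sorted by (type, pa) and folds a new range into mergeable neighbours instead of always growing the array. The sketch below shows the same insert-then-merge idea in a simplified form: the ordering test matches the comparison visible above, but the mergeability rule (same type, contiguous or overlapping) and the single merge pass are assumptions, and ins_array_elem()/rem_array_elem() are replaced by memmove():

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    typedef uintptr_t paddr_t;

    struct mem_region {
        unsigned int type;
        paddr_t pa;
        size_t size;
    };

    /* Assumed rule: same type and contiguous or overlapping ranges. This
     * ignores the top-of-address-space corner case and whatever extra
     * conditions the real mmaps_are_mergeable() applies. */
    static bool regions_mergeable(const struct mem_region *a,
                                  const struct mem_region *b)
    {
        return a->type == b->type && b->pa <= a->pa + a->size &&
               a->pa <= b->pa + b->size;
    }

    static void regions_merge(struct mem_region *a, const struct mem_region *b)
    {
        paddr_t begin = a->pa < b->pa ? a->pa : b->pa;
        paddr_t end_a = a->pa + a->size;
        paddr_t end_b = b->pa + b->size;

        a->pa = begin;
        a->size = (end_a > end_b ? end_a : end_b) - begin;
    }

    /* Insert into an array kept sorted by (type, pa), then collapse mergeable
     * neighbours. The caller must guarantee room for one more element. */
    static void add_region(struct mem_region *arr, size_t *count,
                           struct mem_region m)
    {
        size_t n = 0;

        /* Same ordering test as in add_phys_mem() above. */
        for (n = 0; n < *count; n++)
            if (m.type < arr[n].type ||
                (m.type == arr[n].type && m.pa < arr[n].pa))
                break;

        memmove(arr + n + 1, arr + n, (*count - n) * sizeof(*arr));
        arr[n] = m;
        (*count)++;

        /* Merge pass: stay on the same index after a merge so chains of
         * adjacent regions collapse into one entry. */
        for (n = 0; n + 1 < *count; ) {
            if (regions_mergeable(arr + n, arr + n + 1)) {
                regions_merge(arr + n, arr + n + 1);
                memmove(arr + n + 1, arr + n + 2,
                        (*count - n - 2) * sizeof(*arr));
                (*count)--;
            } else {
                n++;
            }
        }
    }
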
822 if (type < mem_map->map[n].type) in add_va_space()
827 ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map), in add_va_space()
829 mem_map->map[n] = (struct tee_mmap_region){ in add_va_space()
944 struct tee_mmap_region *map __maybe_unused = mem_map->map + n; in dump_mmap_table()
948 teecore_memtype_name(map->type), map->va, in dump_mmap_table()
949 map->va + map->size - 1, map->pa, in dump_mmap_table()
950 (paddr_t)(map->pa + map->size - 1), map->size, in dump_mmap_table()
951 map->region_size == SMALL_PAGE_SIZE ? "smallpg" : "pgdir"); in dump_mmap_table()
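
dump_mmap_table() prints one line per region: type name, VA range, PA range, size and mapping granularity. A standalone sketch of that line, using printf() instead of the trace macros and a precomputed type name string in place of teecore_memtype_name():

    #include <inttypes.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t paddr_t;
    typedef uintptr_t vaddr_t;

    #define SMALL_PAGE_SIZE 0x1000UL    /* placeholder value */

    struct mmap_region {
        const char *type_name;    /* stand-in for teecore_memtype_name(type) */
        paddr_t pa;
        vaddr_t va;
        size_t size;
        size_t region_size;
    };

    /* One line per region, with the same fields as the dump above. */
    static void dump_region(const struct mmap_region *map)
    {
        printf("type %-12s va 0x%08" PRIxPTR "..0x%08" PRIxPTR
               " pa 0x%08" PRIxPTR "..0x%08" PRIxPTR " size 0x%08zx (%s)\n",
               map->type_name, map->va, map->va + map->size - 1, map->pa,
               (paddr_t)(map->pa + map->size - 1), map->size,
               map->region_size == SMALL_PAGE_SIZE ? "smallpg" : "pgdir");
    }
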
1029 if (map_is_tee_ram(mem_map->map + n)) { in add_pager_vaspace()
1031 begin = mem_map->map[n].pa; in add_pager_vaspace()
1036 end = mem_map->map[pos - 1].pa + mem_map->map[pos - 1].size; in add_pager_vaspace()
1041 ins_array_elem(mem_map->map, mem_map->count, sizeof(*mem_map->map), in add_pager_vaspace()
1043 mem_map->map[n] = (struct tee_mmap_region){ in add_pager_vaspace()
1235 paddr_t mask = mem_map->map[n].pa | mem_map->map[n].size; in assign_mem_granularity()
1240 if (map_is_tee_ram(mem_map->map + n)) in assign_mem_granularity()
1241 mem_map->map[n].region_size = SMALL_PAGE_SIZE; in assign_mem_granularity()
1243 mem_map->map[n].region_size = CORE_MMU_PGDIR_SIZE; in assign_mem_granularity()
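
assign_mem_granularity() chooses the mapping granularity per region: TEE RAM gets small pages, everything else whole page directories. A sketch under the assumption that the pa | size mask computed above feeds a small-page alignment check; only the if/else choice appears verbatim in the listing:

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t paddr_t;

    /* Placeholder sizes; the real values come from the MMU headers. */
    #define SMALL_PAGE_SIZE     0x1000UL
    #define CORE_MMU_PGDIR_SIZE 0x200000UL

    struct mem_region {
        paddr_t pa;
        size_t size;
        size_t region_size;
        bool is_tee_ram;    /* stand-in for map_is_tee_ram(map) */
    };

    static void assign_granularity(struct mem_region *map, size_t count)
    {
        size_t n = 0;

        for (n = 0; n < count; n++) {
            paddr_t mask = map[n].pa | map[n].size;

            /* Assumed use of the mask: every region must at least be
             * small-page aligned. */
            assert(!(mask & (SMALL_PAGE_SIZE - 1)));

            if (map[n].is_tee_ram)
                map[n].region_size = SMALL_PAGE_SIZE;
            else
                map[n].region_size = CORE_MMU_PGDIR_SIZE;
        }
    }
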
1264 struct tee_mmap_region *map = NULL; in assign_mem_va_dir() local
1279 mem_map->map[n].va = 0; in assign_mem_va_dir()
1290 map = mem_map->map + n; in assign_mem_va_dir()
1291 if (map_is_tee_ram(map) || in assign_mem_va_dir()
1292 map->type == MEM_AREA_PAGER_VASPACE) { in assign_mem_va_dir()
1293 assert(!(va & (map->region_size - 1))); in assign_mem_va_dir()
1294 assert(!(map->size & (map->region_size - 1))); in assign_mem_va_dir()
1295 map->va = va; in assign_mem_va_dir()
1296 if (ADD_OVERFLOW(va, map->size, &va)) in assign_mem_va_dir()
1310 map = mem_map->map + n; in assign_mem_va_dir()
1311 map->attr = core_mmu_type_to_attr(map->type); in assign_mem_va_dir()
1312 if (map->va) in assign_mem_va_dir()
1316 va_is_secure != map_is_secure(map)) { in assign_mem_va_dir()
1320 core_mmu_type_is_nex_shared(map->type)) { in assign_mem_va_dir()
1325 if (SUB_OVERFLOW(va, map->size, &va)) in assign_mem_va_dir()
1327 va = ROUNDDOWN2(va, map->region_size); in assign_mem_va_dir()
1333 if (map->size > 2 * CORE_MMU_PGDIR_SIZE) { in assign_mem_va_dir()
1336 va += (map->pa - va) & CORE_MMU_PGDIR_MASK; in assign_mem_va_dir()
1338 map->va = va; in assign_mem_va_dir()
1346 map = mem_map->map + n; in assign_mem_va_dir()
1347 map->attr = core_mmu_type_to_attr(map->type); in assign_mem_va_dir()
1348 if (map->va) in assign_mem_va_dir()
1352 va_is_secure != map_is_secure(map)) { in assign_mem_va_dir()
1358 core_mmu_type_is_nex_shared(map->type)) { in assign_mem_va_dir()
1365 if (ROUNDUP2_OVERFLOW(va, map->region_size, &va)) in assign_mem_va_dir()
1372 if (map->size > 2 * CORE_MMU_PGDIR_SIZE) { in assign_mem_va_dir()
1373 vaddr_t offs = (map->pa - va) & in assign_mem_va_dir()
1380 map->va = va; in assign_mem_va_dir()
1381 if (ADD_OVERFLOW(va, map->size, &va)) in assign_mem_va_dir()
1465 if (core_is_buffer_intersect(mem_map->map[n].va, in mem_map_add_id_map()
1466 mem_map->map[n].size, start, len)) in mem_map_add_id_map()
1470 mem_map->map[mem_map->count - 1] = (struct tee_mmap_region){ in mem_map_add_id_map()
1508 qsort(mem_map->map, mem_map->count, sizeof(struct tee_mmap_region), in init_mem_map()
1538 qsort(mem_map->map, mem_map->count, sizeof(struct tee_mmap_region), in init_mem_map()
1553 m = mem_map->map + n; in check_mem_map()
1634 mem_map.map = boot_mem_alloc_tmp(mem_map.alloc_count * in core_init_mmu_map()
1635 sizeof(*mem_map.map), in core_init_mmu_map()
1636 alignof(*mem_map.map)); in core_init_mmu_map()
1640 .map = &tmp_mmap_region, in core_init_mmu_map()
1648 static_memory_map.map[0] = (struct tee_mmap_region){ in core_init_mmu_map()
1665 boot_mem_add_reloc(&static_memory_map.map); in core_init_mmu_map()
1671 size_t elem_sz = sizeof(*static_memory_map.map); in core_mmu_save_mem_map()
1677 memcpy(p, static_memory_map.map, static_memory_map.count * elem_sz); in core_mmu_save_mem_map()
1678 static_memory_map.map = p; in core_mmu_save_mem_map()
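
core_mmu_save_mem_map() preserves the boot-time map by copying the entries into a longer-lived buffer and repointing static_memory_map.map at the copy. A sketch of that copy-and-repoint step; plain calloc() stands in for whatever permanent allocator the real code uses:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    typedef uintptr_t paddr_t;
    typedef uintptr_t vaddr_t;

    struct mmap_region {
        unsigned int type;
        paddr_t pa;
        vaddr_t va;
        size_t size;
    };

    struct memory_map {
        struct mmap_region *map;
        size_t count;
    };

    /* Copy the current entries to a new buffer and repoint the map at it;
     * the original entries may live in temporary boot memory that is about
     * to be released. */
    static int save_mem_map(struct memory_map *mm)
    {
        size_t elem_sz = sizeof(*mm->map);
        struct mmap_region *p = calloc(mm->count, elem_sz);

        if (!p)
            return -1;
        memcpy(p, mm->map, mm->count * elem_sz);
        mm->map = p;
        return 0;
    }
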
1709 struct tee_mmap_region *map; in core_pbuf_is() local
1732 map = find_map_by_pa(pbuf); in core_pbuf_is()
1733 if (!map || !pbuf_inside_map_area(pbuf, len, map)) in core_pbuf_is()
1735 return mattr_is_cached(map->attr); in core_pbuf_is()
1760 struct tee_mmap_region *map; in core_va2pa_helper() local
1762 map = find_map_by_va(va); in core_va2pa_helper()
1763 if (!va_is_in_map(map, (vaddr_t)va)) in core_va2pa_helper()
1771 if (map->pa) in core_va2pa_helper()
1772 *pa = map->pa + (vaddr_t)va - map->va; in core_va2pa_helper()
1779 static void *map_pa2va(struct tee_mmap_region *map, paddr_t pa, size_t len) in map_pa2va() argument
1781 if (!pa_is_in_map(map, pa, len)) in map_pa2va()
1784 return (void *)(vaddr_t)(map->va + pa - map->pa); in map_pa2va()
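
core_va2pa_helper() and map_pa2va() are inverses: once the covering region is known, the translation is a fixed offset from the region base. A minimal sketch of that arithmetic, with the region lookup (see the find_map_by_* fragments above) left out:

    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t paddr_t;
    typedef uintptr_t vaddr_t;

    struct mmap_region {
        paddr_t pa;
        vaddr_t va;
        size_t size;
    };

    /* VA -> PA inside a known region, as in core_va2pa_helper():
     * pa = map->pa + (va - map->va). */
    static paddr_t region_va2pa(const struct mmap_region *map, vaddr_t va)
    {
        return map->pa + (va - map->va);
    }

    /* PA -> VA inside a known region, as in map_pa2va():
     * va = map->va + (pa - map->pa). */
    static void *region_pa2va(const struct mmap_region *map, paddr_t pa)
    {
        return (void *)(map->va + (pa - map->pa));
    }
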
1793 struct tee_mmap_region *map = find_map_by_type(type); in core_mmu_get_mem_by_type() local
1795 if (map) { in core_mmu_get_mem_by_type()
1796 *s = map->va; in core_mmu_get_mem_by_type()
1797 *e = map->va + map->size; in core_mmu_get_mem_by_type()
1806 struct tee_mmap_region *map = find_map_by_pa(pa); in core_mmu_get_type_by_pa() local
1809 if (!map || map->type == MEM_AREA_RES_VASPACE || in core_mmu_get_type_by_pa()
1810 map->type == MEM_AREA_SHM_VASPACE) in core_mmu_get_type_by_pa()
1812 return map->type; in core_mmu_get_type_by_pa()
2219 idx = mm - mem_map->map; in maybe_remove_from_mem_map()
2222 rem_array_elem(mem_map->map, mem_map->count, in maybe_remove_from_mem_map()
2223 sizeof(*mem_map->map), idx); in maybe_remove_from_mem_map()
2235 idx = mm - mem_map->map; in maybe_remove_from_mem_map()
2243 ins_array_elem(mem_map->map, mem_map->count, in maybe_remove_from_mem_map()
2244 sizeof(*mem_map->map), idx + 1, &m); in maybe_remove_from_mem_map()
2310 struct tee_mmap_region *map = NULL; in core_mmu_remove_mapping() local
2317 map = find_map_by_type_and_pa(type, pa, len); in core_mmu_remove_mapping()
2318 if (!map) in core_mmu_remove_mapping()
2328 if (map < static_memory_map.map || in core_mmu_remove_mapping()
2329 map >= static_memory_map.map + static_memory_map.count) in core_mmu_remove_mapping()
2331 i = map - static_memory_map.map; in core_mmu_remove_mapping()
2336 if (map->pa != p || map->size != l) in core_mmu_remove_mapping()
2339 clear_region(&tbl_info, map); in core_mmu_remove_mapping()
2343 if (res_map->va - map->size == map->va) { in core_mmu_remove_mapping()
2344 res_map->va -= map->size; in core_mmu_remove_mapping()
2345 res_map->size += map->size; in core_mmu_remove_mapping()
2349 rem_array_elem(static_memory_map.map, static_memory_map.count, in core_mmu_remove_mapping()
2350 sizeof(*static_memory_map.map), i); in core_mmu_remove_mapping()
2367 if (mem_map->map[n].type != type) in core_mmu_find_mapping_exclusive()
2373 map_found = mem_map->map + n; in core_mmu_find_mapping_exclusive()
2386 struct tee_mmap_region *map = NULL; in core_mmu_add_mapping() local
2398 map = find_map_by_type_and_pa(type, addr, len); in core_mmu_add_mapping()
2399 if (map && pbuf_inside_map_area(addr, len, map)) in core_mmu_add_mapping()
2400 return (void *)(vaddr_t)(map->va + addr - map->pa); in core_mmu_add_mapping()
2403 map = find_map_by_type(MEM_AREA_RES_VASPACE); in core_mmu_add_mapping()
2404 if (!map) in core_mmu_add_mapping()
2407 if (!core_mmu_find_table(NULL, map->va, UINT_MAX, &tbl_info)) in core_mmu_add_mapping()
2415 if (map->size < l) in core_mmu_add_mapping()
2423 if (core_mmu_va2idx(&tbl_info, map->va + len) >= tbl_info.num_entries) in core_mmu_add_mapping()
2429 mem_map->map[mem_map->count] = (struct tee_mmap_region){ in core_mmu_add_mapping()
2430 .va = map->va, in core_mmu_add_mapping()
2437 map->va += l; in core_mmu_add_mapping()
2438 map->size -= l; in core_mmu_add_mapping()
2439 map = mem_map->map + mem_map->count; in core_mmu_add_mapping()
2442 set_region(&tbl_info, map); in core_mmu_add_mapping()
2447 return (void *)(vaddr_t)(map->va + addr - map->pa); in core_mmu_add_mapping()
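
The tail of core_mmu_add_mapping() carves the new mapping out of the front of the MEM_AREA_RES_VASPACE region: a new entry is appended at mem_map->map[mem_map->count] and the free region's va/size are advanced past it. A hedged sketch of that carve step only; the page-table programming done by set_region() and the sub-page offset handling are omitted, and len is assumed to be already granule-aligned:

    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t paddr_t;
    typedef uintptr_t vaddr_t;

    struct mmap_region {
        unsigned int type;
        paddr_t pa;
        vaddr_t va;
        size_t size;
    };

    struct memory_map {
        struct mmap_region *map;
        size_t count;
        size_t alloc_count;
    };

    /* Take 'len' bytes of virtual space from the front of the free region
     * 'res' and append a new entry describing a mapping of 'pa'. Returns the
     * new virtual address, or NULL when the array is full or the free region
     * is too small. */
    static void *carve_va_from_free_region(struct memory_map *mem_map,
                                           struct mmap_region *res,
                                           unsigned int type, paddr_t pa,
                                           size_t len)
    {
        struct mmap_region *m = NULL;

        if (mem_map->count >= mem_map->alloc_count || res->size < len)
            return NULL;

        m = mem_map->map + mem_map->count;
        *m = (struct mmap_region){
            .type = type,
            .pa = pa,
            .va = res->va,
            .size = len,
        };
        mem_map->count++;

        /* Shrink the free region from the front, as the fragments above
         * do with map->va += l and map->size -= l. */
        res->va += len;
        res->size -= len;

        return (void *)m->va;
    }
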
2664 struct tee_mmap_region *map = NULL; in phys_to_virt_io() local
2667 map = find_map_by_type_and_pa(MEM_AREA_IO_SEC, pa, len); in phys_to_virt_io()
2668 if (!map) in phys_to_virt_io()
2669 map = find_map_by_type_and_pa(MEM_AREA_IO_NSEC, pa, len); in phys_to_virt_io()
2670 if (!map) in phys_to_virt_io()
2672 va = map_pa2va(map, pa, len); in phys_to_virt_io()