/optee_os/core/arch/arm/kernel/

kern.ld.S
     70  ASSERT(!(TEE_LOAD_ADDR & (SMALL_PAGE_SIZE - 1)),
     79  __flatmap_unpg_rx_start = LD_ROUNDDOWN(__text_start, SMALL_PAGE_SIZE);
    112  . = ALIGN(SMALL_PAGE_SIZE);
    174  . = ALIGN(SMALL_PAGE_SIZE);
    216  . = ALIGN(SMALL_PAGE_SIZE);
    283  . = ALIGN(SMALL_PAGE_SIZE);
    295  . = ALIGN(SMALL_PAGE_SIZE);
    305  ASSERT(!(__flatmap_init_rx_start & (SMALL_PAGE_SIZE - 1)),
    324  . = ALIGN(SMALL_PAGE_SIZE);
    346  __init_end = ALIGN(__ro_and_relro_data_init_end, SMALL_PAGE_SIZE);
    [all …]

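These linker-script lines enforce small-page granularity: section boundaries are ALIGNed to SMALL_PAGE_SIZE and load addresses are asserted to have no bits set below it. The same power-of-two alignment test expressed in C, as a minimal illustration assuming the usual 4 KiB SMALL_PAGE_SIZE:

    #include <stdbool.h>
    #include <stdint.h>

    #define SMALL_PAGE_SIZE 0x1000  /* 4 KiB, assumed */

    /* True if addr is small-page aligned: no bits set below the page size. */
    static bool is_page_aligned(uintptr_t addr)
    {
        return !(addr & (SMALL_PAGE_SIZE - 1));
    }
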
boot.c
    403  stats.npages_all * SMALL_PAGE_SIZE / 1024);  in print_pager_pool_size()
    462  const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;  in ro_paged_alloc()
    484  size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *  in init_pager_runtime()
    493  assert(pageable_size % SMALL_PAGE_SIZE == 0);  in init_pager_runtime()
    544  for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {  in init_pager_runtime()
    546  const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;  in init_pager_runtime()
    550  res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);  in init_pager_runtime()
    604  tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);  in init_pager_runtime()
    606  (pageable_size - init_size) / SMALL_PAGE_SIZE,  in init_pager_runtime()
    610  SMALL_PAGE_SIZE, true);  in init_pager_runtime()
    [all …]

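The boot.c hits come from pager initialization, which walks the paged store one small page at a time and verifies a per-page SHA-256 hash before handing the pages to the pager. Below is a minimal sketch of that per-page loop, assuming a 4 KiB SMALL_PAGE_SIZE and a 32-byte hash per page; verify_paged_store() and the check callback are illustrative stand-ins for init_pager_runtime() and hash_sha256_check().

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define SMALL_PAGE_SIZE      0x1000  /* 4 KiB, assumed */
    #define PAGE_HASH_SIZE       32      /* SHA-256 digest, assumed */

    static int verify_paged_store(const uint8_t *paged_store,
                                  const uint8_t *hashes, size_t pageable_size,
                                  int (*check)(const uint8_t *hash,
                                               const uint8_t *page, size_t len))
    {
        size_t n = 0;

        /* The paged store must be a whole number of small pages. */
        assert(pageable_size % SMALL_PAGE_SIZE == 0);

        for (n = 0; n * SMALL_PAGE_SIZE < pageable_size; n++) {
            const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
            const uint8_t *hash = hashes + n * PAGE_HASH_SIZE;

            if (check(hash, page, SMALL_PAGE_SIZE))
                return -1;  /* page content does not match its hash */
        }

        return 0;
    }
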
secure_partition.c
    353  res = vm_map(&ctx->uctx, va, reg->page_count * SMALL_PAGE_SIZE,  in sp_map_shared()
    375  len = reg->page_count * SMALL_PAGE_SIZE;  in sp_unmap_ffa_regions()
    497  size_t bb_size = ROUNDUP(BOUNCE_BUFFER_SIZE, SMALL_PAGE_SIZE);  in load_binary_sp()
    498  size_t bb_num_pages = bb_size / SMALL_PAGE_SIZE;  in load_binary_sp()
    544  if (ROUNDUP_OVERFLOW(bin_size, SMALL_PAGE_SIZE, &bin_size_rounded)) {  in load_binary_sp()
    549  bin_page_count = bin_size_rounded / SMALL_PAGE_SIZE;  in load_binary_sp()
    704  size_t total_size = ROUNDUP(fdt_totalsize(fdt), SMALL_PAGE_SIZE);  in copy_and_map_fdt()
    705  size_t num_pages = total_size / SMALL_PAGE_SIZE;  in copy_and_map_fdt()
    776  size_t total_size = ROUNDUP(CFG_SP_INIT_INFO_MAX_SIZE, SMALL_PAGE_SIZE);  in create_and_map_boot_info()
    777  size_t num_pages = total_size / SMALL_PAGE_SIZE;  in create_and_map_boot_info()
    [all …]

stmm_sp.c
     73  static const unsigned int stmm_heap_size = 402 * SMALL_PAGE_SIZE;
     74  static const unsigned int stmm_sec_buf_size = 4 * SMALL_PAGE_SIZE;
     75  static const unsigned int stmm_ns_comm_buf_size = 4 * SMALL_PAGE_SIZE;
    194  size_t num_pgs = ROUNDUP_DIV(sz, SMALL_PAGE_SIZE);  in alloc_and_map_sp_fobj()
    204  res = vm_map(&spc->uctx, va, num_pgs * SMALL_PAGE_SIZE,  in alloc_and_map_sp_fobj()
    259  SMALL_PAGE_SIZE);  in build_stmm_boot_hob_list()
    346  SMALL_PAGE_SIZE);  in load_stmm()
    862  res = vm_get_prot(&spc->uctx, va, SMALL_PAGE_SIZE, &attrs);  in spm_handle_get_mem_attr()
    902  MUL_OVERFLOW(nr_pages, SMALL_PAGE_SIZE, &sz) ||  in spm_handle_set_mem_attr()

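Several hits in stmm_sp.c convert byte sizes to page counts with ROUNDUP_DIV() and guard the reverse conversion with MUL_OVERFLOW(). A minimal sketch of the same arithmetic, assuming a 4 KiB SMALL_PAGE_SIZE; bytes_to_pages() and pages_to_bytes_checked() are illustrative helpers, not the OP-TEE macros themselves.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define SMALL_PAGE_SIZE 0x1000  /* 4 KiB, assumed */

    /* Ceiling division: how many small pages are needed to hold sz bytes. */
    static size_t bytes_to_pages(size_t sz)
    {
        return sz / SMALL_PAGE_SIZE + (sz % SMALL_PAGE_SIZE != 0);
    }

    /*
     * Overflow-checked pages-to-bytes conversion, in the spirit of
     * MUL_OVERFLOW(nr_pages, SMALL_PAGE_SIZE, &sz) in the hits above.
     */
    static bool pages_to_bytes_checked(size_t nr_pages, size_t *sz)
    {
        if (nr_pages > SIZE_MAX / SMALL_PAGE_SIZE)
            return false;  /* multiplication would overflow */
        *sz = nr_pages * SMALL_PAGE_SIZE;
        return true;
    }
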
/optee_os/core/arch/riscv/kernel/

kern.ld.S
     75  ASSERT(!(TEE_LOAD_ADDR & (SMALL_PAGE_SIZE - 1)),
     85  __flatmap_rx_start = LD_ROUNDDOWN(__text_start, SMALL_PAGE_SIZE);
    102  . = ALIGN(SMALL_PAGE_SIZE);
    160  . = ALIGN(SMALL_PAGE_SIZE);
    221  . = ALIGN(SMALL_PAGE_SIZE);
    291  __asan_map_start = (__asan_shadow_start / SMALL_PAGE_SIZE) *
    292  SMALL_PAGE_SIZE;
    293  __asan_map_end = ((__asan_shadow_end - 1) / SMALL_PAGE_SIZE) *
    294  SMALL_PAGE_SIZE + SMALL_PAGE_SIZE;

/optee_os/core/mm/

fobj.c
     96  memset(va, 0, SMALL_PAGE_SIZE);  in rwp_load_page()
    101  NULL, 0, src, SMALL_PAGE_SIZE, va,  in rwp_load_page()
    126  NULL, 0, va, SMALL_PAGE_SIZE, dst,  in rwp_save_page()
    139  return rwp_store_base + idx * SMALL_PAGE_SIZE;  in idx_to_store()
    154  if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))  in rwp_paged_iv_alloc()
    160  SMALL_PAGE_SIZE;  in rwp_paged_iv_alloc()
    186  uint8_t *src = idx_to_store(rwp->idx) + page_idx * SMALL_PAGE_SIZE;  in rwp_paged_iv_load_page()
    200  uint8_t *dst = idx_to_store(rwp->idx) + page_idx * SMALL_PAGE_SIZE;  in rwp_paged_iv_save_page()
    221  paddr_t pa = rwp->idx * SMALL_PAGE_SIZE + nex_phys_mem_get_ta_base();  in rwp_paged_iv_free()
    268  if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))  in rwp_unpaged_iv_alloc()
    [all …]

page_alloc.c
     71  mmv = tee_mm_alloc_flags(pool, vcount * SMALL_PAGE_SIZE, flags);  in virt_page_alloc()
     76  va += SMALL_PAGE_SIZE;  in virt_page_alloc()
     78  mmp = phys_mem_alloc_flags(pcount * SMALL_PAGE_SIZE, flags);  in virt_page_alloc()
     89  memset((void *)va, 0, pcount * SMALL_PAGE_SIZE);  in virt_page_alloc()

mobj.c
     64  if (granule != SMALL_PAGE_SIZE &&  in mobj_phys_get_pa()
    269  if (granule != SMALL_PAGE_SIZE &&  in mobj_shm_get_pa()
    348  m->mobj.phys_granule = SMALL_PAGE_SIZE;  in mobj_shm_alloc()
    383  m->mobj.size = fobj->num_pages * SMALL_PAGE_SIZE;  in mobj_with_fobj_alloc()
    384  m->mobj.phys_granule = SMALL_PAGE_SIZE;  in mobj_with_fobj_alloc()
    451  p = f->fobj->ops->get_pa(f->fobj, offs / SMALL_PAGE_SIZE) +  in mobj_with_fobj_get_pa()
    452  offs % SMALL_PAGE_SIZE;  in mobj_with_fobj_get_pa()
    455  if (granule != SMALL_PAGE_SIZE &&  in mobj_with_fobj_get_pa()
    562  *min_mem_align = SMALL_PAGE_SIZE;  in plat_get_protmem_config()

mobj_dyn_shm.c
     97  p = mobj_reg_shm->pages[full_offset / SMALL_PAGE_SIZE] +  in mobj_reg_shm_get_pa()
    100  case SMALL_PAGE_SIZE:  in mobj_reg_shm_get_pa()
    101  p = mobj_reg_shm->pages[full_offset / SMALL_PAGE_SIZE];  in mobj_reg_shm_get_pa()
    222  sz = ROUNDUP(mobj->size + r->page_offset, SMALL_PAGE_SIZE);  in mobj_reg_shm_inc_map()
    230  sz / SMALL_PAGE_SIZE,  in mobj_reg_shm_inc_map()
    323  for (n = 0; n < r->mobj.size / SMALL_PAGE_SIZE; n++)  in check_reg_shm_conflict()
    325  SMALL_PAGE_SIZE))  in check_reg_shm_conflict()
    354  if (!num_pages || page_offset >= SMALL_PAGE_SIZE)  in mobj_reg_shm_alloc()
    365  mobj_reg_shm->mobj.size = num_pages * SMALL_PAGE_SIZE - page_offset;  in mobj_reg_shm_alloc()
    366  mobj_reg_shm->mobj.phys_granule = SMALL_PAGE_SIZE;  in mobj_reg_shm_alloc()
    [all …]

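The mobj_dyn_shm.c hits show how a registered shared-memory object carries a sub-page offset: the usable size is num_pages * SMALL_PAGE_SIZE minus the offset, and mapping rounds size plus offset back up to whole pages. A small sketch of that bookkeeping, assuming a 4 KiB SMALL_PAGE_SIZE; struct reg_shm and the two helpers are illustrative, not the real mobj_reg_shm implementation.

    #include <stdbool.h>
    #include <stddef.h>

    #define SMALL_PAGE_SIZE 0x1000  /* 4 KiB, assumed */

    struct reg_shm {          /* illustrative, not the real struct */
        size_t page_offset;   /* offset of the buffer within its first page */
        size_t size;          /* usable byte size of the buffer */
    };

    static bool reg_shm_init(struct reg_shm *r, size_t num_pages,
                             size_t page_offset)
    {
        /* Same sanity check as mobj_reg_shm_alloc() in the hits above. */
        if (!num_pages || page_offset >= SMALL_PAGE_SIZE)
            return false;

        r->page_offset = page_offset;
        r->size = num_pages * SMALL_PAGE_SIZE - page_offset;
        return true;
    }

    /* Number of pages that must be mapped to cover the whole buffer. */
    static size_t reg_shm_map_pages(const struct reg_shm *r)
    {
        size_t sz = r->size + r->page_offset;  /* ROUNDUP(...) in the source */

        return (sz + SMALL_PAGE_SIZE - 1) / SMALL_PAGE_SIZE;
    }
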
core_mmu.c
    951  map->region_size == SMALL_PAGE_SIZE ? "smallpg" : "pgdir");  in dump_mmap_table()
   1046  .region_size = SMALL_PAGE_SIZE,  in add_pager_vaspace()
   1101  base, page_count * SMALL_PAGE_SIZE);  in collect_device_mem_ranges()
   1241  mem_map->map[n].region_size = SMALL_PAGE_SIZE;  in assign_mem_granularity()
   1458  vaddr_t start = ROUNDDOWN(id_map_start, SMALL_PAGE_SIZE);  in mem_map_add_id_map()
   1459  vaddr_t end = ROUNDUP(id_map_end, SMALL_PAGE_SIZE);  in mem_map_add_id_map()
   1477  .region_size = SMALL_PAGE_SIZE,  in mem_map_add_id_map()
   1613  vaddr_t start = ROUNDDOWN((vaddr_t)__nozi_start, SMALL_PAGE_SIZE);  in core_init_mmu_map()
   1616  SMALL_PAGE_SIZE);  in core_init_mmu_map()
   1619  vaddr_t len = ROUNDUP(VCORE_FREE_END_PA, SMALL_PAGE_SIZE) - start;  in core_init_mmu_map()
   [all …]

boot_mem.c
    306  SMALL_PAGE_SIZE));  in boot_mem_release_unused()
    311  va = ROUNDUP(boot_mem_desc->mem_start, SMALL_PAGE_SIZE);  in boot_mem_release_unused()
    313  tmp_va = ROUNDDOWN(boot_mem_desc->mem_end, SMALL_PAGE_SIZE);  in boot_mem_release_unused()
    332  core_mmu_unmap_pages(va, n / SMALL_PAGE_SIZE);  in boot_mem_release_unused()
    358  va = ROUNDDOWN(boot_mem_desc->mem_end, SMALL_PAGE_SIZE);  in boot_mem_release_tmp_alloc()
    373  core_mmu_unmap_pages(va, n / SMALL_PAGE_SIZE);  in boot_mem_release_tmp_alloc()

pgt_cache.c
     59  #define PGT_PARENT_SIZE (4 * SMALL_PAGE_SIZE)
    247  idx = (b - p->vabase) / SMALL_PAGE_SIZE;  in pgt_clear_range()
    248  n = (e - b) / SMALL_PAGE_SIZE;  in pgt_clear_range()
    392  COMPILE_TIME_ASSERT(PGT_SIZE * PGT_NUM_PGT_PER_PAGE == SMALL_PAGE_SIZE);  in pgt_init()
    395  uint8_t *tbl = tee_pager_alloc(SMALL_PAGE_SIZE);  in pgt_init()
    482  tee_pager_release_phys((void *)va, SMALL_PAGE_SIZE);  in push_to_free_list()
    754  idx = (b - p->vabase) / SMALL_PAGE_SIZE;  in clear_ctx_range_from_list()
    755  n = (e - b) / SMALL_PAGE_SIZE;  in clear_ctx_range_from_list()

/optee_os/core/arch/arm/mm/

sp_mem.c
     60  m->mobj.size = pages * SMALL_PAGE_SIZE;  in sp_mem_new_mobj()
     61  m->mobj.phys_granule = SMALL_PAGE_SIZE;  in sp_mem_new_mobj()
     72  return ROUNDUP_DIV(ms->mobj.size, SMALL_PAGE_SIZE);  in get_page_count()
     89  if (!tee_pbuf_is_sec(pa, num_pages * SMALL_PAGE_SIZE))  in sp_mem_add_pages()
     93  num_pages * SMALL_PAGE_SIZE))  in sp_mem_add_pages()
     99  ms->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;  in sp_mem_add_pages()
    138  p = ms->pages[offset / SMALL_PAGE_SIZE] +  in get_pa()
    141  case SMALL_PAGE_SIZE:  in get_pa()
    142  p = ms->pages[offset / SMALL_PAGE_SIZE];  in get_pa()
    267  (new_reg->page_count * SMALL_PAGE_SIZE);  in sp_mem_is_shared()
    [all …]

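get_page_count() and get_pa() in sp_mem.c index a flat array of per-page physical addresses: the array slot is offset / SMALL_PAGE_SIZE, and a byte-granule lookup adds the in-page remainder back. A minimal sketch under those assumptions; pages_get_pa() is an illustrative name and paddr_t is defined locally for the example.

    #include <stddef.h>
    #include <stdint.h>

    #define SMALL_PAGE_SIZE 0x1000  /* 4 KiB, assumed */

    typedef uint64_t paddr_t;  /* local stand-in for the OP-TEE typedef */

    /* Illustrative lookup over an array of per-page physical addresses. */
    static paddr_t pages_get_pa(const paddr_t *pages, size_t offset,
                                size_t granule)
    {
        paddr_t p = pages[offset / SMALL_PAGE_SIZE];

        /* A byte-granule (granule == 0) query keeps the in-page offset. */
        if (!granule)
            p += offset % SMALL_PAGE_SIZE;

        return p;
    }
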
tee_pager.c
    142  #define TBL_NUM_ENTRIES (CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
    214  for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)  in pager_lock_check_stack()
    236  if (pa & SMALL_PAGE_MASK || len > SMALL_PAGE_SIZE)  in tee_pager_phys_to_virt()
    415  tlbi_va_range(smem, nbytes, SMALL_PAGE_SIZE);  in tee_pager_set_alias_area()
    455  .idx = (va & mask) / SMALL_PAGE_SIZE,  in region_va2tblidx()
    489  fobj_pgidx = (va - reg->base) / SMALL_PAGE_SIZE + reg->fobj_pgoffs;  in pmem_assign_fobj_page()
    578  pager_alias_next_free += SMALL_PAGE_SIZE;  in pager_add_alias_page()
    634  base, base + fobj->num_pages * SMALL_PAGE_SIZE, type);  in tee_pager_add_core_region()
    636  reg = alloc_region(base, fobj->num_pages * SMALL_PAGE_SIZE);  in tee_pager_add_core_region()
    742  size_t s = fobj->num_pages * SMALL_PAGE_SIZE;  in pager_add_um_region()
    [all …]

mobj_ffa.c
    179  m->mf.mobj.size = num_pages * SMALL_PAGE_SIZE;  in ffa_shm_new()
    180  m->mf.mobj.phys_granule = SMALL_PAGE_SIZE;  in ffa_shm_new()
    193  if (!num_pages || MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &sz) ||  in ffa_prm_new()
    203  m->mf.mobj.phys_granule = SMALL_PAGE_SIZE;  in ffa_prm_new()
    277  return ROUNDUP_DIV(mf->mobj.size, SMALL_PAGE_SIZE);  in get_page_count()
    295  for (n = 0; n < shm->mf.mobj.size / SMALL_PAGE_SIZE; n++)  in check_shm_overlaps_prm()
    297  shm->pages[n], SMALL_PAGE_SIZE))  in check_shm_overlaps_prm()
    439  !core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))  in mobj_ffa_add_pages_at()
    446  mfs->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;  in mobj_ffa_add_pages_at()
    452  else if (mfr->pa != pa + *idx * SMALL_PAGE_SIZE)  in mobj_ffa_add_pages_at()
    [all …]

core_mmu_v7.c
    285  static_assert(sizeof(l2_xlat_tbl_t) * 4 == SMALL_PAGE_SIZE);  in alloc_l2_table()
    296  mm = phys_mem_core_alloc(SMALL_PAGE_SIZE);  in alloc_l2_table()
    304  SMALL_PAGE_SIZE);  in alloc_l2_table()
    307  p = boot_mem_alloc(SMALL_PAGE_SIZE, SMALL_PAGE_SIZE);  in alloc_l2_table()
    689  for (i = 0; i < NUM_L2_ENTRIES; i++, pa += SMALL_PAGE_SIZE)  in core_mmu_entry_to_finer_grained()

/optee_os/core/arch/arm/plat-aspeed/

platform_ast2600.c
     46  register_phys_mem(MEM_AREA_IO_NSEC, CONSOLE_UART_BASE, SMALL_PAGE_SIZE);
     49  register_phys_mem(MEM_AREA_IO_SEC, AHBC_BASE, SMALL_PAGE_SIZE);
     50  register_phys_mem(MEM_AREA_IO_NSEC, SCU_BASE, SMALL_PAGE_SIZE);
     86  MEM_AREA_IO_SEC, SMALL_PAGE_SIZE);  in plat_primary_init_early()

platform_ast2700.c
     14  register_phys_mem(MEM_AREA_IO_SEC, UART_BASE, SMALL_PAGE_SIZE);

/optee_os/core/kernel/

msg_param.c
     82  va = mobj_get_va(mobj, 0, SMALL_PAGE_SIZE);  in msg_param_extract_pages()
    101  va = mobj_get_va(mobj, 0, SMALL_PAGE_SIZE);  in msg_param_extract_pages()
    128  num_pages = (size_plus_offs - 1) / SMALL_PAGE_SIZE + 1;  in msg_param_mobj_from_noncontig()

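The last msg_param.c hit computes how many pages a buffer spans when it does not start on a page boundary: (size_plus_offs - 1) / SMALL_PAGE_SIZE + 1 is a ceiling division over offset plus size, so a 4 KiB buffer starting 16 bytes into a page spans two pages. A minimal sketch of that formula, assuming a non-zero size and no overflow in size + offs:

    #include <assert.h>
    #include <stddef.h>

    #define SMALL_PAGE_SIZE 0x1000  /* 4 KiB, assumed */

    /* Pages spanned by a buffer of 'size' bytes starting 'offs' bytes into a page. */
    static size_t spanned_pages(size_t offs, size_t size)
    {
        size_t size_plus_offs = size + offs;  /* caller ensures this cannot overflow */

        return (size_plus_offs - 1) / SMALL_PAGE_SIZE + 1;
    }

    int main(void)
    {
        assert(spanned_pages(0, SMALL_PAGE_SIZE) == 1);
        assert(spanned_pages(16, SMALL_PAGE_SIZE) == 2);  /* straddles a page boundary */
        return 0;
    }
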
/optee_os/core/arch/riscv/include/mm/

generic_ram_layout.h
    163  SMALL_PAGE_SIZE)
    168  SMALL_PAGE_SIZE) - TA_RAM_START)

/optee_os/core/drivers/

versal_sha3_384.c
     29  len = MIN(src_len, SMALL_PAGE_SIZE);  in input_plaintext()
     31  versal_mbox_alloc(len, src + i * SMALL_PAGE_SIZE, &p);  in input_plaintext()

/optee_os/core/drivers/crypto/caam/utils/

utils_mem.c
    285  if (buf->length > SMALL_PAGE_SIZE) {  in caam_mem_get_pa_area()
    286  nb_pa_area = buf->length / SMALL_PAGE_SIZE + 1;  in caam_mem_get_pa_area()
    287  if (buf->length % SMALL_PAGE_SIZE)  in caam_mem_get_pa_area()
    321  MIN(SMALL_PAGE_SIZE - (va & SMALL_PAGE_MASK), len);  in caam_mem_get_pa_area()

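caam_mem_get_pa_area() splits a virtually contiguous buffer into chunks that never cross a small-page boundary, since physical contiguity is only guaranteed within one page; each chunk is trimmed to the end of its page. A minimal sketch of that chunking, assuming SMALL_PAGE_MASK == SMALL_PAGE_SIZE - 1; for_each_page_chunk() is a hypothetical walker, not a CAAM driver function.

    #include <stddef.h>
    #include <stdint.h>

    #define SMALL_PAGE_SIZE 0x1000                 /* 4 KiB, assumed */
    #define SMALL_PAGE_MASK (SMALL_PAGE_SIZE - 1)

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Hypothetical walker: calls cb() once per page-bounded chunk of [va, va + len). */
    static void for_each_page_chunk(uintptr_t va, size_t len,
                                    void (*cb)(uintptr_t va, size_t len))
    {
        while (len) {
            /* Never let one chunk cross a small-page boundary. */
            size_t chunk = MIN(SMALL_PAGE_SIZE - (va & SMALL_PAGE_MASK), len);

            cb(va, chunk);
            va += chunk;
            len -= chunk;
        }
    }
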
/optee_os/core/drivers/crypto/aspeed/

crypto_ast2600.c
     28  scu_virt = core_mmu_get_va(SCU_BASE, MEM_AREA_IO_NSEC, SMALL_PAGE_SIZE);  in crypto_ast2600_init()

/optee_os/ldelf/

ta_elf.c
    501  res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0, 0);  in init_elf()
    507  elf->max_addr = va + SMALL_PAGE_SIZE;  in init_elf()
    508  elf->max_offs = SMALL_PAGE_SIZE;  in init_elf()
    524  if (sz > SMALL_PAGE_SIZE)  in init_elf()
    532  return ROUNDUP(v, SMALL_PAGE_SIZE);  in roundup()
    537  return ROUNDDOWN(v, SMALL_PAGE_SIZE);  in rounddown()
    639  assert(seg->align >= SMALL_PAGE_SIZE);  in adjust_segments()
    776  return min * SMALL_PAGE_SIZE;  in get_pad_begin()
    781  return (min + rnd) * SMALL_PAGE_SIZE;  in get_pad_begin()
    826  if (vaddr + filesz < SMALL_PAGE_SIZE) {  in populate_segments()
    [all …]

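ldelf rounds ELF segment boundaries to small pages with its local roundup()/rounddown() helpers before mapping them. Below is a minimal sketch with the same behaviour for a power-of-two page size; page_roundup()/page_rounddown() are illustrative names and the bit-mask form is only assumed to match the ROUNDUP()/ROUNDDOWN() macros used in ta_elf.c.

    #include <stdint.h>

    #define SMALL_PAGE_SIZE 0x1000  /* 4 KiB, assumed */

    /* Round v up to the next small-page boundary. */
    static uintptr_t page_roundup(uintptr_t v)
    {
        return (v + SMALL_PAGE_SIZE - 1) & ~(uintptr_t)(SMALL_PAGE_SIZE - 1);
    }

    /* Round v down to the start of its small page. */
    static uintptr_t page_rounddown(uintptr_t v)
    {
        return v & ~(uintptr_t)(SMALL_PAGE_SIZE - 1);
    }
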
/optee_os/core/arch/arm/plat-rcar/

main.c
     43  register_phys_mem_pgdir(MEM_AREA_IO_SEC, PRR_BASE, SMALL_PAGE_SIZE);