| /optee_os/core/include/mm/ |
| fobj.h |
|    24 | unsigned int num_pages;  [member] |
|    62 | struct fobj *fobj_locked_paged_alloc(unsigned int num_pages); |
|    73 | struct fobj *fobj_rw_paged_alloc(unsigned int num_pages); |
|    87 | struct fobj *fobj_ro_paged_alloc(unsigned int num_pages, void *hashes, |
|   106 | struct fobj *fobj_ro_reloc_paged_alloc(unsigned int num_pages, void *hashes, |
|   165 | #define fobj_ta_mem_alloc(num_pages) fobj_rw_paged_alloc(num_pages)  [argument] |
|   173 | struct fobj *fobj_sec_mem_alloc(unsigned int num_pages); |
|   175 | #define fobj_ta_mem_alloc(num_pages) fobj_sec_mem_alloc(num_pages)  [argument] |
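
The two fobj_ta_mem_alloc() definitions at lines 165 and 175 point at a build-time switch: the alias resolves to the pager-backed allocator in one configuration and to the plain secure-memory allocator in the other. Below is a minimal, self-contained sketch of that aliasing pattern; the guard name CFG_WITH_PAGER and both stub allocators are assumptions for illustration, not the real OP-TEE definitions.

    /*
     * Hypothetical illustration of the pattern suggested by lines 165 and
     * 175 above: fobj_ta_mem_alloc() resolves to a different allocator
     * depending on the build configuration.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct fobj { unsigned int num_pages; };

    struct fobj *stub_rw_paged_alloc(unsigned int num_pages)
    {
        struct fobj *f = calloc(1, sizeof(*f));

        if (f)
            f->num_pages = num_pages; /* would be backed by the pager */
        return f;
    }

    struct fobj *stub_sec_mem_alloc(unsigned int num_pages)
    {
        struct fobj *f = calloc(1, sizeof(*f));

        if (f)
            f->num_pages = num_pages; /* would be plain secure memory */
        return f;
    }

    #ifdef CFG_WITH_PAGER
    #define fobj_ta_mem_alloc(num_pages) stub_rw_paged_alloc(num_pages)
    #else
    #define fobj_ta_mem_alloc(num_pages) stub_sec_mem_alloc(num_pages)
    #endif

    int main(void)
    {
        struct fobj *f = fobj_ta_mem_alloc(4);

        if (!f)
            return 1;
        printf("allocated fobj covering %u pages\n", f->num_pages);
        free(f);
        return 0;
    }

Because every variant takes the same unit, a page count, callers such as ldelf_syscall_map_bin() further down do not need to know which backend a given build selects.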
|
| mobj.h |
|   246 | unsigned int num_pages, |
|   251 | struct mobj_ffa *mobj_ffa_spmc_new(uint64_t cookie, unsigned int num_pages, |
|   258 | paddr_t pa, unsigned int num_pages); |
|   269 | struct mobj *mobj_reg_shm_alloc(paddr_t *pages, size_t num_pages, |
|   301 | struct mobj *mobj_mapped_shm_alloc(paddr_t *pages, size_t num_pages, |
|   315 | size_t num_pages __unused,  [in mobj_mapped_shm_alloc()] |
|
| sp_mem.h |
|    83 | paddr_t pa, unsigned int num_pages); |
|
| core_mmu.h |
|   542 | TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages, |
|   558 | size_t num_pages, |
|   566 | void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages); |
|
| /optee_os/core/mm/ |
| fobj.c |
|    69 | unsigned int num_pages)  [in fobj_init(), argument] |
|    72 | fobj->num_pages = num_pages;  [in fobj_init()] |
|   142 | static struct fobj *rwp_paged_iv_alloc(unsigned int num_pages)  [in rwp_paged_iv_alloc(), argument] |
|   154 | if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))  [in rwp_paged_iv_alloc()] |
|   163 | num_pages * sizeof(struct rwp_state_padded));  [in rwp_paged_iv_alloc()] |
|   165 | fobj_init(&rwp->fobj, &ops_rwp_paged_iv, num_pages);  [in rwp_paged_iv_alloc()] |
|   190 | assert(page_idx < fobj->num_pages);  [in rwp_paged_iv_load_page()] |
|   203 | assert(page_idx < fobj->num_pages);  [in rwp_paged_iv_save_page()] |
|   237 | assert(page_idx < fobj->num_pages);  [in rwp_paged_iv_get_iv_vaddr()] |
|   254 | static struct fobj *rwp_unpaged_iv_alloc(unsigned int num_pages)  [in rwp_unpaged_iv_alloc(), argument] |
| [all …] |
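
The rwp_paged_iv_alloc() hits show the sizing convention used across these allocators: the page count is converted to a byte size with an overflow-checked multiply before anything is allocated. Below is a standalone sketch of that check, assuming MUL_OVERFLOW behaves like the compiler builtin __builtin_mul_overflow (returning true on overflow); the helper name pages_to_bytes() is invented, and size_t is used for the page count so the rejection case also triggers on 64-bit hosts.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SMALL_PAGE_SIZE 4096U

    /* Assumed stand-in for OP-TEE's MUL_OVERFLOW(): returns true on overflow. */
    #define MUL_OVERFLOW(a, b, res) __builtin_mul_overflow((a), (b), (res))

    /* Convert a page count to a byte size, refusing values that would wrap. */
    static bool pages_to_bytes(size_t num_pages, size_t *size)
    {
        if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, size))
            return false;
        return true;
    }

    int main(void)
    {
        size_t size = 0;

        if (pages_to_bytes(16, &size))
            printf("16 pages -> %zu bytes\n", size);

        /* An absurd page count is rejected instead of silently wrapping. */
        if (!pages_to_bytes(SIZE_MAX, &size))
            printf("overflowing page count rejected\n");

        return 0;
    }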
|
| mobj_dyn_shm.c |
|   345 | struct mobj *mobj_reg_shm_alloc(paddr_t *pages, size_t num_pages,  [in mobj_reg_shm_alloc(), argument] |
|   354 | if (!num_pages || page_offset >= SMALL_PAGE_SIZE)  [in mobj_reg_shm_alloc()] |
|   357 | s = mobj_reg_shm_size(num_pages);  [in mobj_reg_shm_alloc()] |
|   365 | mobj_reg_shm->mobj.size = num_pages * SMALL_PAGE_SIZE - page_offset;  [in mobj_reg_shm_alloc()] |
|   371 | memcpy(mobj_reg_shm->pages, pages, sizeof(*pages) * num_pages);  [in mobj_reg_shm_alloc()] |
|   374 | for (i = 0; i < num_pages; i++) {  [in mobj_reg_shm_alloc()] |
|   510 | struct mobj *mobj_mapped_shm_alloc(paddr_t *pages, size_t num_pages,  [in mobj_mapped_shm_alloc(), argument] |
|   513 | struct mobj *mobj = mobj_reg_shm_alloc(pages, num_pages,  [in mobj_mapped_shm_alloc()] |
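
mobj_reg_shm_alloc() validates its inputs before deriving the object size: a zero page count or a first-page offset of SMALL_PAGE_SIZE or more is rejected (line 354), and the usable size is the whole page span minus that offset (line 365). A minimal sketch of the same arithmetic, with an invented helper name and an extra overflow check added for safety:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define SMALL_PAGE_SIZE 4096U

    /*
     * Usable size of a registered shared-memory object made of num_pages
     * whole pages when the buffer starts page_offset bytes into the first
     * page.  Mirrors the checks visible at lines 354 and 365 above.
     */
    static bool reg_shm_usable_size(size_t num_pages, size_t page_offset,
                                    size_t *size)
    {
        if (!num_pages || page_offset >= SMALL_PAGE_SIZE)
            return false;
        if (__builtin_mul_overflow(num_pages, SMALL_PAGE_SIZE, size))
            return false;
        *size -= page_offset;
        return true;
    }

    int main(void)
    {
        size_t size = 0;

        if (reg_shm_usable_size(3, 0x100, &size))
            printf("3 pages at offset 0x100 -> %zu usable bytes\n", size);
        if (!reg_shm_usable_size(0, 0, &size))
            printf("zero pages rejected\n");
        return 0;
    }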
|
| core_mmu.c |
|  2040 | TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages,  [in core_mmu_map_pages(), argument] |
|  2063 | if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1))  [in core_mmu_map_pages()] |
|  2069 | for (i = 0; i < num_pages; i++) {  [in core_mmu_map_pages()] |
|  2118 | size_t num_pages,  [in core_mmu_map_contiguous_pages(), argument] |
|  2141 | if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1))  [in core_mmu_map_contiguous_pages()] |
|  2147 | for (i = 0; i < num_pages; i++) {  [in core_mmu_map_contiguous_pages()] |
|  2184 | static bool mem_range_is_in_vcore_free(vaddr_t vstart, size_t num_pages)  [in mem_range_is_in_vcore_free(), argument] |
|  2186 | return core_is_buffer_inside(vstart, num_pages * SMALL_PAGE_SIZE,  [in mem_range_is_in_vcore_free()] |
|  2190 | static void maybe_remove_from_mem_map(vaddr_t vstart, size_t num_pages)  [in maybe_remove_from_mem_map(), argument] |
|  2198 | if (!mm || !va_is_in_map(mm, vstart + num_pages * SMALL_PAGE_SIZE - 1))  [in maybe_remove_from_mem_map()] |
| [all …] |
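
core_mmu_map_pages() and core_mmu_map_contiguous_pages() share the same shape: first confirm that the last byte of the requested range, vstart + num_pages * SMALL_PAGE_SIZE - 1, still lies inside the target mapping, then install one small page per loop iteration. The sketch below mirrors that control flow with the page-table update stubbed out; va_is_in_region() and map_one_page() are placeholders, not OP-TEE APIs.

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SMALL_PAGE_SIZE 4096U

    typedef uintptr_t vaddr_t;
    typedef uint64_t paddr_t;

    struct region { vaddr_t start; size_t size; };

    /* Placeholder for "does this virtual address fall inside the region?". */
    static bool va_is_in_region(const struct region *r, vaddr_t va)
    {
        return va >= r->start && va - r->start < r->size;
    }

    /* Placeholder for the real page-table update. */
    static void map_one_page(vaddr_t va, paddr_t pa)
    {
        printf("map va 0x%" PRIxPTR " -> pa 0x%" PRIx64 "\n", va, pa);
    }

    static int map_pages(const struct region *r, vaddr_t vstart,
                         const paddr_t *pages, size_t num_pages)
    {
        size_t i = 0;

        /* Reject ranges whose last byte falls outside the region. */
        if (!num_pages ||
            !va_is_in_region(r, vstart + num_pages * SMALL_PAGE_SIZE - 1))
            return -1;

        for (i = 0; i < num_pages; i++)
            map_one_page(vstart + i * SMALL_PAGE_SIZE, pages[i]);
        return 0;
    }

    int main(void)
    {
        struct region r = { .start = 0x40000000, .size = 0x10000 };
        paddr_t pages[] = { 0x80001000, 0x80005000, 0x80009000 };

        return map_pages(&r, 0x40002000, pages, 3);
    }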
|
| file.c |
|    97 | ADD_OVERFLOW(page_offset, fse->slice.fobj->num_pages, &s)) {  [in file_add_slice()] |
|   189 | page_offset < fs->page_offset + fs->fobj->num_pages)  [in file_find_slice()] |
|
| mobj.c |
|   383 | m->mobj.size = fobj->num_pages * SMALL_PAGE_SIZE;  [in mobj_with_fobj_alloc()] |
|
| /optee_os/core/kernel/ |
| msg_param.c |
|    63 | size_t num_pages)  [in msg_param_extract_pages(), argument] |
|    85 | for (cnt = 0; cnt < num_pages; cnt++, va++) {  [in msg_param_extract_pages()] |
|   121 | size_t num_pages = 0;  [in msg_param_mobj_from_noncontig(), local] |
|   128 | num_pages = (size_plus_offs - 1) / SMALL_PAGE_SIZE + 1;  [in msg_param_mobj_from_noncontig()] |
|   129 | if (MUL_OVERFLOW(num_pages, sizeof(paddr_t), &msize))  [in msg_param_mobj_from_noncontig()] |
|   137 | pages, num_pages))  [in msg_param_mobj_from_noncontig()] |
|   141 | mobj = mobj_mapped_shm_alloc(pages, num_pages, page_offset,  [in msg_param_mobj_from_noncontig()] |
|   144 | mobj = mobj_reg_shm_alloc(pages, num_pages, page_offset,  [in msg_param_mobj_from_noncontig()] |
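
msg_param_mobj_from_noncontig() derives the page count with a round-up division, num_pages = (size_plus_offs - 1) / SMALL_PAGE_SIZE + 1 (line 128), and then overflow-checks num_pages * sizeof(paddr_t) before allocating the page-address array (line 129). A self-contained sketch of the first step; the helper name is invented and the overflow check on the sum is an added precaution:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define SMALL_PAGE_SIZE 4096U

    /*
     * Number of small pages needed to cover buf_size bytes that start
     * page_offset bytes into their first page.  Round-up division as at
     * line 128 above.
     */
    static bool pages_for_buffer(size_t buf_size, size_t page_offset,
                                 size_t *num_pages)
    {
        size_t size_plus_offs = 0;

        if (!buf_size ||
            __builtin_add_overflow(buf_size, page_offset, &size_plus_offs))
            return false;
        *num_pages = (size_plus_offs - 1) / SMALL_PAGE_SIZE + 1;
        return true;
    }

    int main(void)
    {
        size_t n = 0;

        /* One byte still occupies a whole page. */
        if (pages_for_buffer(1, 0, &n))
            printf("1 byte -> %zu page(s)\n", n);
        /* A page-sized buffer starting mid-page spills into a second page. */
        if (pages_for_buffer(4096, 1, &n))
            printf("4096 bytes at offset 1 -> %zu page(s)\n", n);
        return 0;
    }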
|
| ldelf_syscalls.c |
|   310 | size_t num_pages = 0;  [in ldelf_syscall_map_bin(), local] |
|   354 | num_pages = num_rounded_bytes / SMALL_PAGE_SIZE;  [in ldelf_syscall_map_bin()] |
|   372 | num_pages > fs->fobj->num_pages) {  [in ldelf_syscall_map_bin()] |
|   396 | struct fobj *f = fobj_ta_mem_alloc(num_pages);  [in ldelf_syscall_map_bin()] |
|
| thread.c |
|   538 | size_t num_pages = 0;  [in init_thread_stacks(), local] |
|   550 | num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE - 1;  [in init_thread_stacks()] |
|   551 | fobj = fobj_locked_paged_alloc(num_pages);  [in init_thread_stacks()] |
|
| /optee_os/core/arch/arm/mm/ |
| mobj_ffa.c |
|   152 | static size_t shm_size(size_t num_pages)  [in shm_size(), argument] |
|   156 | if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))  [in shm_size()] |
|   163 | static struct mobj_ffa *ffa_shm_new(unsigned int num_pages)  [in ffa_shm_new(), argument] |
|   168 | if (!num_pages)  [in ffa_shm_new()] |
|   171 | s = shm_size(num_pages);  [in ffa_shm_new()] |
|   179 | m->mf.mobj.size = num_pages * SMALL_PAGE_SIZE;  [in ffa_shm_new()] |
|   187 | static struct mobj_ffa *ffa_prm_new(unsigned int num_pages,  [in ffa_prm_new(), argument] |
|   193 | if (!num_pages || MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &sz) ||  [in ffa_prm_new()] |
|   213 | unsigned int num_pages,  [in mobj_ffa_sel1_spmc_new(), argument] |
|   230 | m = ffa_shm_new(num_pages);  [in mobj_ffa_sel1_spmc_new()] |
| [all …] |
|
| sp_mem.c |
|    35 | static size_t mobj_sp_size(size_t num_pages)  [in mobj_sp_size(), argument] |
|    39 | if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))  [in mobj_sp_size()] |
|    77 | paddr_t pa, unsigned int num_pages)  [in sp_mem_add_pages(), argument] |
|    83 | if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)  [in sp_mem_add_pages()] |
|    89 | if (!tee_pbuf_is_sec(pa, num_pages * SMALL_PAGE_SIZE))  [in sp_mem_add_pages()] |
|    93 | num_pages * SMALL_PAGE_SIZE))  [in sp_mem_add_pages()] |
|    98 | for (n = 0; n < num_pages; n++)  [in sp_mem_add_pages()] |
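
sp_mem_add_pages() guards its index arithmetic the same way: the running index plus the incoming page count is computed with ADD_OVERFLOW and compared against the table capacity before any page address is stored (line 83). A standalone sketch of that append-with-capacity-check pattern; the struct layout and names are illustrative only, and the physical-range security checks are omitted.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SMALL_PAGE_SIZE 4096U

    typedef uint64_t paddr_t;

    struct page_table {
        size_t tot_page_count;  /* capacity of pages[] */
        paddr_t pages[8];
    };

    /*
     * Append num_pages page addresses starting at pa, advancing *idx.
     * Mirrors the overflow and capacity checks at line 83 above.
     */
    static bool add_pages(struct page_table *pt, size_t *idx,
                          paddr_t pa, unsigned int num_pages)
    {
        size_t n = 0;

        if (__builtin_add_overflow(*idx, num_pages, &n) ||
            n > pt->tot_page_count)
            return false;

        for (n = 0; n < num_pages; n++)
            pt->pages[*idx + n] = pa + n * SMALL_PAGE_SIZE;
        *idx += num_pages;
        return true;
    }

    int main(void)
    {
        struct page_table pt = { .tot_page_count = 8 };
        size_t idx = 0;

        if (add_pages(&pt, &idx, 0x80000000, 3))
            printf("table now holds %zu page(s)\n", idx);
        if (!add_pages(&pt, &idx, 0x90000000, 6))
            printf("second range rejected: would exceed capacity\n");
        return 0;
    }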
|
| tee_pager.c |
|   634 | base, base + fobj->num_pages * SMALL_PAGE_SIZE, type);  [in tee_pager_add_core_region()] |
|   636 | reg = alloc_region(base, fobj->num_pages * SMALL_PAGE_SIZE);  [in tee_pager_add_core_region()] |
|   742 | size_t s = fobj->num_pages * SMALL_PAGE_SIZE;  [in pager_add_um_region()] |
|  2001 | size_t num_pages = 0;  [in tee_pager_alloc(), local] |
|  2012 | num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;  [in tee_pager_alloc()] |
|  2013 | fobj = fobj_locked_paged_alloc(num_pages);  [in tee_pager_alloc()] |
|  2022 | asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE);  [in tee_pager_alloc()] |
|  2035 | fobj->num_pages * SMALL_PAGE_SIZE);  [in tee_pager_init_iv_region()] |
|  2043 | asan_tag_access(smem, smem + fobj->num_pages * SMALL_PAGE_SIZE);  [in tee_pager_init_iv_region()] |
|
| /optee_os/scripts/ |
| mem_usage.py |
|    51 | num_pages = 0 |
|    57 | num_pages = (size - 1) / 4096 + 1 |
|    62 | printf(' %d pages', num_pages) |
|
| /optee_os/core/arch/arm/kernel/ |
| boot.c |
|   462 | const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;  [in ro_paged_alloc(), local] |
|   468 | return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,  [in ro_paged_alloc()] |
|   471 | return fobj_ro_paged_alloc(num_pages, hashes, store);  [in ro_paged_alloc()] |
|
| thread_spmc.c |
|  2700 | unsigned int num_regions, unsigned int num_pages,  [in set_pages(), argument] |
|  2714 | if (idx != num_pages)  [in set_pages()] |
|  2728 | unsigned int num_pages = 0;  [in thread_spmc_populate_mobj_from_rx(), local] |
|  2759 | num_pages = READ_ONCE(descr->total_page_count);  [in thread_spmc_populate_mobj_from_rx()] |
|  2760 | mf = mobj_ffa_spmc_new(cookie, num_pages, use_case);  [in thread_spmc_populate_mobj_from_rx()] |
|  2765 | READ_ONCE(descr->address_range_count), num_pages, mf)) {  [in thread_spmc_populate_mobj_from_rx()] |
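
set_pages() walks the descriptor's address ranges and, at line 2714, rejects the transaction unless the pages it collected add up to the total_page_count advertised by the sender. A simplified, standalone sketch of that consistency check; the region struct below is not the FF-A wire format, and the real code also records every page address as it goes.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for one constituent memory-region descriptor. */
    struct mem_region {
        unsigned int page_count;
    };

    /*
     * Check that the regions together describe exactly num_pages pages,
     * matching the advertised total.  Mirrors the idx != num_pages test
     * at line 2714 above.
     */
    static bool regions_match_total(const struct mem_region *regions,
                                    unsigned int num_regions,
                                    unsigned int num_pages)
    {
        unsigned int idx = 0;
        unsigned int i = 0;

        for (i = 0; i < num_regions; i++) {
            if (__builtin_add_overflow(idx, regions[i].page_count, &idx))
                return false;
        }
        return idx == num_pages;
    }

    int main(void)
    {
        struct mem_region regions[] = { { .page_count = 2 },
                                        { .page_count = 5 } };

        printf("total 7 claimed: %s\n",
               regions_match_total(regions, 2, 7) ? "ok" : "mismatch");
        printf("total 8 claimed: %s\n",
               regions_match_total(regions, 2, 8) ? "ok" : "mismatch");
        return 0;
    }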
|
| secure_partition.c |
|   705 | size_t num_pages = total_size / SMALL_PAGE_SIZE;  [in copy_and_map_fdt(), local] |
|   712 | f = fobj_sec_mem_alloc(num_pages);  [in copy_and_map_fdt()] |
|   777 | size_t num_pages = total_size / SMALL_PAGE_SIZE;  [in create_and_map_boot_info(), local] |
|   784 | f = fobj_sec_mem_alloc(num_pages);  [in create_and_map_boot_info()] |
|