
Searched refs:pgt (Results 1 – 8 of 8) sorted by relevance

/optee_os/core/include/mm/
pgt_cache.h
   24  struct pgt {
   38  	SLIST_ENTRY(pgt) link;
   41  SLIST_HEAD(pgt_cache, pgt);
   74  static inline struct pgt *pgt_pop_from_cache_list(vaddr_t vabase __unused,
   77  static inline void pgt_push_to_cache_list(struct pgt *pgt __unused) { }
   79  struct pgt *pgt_pop_from_cache_list(vaddr_t vabase, struct ts_ctx *ctx);
   80  void pgt_push_to_cache_list(struct pgt *pgt);
   92  static inline void pgt_inc_used_entries(struct pgt *pgt)
   94  	pgt->num_used_entries++;
   95  	assert(pgt->num_used_entries);
  [all …]
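
The pgt_cache.h hits above show the central data structure: each struct pgt carries an SLIST_ENTRY(pgt) link, idle tables are threaded onto an SLIST_HEAD(pgt_cache, pgt) list, and there are pop/push helpers plus a used-entry counter. Below is a minimal, self-contained sketch of that pattern using the standard <sys/queue.h> macros; the field set, the lookup-by-vabase cache, and the helper signatures are illustrative assumptions, not OP-TEE's exact definitions (the real pgt_pop_from_cache_list() takes a struct ts_ctx *, for instance).

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>	/* SLIST_* macros, as in the real header */

typedef uintptr_t vaddr_t;

/* Illustrative stand-in for struct pgt: bookkeeping for one translation table */
struct pgt {
	void *tbl;			/* backing table memory */
	vaddr_t vabase;			/* VA range this table maps */
	bool populated;
	unsigned int num_used_entries;
	SLIST_ENTRY(pgt) link;		/* hook into the cache list */
};

SLIST_HEAD(pgt_cache, pgt);

/* Pop a cached table for @vabase from the list, if one is queued */
static struct pgt *pgt_pop_from_cache_list(struct pgt_cache *cache, vaddr_t vabase)
{
	struct pgt *p = NULL;

	SLIST_FOREACH(p, cache, link) {
		if (p->vabase == vabase) {
			SLIST_REMOVE(cache, p, pgt, link);
			return p;
		}
	}
	return NULL;
}

/* Return a table to the head of the cache list */
static void pgt_push_to_cache_list(struct pgt_cache *cache, struct pgt *pgt)
{
	SLIST_INSERT_HEAD(cache, pgt, link);
}

/* Mirrors the pgt_inc_used_entries() hit at line 92: count live entries */
static void pgt_inc_used_entries(struct pgt *pgt)
{
	pgt->num_used_entries++;
	assert(pgt->num_used_entries);
}
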
tee_pager.h
  225  void tee_pager_pgt_save_and_release_entries(struct pgt *pgt);
  228  tee_pager_pgt_save_and_release_entries(struct pgt *pgt __unused)
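
The two tee_pager.h hits (lines 225 and 228) reflect the usual enabled/disabled split: a real prototype when the pager is built in, and an empty static inline stub otherwise so callers need no #ifdefs. A sketch of that pattern; the guard macro and the __unused fallback are shown as assumptions rather than the header's exact text.

/* OP-TEE's compiler.h provides __unused; local fallback for a standalone build */
#ifndef __unused
#define __unused __attribute__((unused))
#endif

struct pgt;

#ifdef CFG_WITH_PAGER
/* Pager enabled: the real implementation lives in tee_pager.c */
void tee_pager_pgt_save_and_release_entries(struct pgt *pgt);
#else
/* Pager disabled: callers compile against a no-op stub */
static inline void
tee_pager_pgt_save_and_release_entries(struct pgt *pgt __unused)
{
}
#endif
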
tee_mmu_types.h
  122  	struct pgt **pgt_array;
/optee_os/core/mm/
pgt_cache.c
   66  static void free_pgt(struct pgt *pgt)
   73  	assert(pgt && pgt->parent);
   74  	parent = pgt->parent;
   90  	SLIST_INSERT_HEAD(&parent->pgt_cache, pgt, link);
   91  	pgt->vabase = 0;
   92  	pgt->populated = false;
  101  	struct pgt *pgt = NULL;		in alloc_pgt_parent()
  106  	sz = sizeof(*parent) + sizeof(*pgt) * PGT_PARENT_TBL_COUNT;
  121  	pgt = (struct pgt *)(parent + 1);
  123  		pgt[n].parent = parent;
  [all …]
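
alloc_pgt_parent() (lines 101–123) shows a batch allocation: one buffer holding a parent header followed by PGT_PARENT_TBL_COUNT struct pgt slots, with the slot array starting at (struct pgt *)(parent + 1) and each slot pointed back at its parent. A sketch of that layout under simplified, assumed type definitions; the count, the parent's fields, and the allocator are illustrative.

#include <stdlib.h>
#include <sys/queue.h>

#define PGT_PARENT_TBL_COUNT	4	/* illustrative value, not OP-TEE's */

struct pgt;
SLIST_HEAD(pgt_cache, pgt);

struct pgt {
	struct pgt_parent *parent;
	SLIST_ENTRY(pgt) link;
};

/* Illustrative parent: owns a batch of pgt slots plus a free list of them */
struct pgt_parent {
	struct pgt_cache pgt_cache;
};

static struct pgt_parent *alloc_pgt_parent_sketch(void)
{
	struct pgt_parent *parent = NULL;
	struct pgt *pgt = NULL;
	size_t sz = 0;
	size_t n = 0;

	/* One allocation: the parent header followed by its pgt slots */
	sz = sizeof(*parent) + sizeof(*pgt) * PGT_PARENT_TBL_COUNT;
	parent = calloc(1, sz);
	if (!parent)
		return NULL;

	SLIST_INIT(&parent->pgt_cache);

	/* The pgt array starts right after the parent struct */
	pgt = (struct pgt *)(parent + 1);
	for (n = 0; n < PGT_PARENT_TBL_COUNT; n++) {
		pgt[n].parent = parent;
		SLIST_INSERT_HEAD(&parent->pgt_cache, pgt + n, link);
	}

	return parent;
}
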
core_mmu.c
  1875  		struct vm_region *region, struct pgt **pgt,	in set_pg_region()
  1905  	while ((*pgt)->vabase < pg_info->va_base) {
  1906  		*pgt = SLIST_NEXT(*pgt, link);
  1908  		assert(*pgt);
  1910  	assert((*pgt)->vabase == pg_info->va_base);
  1911  	pg_info->table = (*pgt)->tbl;
  1921  	if (!(*pgt)->populated && !mobj_is_paged(region->mobj)) {
  2279  	struct pgt *pgt = NULL;		in core_mmu_populate_user_map()
  2280  	struct pgt *p = NULL;
  2290  	pgt = SLIST_FIRST(pgt_cache);
  [all …]
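
set_pg_region() (lines 1905–1911) walks a vabase-ordered list of tables forward until it reaches the one whose range starts at pg_info->va_base, asserts the exact match, and hands that table's tbl pointer to the mapping code. A condensed sketch of that walk, with the surrounding types reduced to illustrative stand-ins:

#include <assert.h>
#include <stdint.h>
#include <sys/queue.h>

typedef uintptr_t vaddr_t;

struct pgt {
	vaddr_t vabase;
	void *tbl;
	SLIST_ENTRY(pgt) link;
};

/* Advance *pgt along a list sorted by ascending vabase until it is the
 * table whose range starts at @va_base, mirroring the loop at line 1905 */
static void *table_for_va(struct pgt **pgt, vaddr_t va_base)
{
	while ((*pgt)->vabase < va_base) {
		*pgt = SLIST_NEXT(*pgt, link);
		/* The caller must have queued enough tables up front */
		assert(*pgt);
	}
	assert((*pgt)->vabase == va_base);
	return (*pgt)->tbl;
}
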
vm.c
  203  	struct pgt *p = SLIST_FIRST(&uctx->pgt_cache);	in set_um_region()
/optee_os/core/arch/riscv/mm/
core_mmu_arch.c
  100  static struct mmu_pte *core_mmu_table_get_entry(struct mmu_pgt *pgt,
  103  	return &pgt->entries[idx & RISCV_MMU_VPN_MASK];
  172  				     struct mmu_pgt *pgt)	in core_mmu_pgt_to_satp()
  175  	unsigned long pgt_ppn = (paddr_t)pgt >> RISCV_PGSHIFT;
  302  	struct mmu_pgt *pgt = NULL;		in core_mmu_pgt_alloc()
  323  	pgt = phys_to_virt(pa, MEM_AREA_SEC_RAM_OVERALL,
  325  	assert(pgt);
  327  	pgt = boot_mem_alloc(RISCV_MMU_PGT_SIZE,
  332  		     RISCV_MMU_PGT_SIZE == (vaddr_t)pgt);
  335  	prtn->pool_pgts = pgt;
  [all …]
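
core_mmu_pgt_to_satp() (lines 172–175) derives the satp PPN field by shifting the root table's address right by RISCV_PGSHIFT (12 for 4 KiB pages). Below is a sketch of composing an RV64 Sv39 satp value from a root-table physical address, using the field layout from the RISC-V privileged spec; the function name, the ASID handling, and taking a physical address directly (rather than casting a virtual pointer as the hit at line 175 does) are assumptions for illustration.

#include <stdint.h>

typedef uint64_t paddr_t;

#define RISCV_PGSHIFT	12	/* 4 KiB pages */

/* RV64 satp field layout per the RISC-V privileged spec */
#define SATP_MODE_SV39	UINT64_C(8)			/* MODE bits 63:60 */
#define SATP_MODE_SHIFT	60
#define SATP_ASID_SHIFT	44				/* ASID bits 59:44 */
#define SATP_ASID_MASK	UINT64_C(0xffff)
#define SATP_PPN_MASK	((UINT64_C(1) << 44) - 1)	/* PPN bits 43:0 */

/* Build an Sv39 satp value from the physical address of a root page table */
static uint64_t pgt_to_satp(paddr_t root_pgt_pa, uint32_t asid)
{
	uint64_t ppn = root_pgt_pa >> RISCV_PGSHIFT;

	return (SATP_MODE_SV39 << SATP_MODE_SHIFT) |
	       (((uint64_t)asid & SATP_ASID_MASK) << SATP_ASID_SHIFT) |
	       (ppn & SATP_PPN_MASK);
}
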
/optee_os/core/arch/arm/mm/
tee_pager.c
   61  	struct pgt *pgt;
  151  	struct pgt pgt;
  305  static bool region_have_pgt(struct vm_paged_region *reg, struct pgt *pgt)
  310  		if (reg->pgt_array[n] == pgt)
  327  		.pgt = reg->pgt_array[idx / TBL_NUM_ENTRIES],	in pmem_get_region_tblidx()
  374  static struct pgt *find_core_pgt(vaddr_t va)
  376  	return &find_pager_table(va)->pgt;
  406  	pgt_dec_used_entries(&pt->pgt);	in tee_pager_set_alias_area()
  434  	assert(tblidx.pgt && tblidx.idx < TBL_NUM_ENTRIES);	in tblidx_get_entry()
  435  	core_mmu_get_entry_primitive(tblidx.pgt->tbl, TBL_LEVEL, tblidx.idx,
  [all …]
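
The tee_pager.c hits show each paged region keeping a pgt_array, with region_have_pgt() scanning it (lines 305–310) and pmem_get_region_tblidx() turning a page index into a (table, slot) pair via division and modulo by TBL_NUM_ENTRIES (line 327). A sketch of that indexing with simplified, illustrative type definitions:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define TBL_NUM_ENTRIES	512	/* illustrative: entries per last-level table */

struct pgt {
	void *tbl;
};

struct tblidx {
	struct pgt *pgt;
	unsigned int idx;
};

struct vm_paged_region {
	struct pgt **pgt_array;	/* one table per TBL_NUM_ENTRIES pages */
	size_t num_pgt;
};

/* Is @pgt one of the tables backing @reg? (cf. region_have_pgt() at 305) */
static bool region_have_pgt(struct vm_paged_region *reg, struct pgt *pgt)
{
	size_t n = 0;

	for (n = 0; n < reg->num_pgt; n++)
		if (reg->pgt_array[n] == pgt)
			return true;
	return false;
}

/* Map a page index within the region to (table, slot), in the spirit of
 * pmem_get_region_tblidx() at line 327 */
static struct tblidx region_tblidx(struct vm_paged_region *reg, size_t pg_idx)
{
	struct tblidx tblidx = {
		.pgt = reg->pgt_array[pg_idx / TBL_NUM_ENTRIES],
		.idx = pg_idx % TBL_NUM_ENTRIES,
	};

	assert(tblidx.pgt && tblidx.idx < TBL_NUM_ENTRIES);
	return tblidx;
}
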