Lines Matching +full:- +full:pager
1 // SPDX-License-Identifier: BSD-2-Clause
21 * With pager enabled we allocate page tables from the pager.
24 * using the interface provided by the pager.
28 * a page is completely unused it's returned to the pager.
30 * With pager disabled we have a static allocation of page tables instead.
34 * threads. In case a thread can't allocate the needed number of page
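As a rough illustration of the scheme the header comment above describes, here is a minimal, hypothetical sketch (not the OP-TEE code itself) of a fixed pool of page tables shared between threads, where a thread that cannot get a table waits until another thread frees one. All names (pool_lock, pool_cond, MAX_PGT, my_pgt) are illustrative assumptions:

/* Hypothetical shared page-table pool; blocks when the pool is exhausted. */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

#define MAX_PGT 16	/* assumed pool size, stands in for PGT_CACHE_SIZE */

struct my_pgt {
	void *tbl;
	bool in_use;
};

static struct my_pgt pool[MAX_PGT];
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t pool_cond = PTHREAD_COND_INITIALIZER;

static struct my_pgt *try_alloc_locked(void)
{
	for (size_t n = 0; n < MAX_PGT; n++) {
		if (!pool[n].in_use) {
			pool[n].in_use = true;
			return pool + n;
		}
	}
	return NULL;
}

/* Wait for a table to be freed by another thread, mirroring the
 * "wait for some more to be freed" behaviour described above. */
struct my_pgt *pool_alloc(void)
{
	struct my_pgt *pgt = NULL;

	pthread_mutex_lock(&pool_lock);
	while (!(pgt = try_alloc_locked()))
		pthread_cond_wait(&pool_cond, &pool_lock);
	pthread_mutex_unlock(&pool_lock);
	return pgt;
}

void pool_free(struct my_pgt *pgt)
{
	pthread_mutex_lock(&pool_lock);
	pgt->in_use = false;
	pthread_cond_signal(&pool_cond);
	pthread_mutex_unlock(&pool_lock);
}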
73 assert(pgt && pgt->parent); in free_pgt()
74 parent = pgt->parent; in free_pgt()
75 assert(parent->num_used <= PGT_PARENT_TBL_COUNT && in free_pgt()
76 parent->num_used > 0); in free_pgt()
77 if (parent->num_used == PGT_PARENT_TBL_COUNT) in free_pgt()
79 parent->num_used--; in free_pgt()
81 if (!parent->num_used && SLIST_NEXT(SLIST_FIRST(&parent_list), link)) { in free_pgt()
87 tee_mm_free(parent->mm); in free_pgt()
90 SLIST_INSERT_HEAD(&parent->pgt_cache, pgt, link); in free_pgt()
91 pgt->vabase = 0; in free_pgt()
92 pgt->populated = false; in free_pgt()
110 parent->mm = phys_mem_ta_alloc(PGT_PARENT_SIZE); in alloc_pgt_parent()
111 if (!parent->mm) { in alloc_pgt_parent()
115 tbl = phys_to_virt(tee_mm_get_smem(parent->mm), in alloc_pgt_parent()
120 SLIST_INIT(&parent->pgt_cache); in alloc_pgt_parent()
125 SLIST_INSERT_HEAD(&parent->pgt_cache, pgt + n, link); in alloc_pgt_parent()
148 pgt = SLIST_FIRST(&parent->pgt_cache); in alloc_pgt()
149 SLIST_REMOVE_HEAD(&parent->pgt_cache, link); in alloc_pgt()
150 parent->num_used++; in alloc_pgt()
151 assert(pgt && parent->num_used <= PGT_PARENT_TBL_COUNT); in alloc_pgt()
152 if (parent->num_used == PGT_PARENT_TBL_COUNT) in alloc_pgt()
155 pgt->vabase = vabase; in alloc_pgt()
167 return core_is_buffer_inside(p->vabase, CORE_MMU_PGDIR_SIZE, begin, in pgt_entry_matches()
168 last - begin); in pgt_entry_matches()
173 struct pgt_cache *pgt_cache = &uctx->pgt_cache; in pgt_flush_range()
215 struct pgt_cache *pgt_cache = &uctx->pgt_cache; in pgt_flush()
229 struct pgt_cache *pgt_cache = &uctx->pgt_cache; in pgt_clear_range()
240 vaddr_t b = MAX(p->vabase, begin); in pgt_clear_range()
241 vaddr_t e = MIN(p->vabase + CORE_MMU_PGDIR_SIZE, end); in pgt_clear_range()
246 tbl = p->tbl; in pgt_clear_range()
247 idx = (b - p->vabase) / SMALL_PAGE_SIZE; in pgt_clear_range()
248 n = (e - b) / SMALL_PAGE_SIZE; in pgt_clear_range()
256 while (p && p->vabase < va) { in prune_before_va()
275 struct pgt_cache *pgt_cache = &uctx->pgt_cache; in pgt_check_avail()
276 struct vm_info *vm_info = &uctx->vm_info; in pgt_check_avail()
290 TAILQ_FOREACH(r, &vm_info->regions, link) { in pgt_check_avail()
291 for (va = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE); in pgt_check_avail()
292 va < r->va + r->size; va += CORE_MMU_PGDIR_SIZE) { in pgt_check_avail()
298 if (p->vabase < va) { in pgt_check_avail()
306 if (p->vabase == va) in pgt_check_avail()
314 TAILQ_FOREACH(r, &vm_info->regions, link) { in pgt_check_avail()
315 for (va = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE); in pgt_check_avail()
316 va < r->va + r->size; va += CORE_MMU_PGDIR_SIZE) { in pgt_check_avail()
317 if (p && p->vabase < va) { in pgt_check_avail()
323 if (p->vabase == va) in pgt_check_avail()
325 assert(p->vabase > va); in pgt_check_avail()
367 * Simple allocation of translation tables from pager, one translation
377 p->tbl = tee_pager_alloc(PGT_SIZE); in pgt_init()
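For the LPAE variant, the one-table-per-pager-page layout follows from the entry geometry. A small, assumed sizing check (standard LPAE parameters, not values taken from this file) could look like:

#include <assert.h>
#include <stdint.h>

/* Assumed LPAE geometry: 512 entries of 8 bytes fill one 4 KiB page,
 * so each translation table occupies exactly one page from the pager. */
#define LPAE_NUM_ENTRIES	512
#define SMALL_PAGE_SIZE		4096

static_assert(LPAE_NUM_ENTRIES * sizeof(uint64_t) == SMALL_PAGE_SIZE,
	      "one LPAE translation table per small page");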
383 * Four translation tables per page -> need to keep track of the page
384 * allocated from the pager.
402 p->tbl = tbl + m * PGT_SIZE; in pgt_init()
403 p->parent = &pgt_parents[n]; in pgt_init()
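The parent/offset arithmetic hinted at by the two lines above can be sketched under the assumption of four 1 KiB short-descriptor tables sharing one 4 KiB page (pgt_slot and pgt_parent_index are hypothetical helpers, not from the source):

#include <stddef.h>
#include <stdint.h>

/* Assumed short-descriptor geometry: a level-2 table has 256 entries of
 * 4 bytes (1 KiB), so four tables fit in one 4 KiB page.  Table i then
 * lives in parent page i / 4 at byte offset (i % 4) * PGT_SIZE. */
#define PGT_SIZE		1024
#define PGT_PARENT_TBL_COUNT	4

static inline void *pgt_slot(uint8_t *page_base, size_t i)
{
	size_t m = i % PGT_PARENT_TBL_COUNT;	/* slot within the page */

	return page_base + m * PGT_SIZE;
}

static inline size_t pgt_parent_index(size_t i)
{
	return i / PGT_PARENT_TBL_COUNT;	/* which parent page */
}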
424 p->tbl = pgt_tables[n]; in pgt_init()
431 /* Simple allocation of translation tables from pager or static allocation */
438 memset(p->tbl, 0, PGT_SIZE); in pop_from_free_list()
439 p->populated = false; in pop_from_free_list()
448 tee_pager_release_phys(p->tbl, PGT_SIZE); in push_to_free_list()
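A reduced, assumed sketch of that free-list discipline (zero the table when popping it for reuse, hand the backing memory back when pushing) might read as follows; the list head and struct names are illustrative:

#include <stdbool.h>
#include <string.h>
#include <sys/queue.h>

#define PGT_SIZE 4096	/* assumed: one table per page in this variant */

struct pgt {
	void *tbl;
	bool populated;
	SLIST_ENTRY(pgt) link;
};

SLIST_HEAD(pgt_head, pgt);
static struct pgt_head free_list = SLIST_HEAD_INITIALIZER(free_list);

static struct pgt *pop_from_free_list(void)
{
	struct pgt *p = SLIST_FIRST(&free_list);

	if (p) {
		SLIST_REMOVE_HEAD(&free_list, link);
		memset(p->tbl, 0, PGT_SIZE);	/* start from a clean table */
		p->populated = false;
	}
	return p;
}

static void push_to_free_list(struct pgt *p)
{
	SLIST_INSERT_HEAD(&free_list, p, link);
	/* In the pager build the physical pages behind p->tbl could be
	 * released at this point (the listing shows tee_pager_release_phys()). */
}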
453 * Four translation tables per page -> need to keep track of the page
454 * allocated from the pager.
466 memset(p->tbl, 0, PGT_SIZE); in pop_from_free_list()
467 p->populated = false; in pop_from_free_list()
476 SLIST_INSERT_HEAD(&p->parent->pgt_cache, p, link); in push_to_free_list()
477 assert(p->parent->num_used > 0); in push_to_free_list()
478 p->parent->num_used--; in push_to_free_list()
479 if (!p->parent->num_used) { in push_to_free_list()
480 vaddr_t va = (vaddr_t)p->tbl & ~SMALL_PAGE_MASK; in push_to_free_list()
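For the four-tables-per-page variant, the push side additionally drops the parent's usage count and, once it reaches zero, the whole backing page can be handed back. A hypothetical reduction of that logic, with release_page() standing in for the pager release call:

#include <assert.h>
#include <stdint.h>

#define SMALL_PAGE_MASK 0xfffUL	/* assumed 4 KiB pages */

struct pgt_parent {
	unsigned int num_used;
};

struct pgt {
	void *tbl;
	struct pgt_parent *parent;
};

static void release_page(void *va)
{
	(void)va;	/* stand-in for tee_pager_release_phys() in the listing */
}

static void put_pgt(struct pgt *p)
{
	assert(p->parent->num_used > 0);
	p->parent->num_used--;
	if (!p->parent->num_used) {
		/* Last table in this page was freed: release the whole page. */
		uintptr_t va = (uintptr_t)p->tbl & ~SMALL_PAGE_MASK;

		release_page((void *)va);
	}
}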
494 return pgt->ctx == ctx && pgt->vabase == vabase; in match_pgt()
526 return pgt->num_used_entries; in get_num_used_entries()
579 p->ctx = NULL; in pgt_free_unlocked()
580 p->vabase = 0; in pgt_free_unlocked()
602 memset(p->tbl, 0, PGT_SIZE); in pop_from_some_list()
603 p->populated = false; in pop_from_some_list()
605 p->ctx = ctx; in pop_from_some_list()
606 p->vabase = vabase; in pop_from_some_list()
612 struct ts_ctx *ctx = uctx->ts_ctx; in pgt_flush()
622 if (p->ctx != ctx) in pgt_flush()
626 p->ctx = NULL; in pgt_flush()
627 p->vabase = 0; in pgt_flush()
636 if (p->ctx == ctx) { in pgt_flush()
639 p->ctx = NULL; in pgt_flush()
640 p->vabase = 0; in pgt_flush()
654 p->ctx = NULL; in flush_pgt_entry()
655 p->vabase = 0; in flush_pgt_entry()
663 if (p->ctx != ctx) in pgt_entry_matches()
667 if (!core_is_buffer_inside(p->vabase, CORE_MMU_PGDIR_SIZE, begin, in pgt_entry_matches()
668 last - begin)) in pgt_entry_matches()
720 struct pgt_cache *pgt_cache = &uctx->pgt_cache; in pgt_flush_range()
721 struct ts_ctx *ctx = uctx->ts_ctx; in pgt_flush_range()
745 vaddr_t b = MAX(p->vabase, begin); in clear_ctx_range_from_list()
746 vaddr_t e = MIN(p->vabase + CORE_MMU_PGDIR_SIZE, end); in clear_ctx_range_from_list()
748 if (p->ctx != ctx) in clear_ctx_range_from_list()
753 tbl = p->tbl; in clear_ctx_range_from_list()
754 idx = (b - p->vabase) / SMALL_PAGE_SIZE; in clear_ctx_range_from_list()
755 n = (e - b) / SMALL_PAGE_SIZE; in clear_ctx_range_from_list()
762 struct pgt_cache *pgt_cache = &uctx->pgt_cache; in pgt_clear_range()
763 struct ts_ctx *ctx = uctx->ts_ctx; in pgt_clear_range()
781 TAILQ_FOREACH(r, &vm_info->regions, link) { in pgt_alloc_unlocked()
782 for (va = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE); in pgt_alloc_unlocked()
783 va < r->va + r->size; va += CORE_MMU_PGDIR_SIZE) { in pgt_alloc_unlocked()
784 if (p && p->vabase == va) in pgt_alloc_unlocked()
804 struct vm_info *vm_info = &uctx->vm_info; in pgt_check_avail()
810 TAILQ_FOREACH(r, &vm_info->regions, link) { in pgt_check_avail()
811 for (va = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE); in pgt_check_avail()
812 va < r->va + r->size; va += CORE_MMU_PGDIR_SIZE) { in pgt_check_avail()
825 struct pgt_cache *pgt_cache = &uctx->pgt_cache; in pgt_get_all()
826 struct vm_info *vm_info = &uctx->vm_info; in pgt_get_all()
828 if (TAILQ_EMPTY(&vm_info->regions)) in pgt_get_all()
834 while (!pgt_alloc_unlocked(pgt_cache, uctx->ts_ctx, vm_info)) { in pgt_get_all()
846 struct pgt_cache *pgt_cache = &uctx->pgt_cache; in pgt_put_all()