// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2021, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <io.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/asan.h>
#include <kernel/cache_helpers.h>
#include <kernel/linker.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/tlb_helpers.h>
#include <kernel/user_mode_ctx.h>
#include <mm/core_memprot.h>
#include <mm/fobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <tee_api_defines.h>
#include <trace.h>
#include <types_ext.h>
#include <utee_defines.h>
#include <util.h>


static struct vm_paged_region_head core_vm_regions =
	TAILQ_HEAD_INITIALIZER(core_vm_regions);

#define INVALID_PGIDX		UINT_MAX
#define PMEM_FLAG_DIRTY		BIT(0)
#define PMEM_FLAG_HIDDEN	BIT(1)

/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @flags	flags defined by PMEM_FLAG_* above
 * @fobj_pgidx	index of the page in the @fobj
 * @fobj	File object from which a page is made visible.
 * @va_alias	Virtual address where the physical page always is aliased.
 *		Used during remapping of the page when the content needs to
 *		be updated before it's available at the new location.
 */
struct tee_pager_pmem {
	unsigned int flags;
	unsigned int fobj_pgidx;
	struct fobj *fobj;
	void *va_alias;
	TAILQ_ENTRY(tee_pager_pmem) link;
};

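/*
 * struct tblidx - Identifies a single translation table entry as the page
 * table (@pgt) together with the index (@idx) of the entry within it.
 */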
struct tblidx {
	struct pgt *pgt;
	unsigned int idx;
};

/* The list of physical pages. The first page in the list is the oldest */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

static struct tee_pager_pmem_head tee_pager_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);

/* Number of pages hidden */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used when hiding pages. */
static size_t tee_pager_npages;
79abe38974SJens Wiklander
8013616e88SJens Wiklander /* This area covers the IVs for all fobjs with paged IVs */
81d5ad7ccfSJens Wiklander static struct vm_paged_region *pager_iv_region;
8213616e88SJens Wiklander /* Used by make_iv_available(), see make_iv_available() for details. */
8313616e88SJens Wiklander static struct tee_pager_pmem *pager_spare_pmem;
8413616e88SJens Wiklander
#ifdef CFG_WITH_STATS
static struct tee_pager_stats pager_stats;

static inline void incr_ro_hits(void)
{
	pager_stats.ro_hits++;
}

static inline void incr_rw_hits(void)
{
	pager_stats.rw_hits++;
}

static inline void incr_hidden_hits(void)
{
	pager_stats.hidden_hits++;
}

static inline void incr_zi_released(void)
{
	pager_stats.zi_released++;
}

static inline void incr_npages_all(void)
{
	pager_stats.npages_all++;
}

static inline void set_npages(void)
{
	pager_stats.npages = tee_pager_npages;
}

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	*stats = pager_stats;

	pager_stats.hidden_hits = 0;
	pager_stats.ro_hits = 0;
	pager_stats.rw_hits = 0;
	pager_stats.zi_released = 0;
}

#else /* CFG_WITH_STATS */
static inline void incr_ro_hits(void) { }
static inline void incr_rw_hits(void) { }
static inline void incr_hidden_hits(void) { }
static inline void incr_zi_released(void) { }
static inline void incr_npages_all(void) { }
static inline void set_npages(void) { }

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	memset(stats, 0, sizeof(struct tee_pager_stats));
}
#endif /* CFG_WITH_STATS */

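/*
 * The pager operates on last-level translation tables, where each table
 * covers CORE_MMU_PGDIR_SIZE of virtual address space divided into
 * SMALL_PAGE_SIZE entries, as described by the constants below.
 */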
#define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
#define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
#define TBL_SHIFT	SMALL_PAGE_SHIFT

#define EFFECTIVE_VA_SIZE \
	(ROUNDUP(VCORE_START_VA + TEE_RAM_VA_SIZE, CORE_MMU_PGDIR_SIZE) - \
	 ROUNDDOWN(VCORE_START_VA, CORE_MMU_PGDIR_SIZE))

static struct pager_table {
	struct pgt pgt;
	struct core_mmu_table_info tbl_info;
} *pager_tables;
static unsigned int num_pager_tables;

static unsigned pager_spinlock = SPINLOCK_UNLOCK;

/* Defines the range of the alias area */
static tee_mm_entry_t *pager_alias_area;
/*
 * Physical pages are added in a stack like fashion to the alias area,
 * @pager_alias_next_free gives the address of the next free entry if
 * @pager_alias_next_free is != 0
 */
static uintptr_t pager_alias_next_free;

#ifdef CFG_TEE_CORE_DEBUG
#define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)

static uint32_t pager_lock_dldetect(const char *func, const int line,
				    struct abort_info *ai)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	unsigned int retries = 0;
	unsigned int reminder = 0;

	while (!cpu_spin_trylock(&pager_spinlock)) {
		retries++;
		if (!retries) {
			/* wrapped, time to report */
			trace_printf(func, line, TRACE_ERROR, true,
				     "possible spinlock deadlock reminder %u",
				     reminder);
			if (reminder < UINT_MAX)
				reminder++;
			if (ai)
				abort_print(ai);
		}
	}

	return exceptions;
}
#else
static uint32_t pager_lock(struct abort_info __unused *ai)
{
	return cpu_spin_lock_xsave(&pager_spinlock);
}
#endif

static uint32_t pager_lock_check_stack(size_t stack_size)
{
	if (stack_size) {
		int8_t buf[stack_size];
		size_t n;

		/*
		 * Make sure to touch all pages of the stack that we expect
		 * to use with this lock held. We need to take any page
		 * faults before the lock is taken or we'll deadlock the
		 * pager. The pages that are populated in this way will
		 * eventually be released at certain save transitions of
		 * the thread.
		 */
		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
			io_write8((vaddr_t)buf + n, 1);
		io_write8((vaddr_t)buf + stack_size - 1, 1);
	}

	return pager_lock(NULL);
}

static void pager_unlock(uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
}

void *tee_pager_phys_to_virt(paddr_t pa, size_t len)
{
	struct core_mmu_table_info ti;
	unsigned idx;
	uint32_t a;
	paddr_t p;
	vaddr_t v;
	size_t n;

	if (pa & SMALL_PAGE_MASK || len > SMALL_PAGE_SIZE)
		return NULL;

	/*
	 * Most addresses are mapped linearly, try that first if possible.
	 */
	if (!tee_pager_get_table_info(pa, &ti))
		return NULL; /* impossible pa */
	idx = core_mmu_va2idx(&ti, pa);
	core_mmu_get_entry(&ti, idx, &p, &a);
	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
		return (void *)core_mmu_idx2va(&ti, idx);

	n = 0;
	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_START);
	while (true) {
		while (idx < TBL_NUM_ENTRIES) {
			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
			if (v >= (TEE_RAM_START + TEE_RAM_VA_SIZE))
				return NULL;

			core_mmu_get_entry(&pager_tables[n].tbl_info,
					   idx, &p, &a);
			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
				return (void *)v;
			idx++;
		}

		n++;
		if (n >= num_pager_tables)
			return NULL;
		idx = 0;
	}

	return NULL;
}

static bool pmem_is_hidden(struct tee_pager_pmem *pmem)
{
	return pmem->flags & PMEM_FLAG_HIDDEN;
}

static bool pmem_is_dirty(struct tee_pager_pmem *pmem)
{
	return pmem->flags & PMEM_FLAG_DIRTY;
}

static bool pmem_is_covered_by_region(struct tee_pager_pmem *pmem,
				      struct vm_paged_region *reg)
{
	if (pmem->fobj != reg->fobj)
		return false;
	if (pmem->fobj_pgidx < reg->fobj_pgoffs)
		return false;
	if ((pmem->fobj_pgidx - reg->fobj_pgoffs) >=
	    (reg->size >> SMALL_PAGE_SHIFT))
		return false;

	return true;
}

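/*
 * Returns the number of page tables, each covering CORE_MMU_PGDIR_SIZE
 * of virtual address space, needed to map [base, base + size).
 */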
static size_t get_pgt_count(vaddr_t base, size_t size)
{
	assert(size);

	return (base + size - 1) / CORE_MMU_PGDIR_SIZE + 1 -
	       base / CORE_MMU_PGDIR_SIZE;
}

static bool region_have_pgt(struct vm_paged_region *reg, struct pgt *pgt)
{
	size_t n = 0;

	for (n = 0; n < get_pgt_count(reg->base, reg->size); n++)
		if (reg->pgt_array[n] == pgt)
			return true;

	return false;
}

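/*
 * Translates the fobj page index of @pmem into the page table and entry
 * index where that page is mapped in @reg.
 */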
static struct tblidx pmem_get_region_tblidx(struct tee_pager_pmem *pmem,
					    struct vm_paged_region *reg)
{
	size_t tbloffs = (reg->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT;
	size_t idx = pmem->fobj_pgidx - reg->fobj_pgoffs + tbloffs;

	assert(pmem->fobj && pmem->fobj_pgidx != INVALID_PGIDX);
	assert(idx / TBL_NUM_ENTRIES < get_pgt_count(reg->base, reg->size));

	return (struct tblidx){
		.idx = idx % TBL_NUM_ENTRIES,
		.pgt = reg->pgt_array[idx / TBL_NUM_ENTRIES],
	};
}

static struct pager_table *find_pager_table_may_fail(vaddr_t va)
{
	size_t n;
	const vaddr_t mask = CORE_MMU_PGDIR_MASK;

	if (!pager_tables)
		return NULL;

	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
	    CORE_MMU_PGDIR_SHIFT;
	if (n >= num_pager_tables)
		return NULL;

	assert(va >= pager_tables[n].tbl_info.va_base &&
	       va <= (pager_tables[n].tbl_info.va_base | mask));

	return pager_tables + n;
}

static struct pager_table *find_pager_table(vaddr_t va)
{
	struct pager_table *pt = find_pager_table_may_fail(va);

	assert(pt);
	return pt;
}

bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
{
	struct pager_table *pt = find_pager_table_may_fail(va);

	if (!pt)
		return false;

	*ti = pt->tbl_info;
	return true;
}

static struct core_mmu_table_info *find_table_info(vaddr_t va)
{
	return &find_pager_table(va)->tbl_info;
}

static struct pgt *find_core_pgt(vaddr_t va)
{
	return &find_pager_table(va)->pgt;
}

void tee_pager_set_alias_area(tee_mm_entry_t *mm)
{
	struct pager_table *pt;
	unsigned idx;
	vaddr_t smem = tee_mm_get_smem(mm);
	size_t nbytes = tee_mm_get_bytes(mm);
	vaddr_t v;
	uint32_t a = 0;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);

	assert(!pager_alias_area);
	pager_alias_area = mm;
	pager_alias_next_free = smem;

	/* Clear all mappings in the alias area */
	pt = find_pager_table(smem);
	idx = core_mmu_va2idx(&pt->tbl_info, smem);
	while (pt <= (pager_tables + num_pager_tables - 1)) {
		while (idx < TBL_NUM_ENTRIES) {
			v = core_mmu_idx2va(&pt->tbl_info, idx);
			if (v >= (smem + nbytes))
				goto out;

			core_mmu_get_entry(&pt->tbl_info, idx, NULL, &a);
			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
			if (a & TEE_MATTR_VALID_BLOCK)
				pgt_dec_used_entries(&pt->pgt);
			idx++;
		}

		pt++;
		idx = 0;
	}

out:
	tlbi_va_range(smem, nbytes, SMALL_PAGE_SIZE);
}

static size_t tbl_usage_count(struct core_mmu_table_info *ti)
{
	size_t n;
	uint32_t a = 0;
	size_t usage = 0;

	for (n = 0; n < ti->num_entries; n++) {
		core_mmu_get_entry(ti, n, NULL, &a);
		if (a & TEE_MATTR_VALID_BLOCK)
			usage++;
	}
	return usage;
}

static void tblidx_get_entry(struct tblidx tblidx, paddr_t *pa, uint32_t *attr)
{
	assert(tblidx.pgt && tblidx.idx < TBL_NUM_ENTRIES);
	core_mmu_get_entry_primitive(tblidx.pgt->tbl, TBL_LEVEL, tblidx.idx,
				     pa, attr);
}

static void tblidx_set_entry(struct tblidx tblidx, paddr_t pa, uint32_t attr)
{
	assert(tblidx.pgt && tblidx.idx < TBL_NUM_ENTRIES);
	core_mmu_set_entry_primitive(tblidx.pgt->tbl, TBL_LEVEL, tblidx.idx,
				     pa, attr);
}

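/* Returns the page table and entry index mapping @va within @reg */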
static struct tblidx region_va2tblidx(struct vm_paged_region *reg, vaddr_t va)
{
	paddr_t mask = CORE_MMU_PGDIR_MASK;
	size_t n = 0;

	assert(va >= reg->base && va < (reg->base + reg->size));
	n = (va - (reg->base & ~mask)) / CORE_MMU_PGDIR_SIZE;

	return (struct tblidx){
		.idx = (va & mask) / SMALL_PAGE_SIZE,
		.pgt = reg->pgt_array[n],
	};
}

static vaddr_t tblidx2va(struct tblidx tblidx)
{
	return tblidx.pgt->vabase + (tblidx.idx << SMALL_PAGE_SHIFT);
}

static void tblidx_tlbi_entry(struct tblidx tblidx)
{
	vaddr_t va = tblidx2va(tblidx);

#if defined(CFG_PAGED_USER_TA)
	if (tblidx.pgt->ctx) {
		uint32_t asid = to_user_mode_ctx(tblidx.pgt->ctx)->vm_info.asid;

		tlbi_va_asid(va, asid);
		return;
	}
#endif
	tlbi_va_allasid(va);
}

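/*
 * Assigns the fobj page backing @va in @reg to @pmem, asserting that the
 * page isn't already resident in another pmem.
 */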
static void pmem_assign_fobj_page(struct tee_pager_pmem *pmem,
				  struct vm_paged_region *reg, vaddr_t va)
{
	struct tee_pager_pmem *p = NULL;
	unsigned int fobj_pgidx = 0;

	assert(!pmem->fobj && pmem->fobj_pgidx == INVALID_PGIDX);

	assert(va >= reg->base && va < (reg->base + reg->size));
	fobj_pgidx = (va - reg->base) / SMALL_PAGE_SIZE + reg->fobj_pgoffs;

	TAILQ_FOREACH(p, &tee_pager_pmem_head, link)
		assert(p->fobj != reg->fobj || p->fobj_pgidx != fobj_pgidx);

	pmem->fobj = reg->fobj;
	pmem->fobj_pgidx = fobj_pgidx;
}

static void pmem_clear(struct tee_pager_pmem *pmem)
{
	pmem->fobj = NULL;
	pmem->fobj_pgidx = INVALID_PGIDX;
	pmem->flags = 0;
}

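/*
 * Unmaps @pmem from all regions sharing its fobj or, when @only_this_pgt
 * is non-NULL, only from regions using that particular page table.
 */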
static void pmem_unmap(struct tee_pager_pmem *pmem, struct pgt *only_this_pgt)
{
	struct vm_paged_region *reg = NULL;
	struct tblidx tblidx = { };
	uint32_t a = 0;

	TAILQ_FOREACH(reg, &pmem->fobj->regions, fobj_link) {
		/*
		 * If only_this_pgt points to a pgt then the pgt of this
		 * region has to match or we'll skip over it.
		 */
		if (only_this_pgt && !region_have_pgt(reg, only_this_pgt))
			continue;
		if (!pmem_is_covered_by_region(pmem, reg))
			continue;
		tblidx = pmem_get_region_tblidx(pmem, reg);
		if (!tblidx.pgt)
			continue;
		tblidx_get_entry(tblidx, NULL, &a);
		if (a & TEE_MATTR_VALID_BLOCK) {
			tblidx_set_entry(tblidx, 0, 0);
			pgt_dec_used_entries(tblidx.pgt);
			tblidx_tlbi_entry(tblidx);
		}
	}
}

void tee_pager_early_init(void)
{
	size_t n = 0;

	num_pager_tables = EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE;
	pager_tables = calloc(num_pager_tables, sizeof(*pager_tables));
	if (!pager_tables)
		panic("Cannot allocate pager_tables");

	/*
	 * Note that this depends on add_pager_vaspace() adding vaspace
	 * after end of memory.
	 */
	for (n = 0; n < num_pager_tables; n++) {
		if (!core_mmu_find_table(NULL, VCORE_START_VA +
					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
					 &pager_tables[n].tbl_info))
			panic("can't find mmu tables");

		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
			panic("Unsupported page size in translation table");
		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);

		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
		pager_tables[n].pgt.vabase = pager_tables[n].tbl_info.va_base;
		pgt_set_used_entries(&pager_tables[n].pgt,
				     tbl_usage_count(&pager_tables[n].tbl_info));
	}
}

static void *pager_add_alias_page(paddr_t pa)
{
	unsigned idx;
	struct core_mmu_table_info *ti;
	/* Alias pages mapped without write permission: runtime will care */
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
			TEE_MATTR_PR | (TEE_MATTR_MEM_TYPE_CACHED <<
					TEE_MATTR_MEM_TYPE_SHIFT);

	DMSG("0x%" PRIxPA, pa);

	ti = find_table_info(pager_alias_next_free);
	idx = core_mmu_va2idx(ti, pager_alias_next_free);
	core_mmu_set_entry(ti, idx, pa, attr);
	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
	pager_alias_next_free += SMALL_PAGE_SIZE;
	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
				      tee_mm_get_bytes(pager_alias_area)))
		pager_alias_next_free = 0;
	return (void *)core_mmu_idx2va(ti, idx);
}

static void region_insert(struct vm_paged_region_head *regions,
			  struct vm_paged_region *reg,
			  struct vm_paged_region *r_prev)
{
	uint32_t exceptions = pager_lock_check_stack(8);

	if (r_prev)
		TAILQ_INSERT_AFTER(regions, r_prev, reg, link);
	else
		TAILQ_INSERT_HEAD(regions, reg, link);
	TAILQ_INSERT_TAIL(&reg->fobj->regions, reg, fobj_link);

	pager_unlock(exceptions);
}
DECLARE_KEEP_PAGER(region_insert);

static struct vm_paged_region *alloc_region(vaddr_t base, size_t size)
{
	struct vm_paged_region *reg = NULL;

	if ((base & SMALL_PAGE_MASK) || !size) {
		EMSG("invalid pager region [%" PRIxVA " +0x%zx]", base, size);
		panic();
	}

	reg = calloc(1, sizeof(*reg));
	if (!reg)
		return NULL;
	reg->pgt_array = calloc(get_pgt_count(base, size),
				sizeof(struct pgt *));
	if (!reg->pgt_array) {
		free(reg);
		return NULL;
	}

	reg->base = base;
	reg->size = size;
	return reg;
}

void tee_pager_add_core_region(vaddr_t base, enum vm_paged_region_type type,
			       struct fobj *fobj)
{
	struct vm_paged_region *reg = NULL;
	size_t n = 0;

	assert(fobj);

	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : type %d",
	     base, base + fobj->num_pages * SMALL_PAGE_SIZE, type);

	reg = alloc_region(base, fobj->num_pages * SMALL_PAGE_SIZE);
	if (!reg)
		panic("alloc_region");

	reg->fobj = fobj_get(fobj);
	reg->fobj_pgoffs = 0;
	reg->type = type;

	switch (type) {
	case PAGED_REGION_TYPE_RO:
		reg->flags = TEE_MATTR_PRX;
		break;
	case PAGED_REGION_TYPE_RW:
	case PAGED_REGION_TYPE_LOCK:
		reg->flags = TEE_MATTR_PRW;
		break;
	default:
		panic();
	}

	for (n = 0; n < get_pgt_count(reg->base, reg->size); n++)
		reg->pgt_array[n] = find_core_pgt(base +
						  n * CORE_MMU_PGDIR_SIZE);
	region_insert(&core_vm_regions, reg, NULL);
}

static struct vm_paged_region *find_region(struct vm_paged_region_head *regions,
					   vaddr_t va)
{
	struct vm_paged_region *reg;

	if (!regions)
		return NULL;

	TAILQ_FOREACH(reg, regions, link) {
		if (core_is_buffer_inside(va, 1, reg->base, reg->size))
			return reg;
	}
	return NULL;
}

#ifdef CFG_PAGED_USER_TA
static struct vm_paged_region *find_uta_region(vaddr_t va)
{
	struct ts_ctx *ctx = thread_get_tsd()->ctx;

	if (!is_user_mode_ctx(ctx))
		return NULL;
	return find_region(to_user_mode_ctx(ctx)->regions, va);
}
#else
static struct vm_paged_region *find_uta_region(vaddr_t va __unused)
{
	return NULL;
}
#endif /*CFG_PAGED_USER_TA*/


static uint32_t get_region_mattr(uint32_t reg_flags)
{
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
			TEE_MATTR_MEM_TYPE_CACHED << TEE_MATTR_MEM_TYPE_SHIFT |
			(reg_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));

	return attr;
}

static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
{
	struct core_mmu_table_info *ti;
	paddr_t pa;
	unsigned idx;

	ti = find_table_info((vaddr_t)pmem->va_alias);
	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
	core_mmu_get_entry(ti, idx, &pa, NULL);
	return pa;
}

#ifdef CFG_PAGED_USER_TA
static void unlink_region(struct vm_paged_region_head *regions,
			  struct vm_paged_region *reg)
{
	uint32_t exceptions = pager_lock_check_stack(64);

	TAILQ_REMOVE(regions, reg, link);
	TAILQ_REMOVE(&reg->fobj->regions, reg, fobj_link);

	pager_unlock(exceptions);
}
DECLARE_KEEP_PAGER(unlink_region);

static void free_region(struct vm_paged_region *reg)
{
	fobj_put(reg->fobj);
	free(reg->pgt_array);
	free(reg);
}

static TEE_Result pager_add_um_region(struct user_mode_ctx *uctx, vaddr_t base,
				      struct fobj *fobj, uint32_t prot)
{
	struct vm_paged_region *r_prev = NULL;
	struct vm_paged_region *reg = NULL;
	vaddr_t b = base;
	size_t fobj_pgoffs = 0;
	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;

	if (!uctx->regions) {
		uctx->regions = malloc(sizeof(*uctx->regions));
		if (!uctx->regions)
			return TEE_ERROR_OUT_OF_MEMORY;
		TAILQ_INIT(uctx->regions);
	}

	reg = TAILQ_FIRST(uctx->regions);
	while (reg) {
		if (core_is_buffer_intersect(b, s, reg->base, reg->size))
			return TEE_ERROR_BAD_PARAMETERS;
		if (b < reg->base)
			break;
		r_prev = reg;
		reg = TAILQ_NEXT(reg, link);
	}

	reg = alloc_region(b, s);
	if (!reg)
		return TEE_ERROR_OUT_OF_MEMORY;

	/* Table info will be set when the context is activated. */
	reg->fobj = fobj_get(fobj);
	reg->fobj_pgoffs = fobj_pgoffs;
	reg->type = PAGED_REGION_TYPE_RW;
	reg->flags = prot;

	region_insert(uctx->regions, reg, r_prev);

	return TEE_SUCCESS;
}

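/*
 * Registers each page table of @reg in the currently active user mode
 * page directory, unless the table is already in use and thus already
 * registered.
 */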
static void map_pgts(struct vm_paged_region *reg)
{
	struct core_mmu_table_info dir_info = { NULL };
	size_t n = 0;

	core_mmu_get_user_pgdir(&dir_info);

	for (n = 0; n < get_pgt_count(reg->base, reg->size); n++) {
		struct pgt *pgt = reg->pgt_array[n];
		uint32_t attr = 0;
		paddr_t pa = 0;
		size_t idx = 0;

		idx = core_mmu_va2idx(&dir_info, pgt->vabase);
		core_mmu_get_entry(&dir_info, idx, &pa, &attr);

		/*
		 * Check if the page table already is used, if it is, it's
		 * already registered.
		 */
		if (pgt->num_used_entries) {
			assert(attr & TEE_MATTR_TABLE);
			assert(pa == virt_to_phys(pgt->tbl));
			continue;
		}

		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
		pa = virt_to_phys(pgt->tbl);
		assert(pa);
		/*
		 * Note that the update of the table entry is guaranteed to
		 * be atomic.
		 */
		core_mmu_set_entry(&dir_info, idx, pa, attr);
	}
}

TEE_Result tee_pager_add_um_region(struct user_mode_ctx *uctx, vaddr_t base,
				   struct fobj *fobj, uint32_t prot)
{
	TEE_Result res = TEE_SUCCESS;
	struct thread_specific_data *tsd = thread_get_tsd();
	struct vm_paged_region *reg = NULL;

	res = pager_add_um_region(uctx, base, fobj, prot);
	if (res)
		return res;

	if (uctx->ts_ctx == tsd->ctx) {
		/*
		 * We're changing the currently active uctx. Assign page
		 * tables to the new regions and make sure that the page
		 * tables are registered in the upper table.
		 */
		tee_pager_assign_um_tables(uctx);
		TAILQ_FOREACH(reg, uctx->regions, link)
			map_pgts(reg);
	}

	return TEE_SUCCESS;
}

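/*
 * Splits @reg at @va: @reg keeps [reg->base, va) while @r2, allocated by
 * the caller, takes over the rest together with the page tables covering
 * it.
 */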
static void split_region(struct vm_paged_region *reg,
			 struct vm_paged_region *r2, vaddr_t va)
{
	uint32_t exceptions = pager_lock_check_stack(64);
	size_t diff = va - reg->base;
	size_t r2_pgt_count = 0;
	size_t reg_pgt_count = 0;
	size_t n0 = 0;
	size_t n = 0;

	assert(r2->base == va);
	assert(r2->size == reg->size - diff);

	r2->fobj = fobj_get(reg->fobj);
	r2->fobj_pgoffs = reg->fobj_pgoffs + diff / SMALL_PAGE_SIZE;
	r2->type = reg->type;
	r2->flags = reg->flags;

	r2_pgt_count = get_pgt_count(r2->base, r2->size);
	reg_pgt_count = get_pgt_count(reg->base, reg->size);
	n0 = reg_pgt_count - r2_pgt_count;
	for (n = n0; n < reg_pgt_count; n++)
		r2->pgt_array[n - n0] = reg->pgt_array[n];
	reg->size = diff;

	TAILQ_INSERT_BEFORE(reg, r2, link);
	TAILQ_INSERT_AFTER(&reg->fobj->regions, reg, r2, fobj_link);

	pager_unlock(exceptions);
}
DECLARE_KEEP_PAGER(split_region);

TEE_Result tee_pager_split_um_region(struct user_mode_ctx *uctx, vaddr_t va)
{
	struct vm_paged_region *reg = NULL;
	struct vm_paged_region *r2 = NULL;

	if (va & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	TAILQ_FOREACH(reg, uctx->regions, link) {
		if (va == reg->base || va == reg->base + reg->size)
			return TEE_SUCCESS;
		if (va > reg->base && va < reg->base + reg->size) {
			size_t diff = va - reg->base;

			r2 = alloc_region(va, reg->size - diff);
			if (!r2)
				return TEE_ERROR_OUT_OF_MEMORY;
			split_region(reg, r2, va);
			return TEE_SUCCESS;
		}
	}

	return TEE_SUCCESS;
}

static struct pgt **
merge_region_with_next(struct vm_paged_region_head *regions,
		       struct vm_paged_region *reg,
		       struct vm_paged_region *r_next, struct pgt **pgt_array)
{
	uint32_t exceptions = pager_lock_check_stack(64);
	struct pgt **old_pgt_array = reg->pgt_array;

	reg->pgt_array = pgt_array;
	TAILQ_REMOVE(regions, r_next, link);
	TAILQ_REMOVE(&r_next->fobj->regions, r_next, fobj_link);

	pager_unlock(exceptions);
	return old_pgt_array;
}
DECLARE_KEEP_PAGER(merge_region_with_next);

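/*
 * Allocates a pgt array able to cover both @a and @a_next, taking into
 * account that the two regions may share the page table at the boundary.
 * Returns NULL on allocation failure or if a shared page table differs
 * between the two regions.
 */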
static struct pgt **alloc_merged_pgt_array(struct vm_paged_region *a,
					   struct vm_paged_region *a_next)
{
	size_t a_next_pgt_count = get_pgt_count(a_next->base, a_next->size);
	size_t a_pgt_count = get_pgt_count(a->base, a->size);
	size_t pgt_count = get_pgt_count(a->base, a->size + a_next->size);
	struct pgt **pgt_array = NULL;
	bool have_shared_pgt = false;

	have_shared_pgt = ((a->base + a->size) & ~CORE_MMU_PGDIR_MASK) ==
			  (a_next->base & ~CORE_MMU_PGDIR_MASK);

	if (have_shared_pgt)
		assert(pgt_count == a_pgt_count + a_next_pgt_count - 1);
	else
		assert(pgt_count == a_pgt_count + a_next_pgt_count);

	/* In case there's a shared pgt they must match */
	if (have_shared_pgt &&
	    a->pgt_array[a_pgt_count - 1] != a_next->pgt_array[0])
		return NULL;

	pgt_array = calloc(pgt_count, sizeof(struct pgt *));
	if (!pgt_array)
		return NULL;

	/*
	 * Copy and merge the two pgt_arrays, note the special case
	 * where a pgt is shared.
	 */
	memcpy(pgt_array, a->pgt_array, a_pgt_count * sizeof(struct pgt *));
	if (have_shared_pgt)
		memcpy(pgt_array + a_pgt_count, a_next->pgt_array + 1,
		       (a_next_pgt_count - 1) * sizeof(struct pgt *));
	else
		memcpy(pgt_array + a_pgt_count, a_next->pgt_array,
		       a_next_pgt_count * sizeof(struct pgt *));

	return pgt_array;
}

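/*
 * Merges adjacent regions within [va, va + len) when they are backed by
 * contiguous pages of the same fobj and have matching type and flags.
 */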
void tee_pager_merge_um_region(struct user_mode_ctx *uctx, vaddr_t va,
			       size_t len)
{
	struct vm_paged_region *r_next = NULL;
	struct vm_paged_region *reg = NULL;
	struct pgt **pgt_array = NULL;
	vaddr_t end_va = 0;

	if ((va | len) & SMALL_PAGE_MASK)
		return;
	if (ADD_OVERFLOW(va, len, &end_va))
		return;

	for (reg = TAILQ_FIRST(uctx->regions);; reg = r_next) {
		r_next = TAILQ_NEXT(reg, link);
		if (!r_next)
			return;

		/* Try merging with the area just before va */
		if (reg->base + reg->size < va)
			continue;

		/*
		 * If reg->base is well past our range we're done.
		 * Note that if it's just the page after our range we'll
		 * try to merge.
		 */
		if (reg->base > end_va)
			return;

		if (reg->base + reg->size != r_next->base)
			continue;
		if (reg->fobj != r_next->fobj || reg->type != r_next->type ||
		    reg->flags != r_next->flags)
			continue;
		if (reg->fobj_pgoffs + reg->size / SMALL_PAGE_SIZE !=
		    r_next->fobj_pgoffs)
			continue;

		pgt_array = alloc_merged_pgt_array(reg, r_next);
		if (!pgt_array)
			continue;

		/*
		 * merge_region_with_next() returns the old pgt array which
		 * was replaced in reg. We don't want to call free()
		 * directly from merge_region_with_next() since that would
		 * pull free() and its dependencies into the unpaged area.
		 */
		free(merge_region_with_next(uctx->regions, reg, r_next,
					    pgt_array));
		free_region(r_next);
		r_next = reg;
	}
}

static void rem_region(struct vm_paged_region_head *regions,
		       struct vm_paged_region *reg)
{
	struct tee_pager_pmem *pmem;
	size_t last_pgoffs = reg->fobj_pgoffs +
			     (reg->size >> SMALL_PAGE_SHIFT) - 1;
	uint32_t exceptions;
	struct tblidx tblidx = { };
	uint32_t a = 0;

	exceptions = pager_lock_check_stack(64);

	TAILQ_REMOVE(regions, reg, link);
	TAILQ_REMOVE(&reg->fobj->regions, reg, fobj_link);

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (pmem->fobj != reg->fobj ||
		    pmem->fobj_pgidx < reg->fobj_pgoffs ||
		    pmem->fobj_pgidx > last_pgoffs)
			continue;

		tblidx = pmem_get_region_tblidx(pmem, reg);
		tblidx_get_entry(tblidx, NULL, &a);
		if (!(a & TEE_MATTR_VALID_BLOCK))
			continue;

		tblidx_set_entry(tblidx, 0, 0);
		tblidx_tlbi_entry(tblidx);
		pgt_dec_used_entries(tblidx.pgt);
	}

	pager_unlock(exceptions);
}
DECLARE_KEEP_PAGER(rem_region);

void tee_pager_rem_um_region(struct user_mode_ctx *uctx, vaddr_t base,
			     size_t size)
{
	struct vm_paged_region *reg;
	struct vm_paged_region *r_next;
	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);

	TAILQ_FOREACH_SAFE(reg, uctx->regions, link, r_next) {
		if (core_is_buffer_inside(reg->base, reg->size, base, s)) {
			rem_region(uctx->regions, reg);
			free_region(reg);
		}
	}
	tlbi_asid(uctx->vm_info.asid);
}

tee_pager_rem_um_regions(struct user_mode_ctx * uctx)1060d5ad7ccfSJens Wiklander void tee_pager_rem_um_regions(struct user_mode_ctx *uctx)
1061eae80401SJens Wiklander {
1062d5ad7ccfSJens Wiklander struct vm_paged_region *reg = NULL;
1063eae80401SJens Wiklander
1064d5ad7ccfSJens Wiklander if (!uctx->regions)
1065eae80401SJens Wiklander return;
1066eae80401SJens Wiklander
1067eae80401SJens Wiklander while (true) {
1068d5ad7ccfSJens Wiklander reg = TAILQ_FIRST(uctx->regions);
1069d5ad7ccfSJens Wiklander if (!reg)
1070eae80401SJens Wiklander break;
1071d5ad7ccfSJens Wiklander unlink_region(uctx->regions, reg);
1072d5ad7ccfSJens Wiklander free_region(reg);
1073eae80401SJens Wiklander }
1074eae80401SJens Wiklander
1075d5ad7ccfSJens Wiklander free(uctx->regions);
1076eae80401SJens Wiklander }
1077eae80401SJens Wiklander
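/*
 * Returns true if every region mapping pmem->fobj belongs to the same
 * translation table context. Only used in an assertion below: the icache
 * maintenance done there covers a single context, which is only correct
 * as long as the page isn't shared with another context.
 */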
107853716c0cSJens Wiklander static bool __maybe_unused same_context(struct tee_pager_pmem *pmem)
107953716c0cSJens Wiklander {
1080d5ad7ccfSJens Wiklander struct vm_paged_region *reg = TAILQ_FIRST(&pmem->fobj->regions);
1081d5ad7ccfSJens Wiklander void *ctx = reg->pgt_array[0]->ctx;
108253716c0cSJens Wiklander
108353716c0cSJens Wiklander do {
1084d5ad7ccfSJens Wiklander reg = TAILQ_NEXT(reg, fobj_link);
1085d5ad7ccfSJens Wiklander if (!reg)
108653716c0cSJens Wiklander return true;
1087d5ad7ccfSJens Wiklander } while (reg->pgt_array[0]->ctx == ctx);
108853716c0cSJens Wiklander
108953716c0cSJens Wiklander return false;
109053716c0cSJens Wiklander }
109153716c0cSJens Wiklander
1092d5ad7ccfSJens Wiklander bool tee_pager_set_um_region_attr(struct user_mode_ctx *uctx, vaddr_t base,
1093c6706e12SJens Wiklander size_t size, uint32_t flags)
1094a884c935SJens Wiklander {
109553a68c38SJens Wiklander bool ret = false;
1096a884c935SJens Wiklander vaddr_t b = base;
1097a884c935SJens Wiklander size_t s = size;
109853a68c38SJens Wiklander size_t s2 = 0;
1099d5ad7ccfSJens Wiklander struct vm_paged_region *reg = find_region(uctx->regions, b);
110053a68c38SJens Wiklander uint32_t exceptions = 0;
110153a68c38SJens Wiklander struct tee_pager_pmem *pmem = NULL;
110253a68c38SJens Wiklander uint32_t a = 0;
110353a68c38SJens Wiklander uint32_t f = 0;
11042e84663dSJens Wiklander uint32_t mattr = 0;
110553a68c38SJens Wiklander uint32_t f2 = 0;
11065ca851ecSJens Wiklander struct tblidx tblidx = { };
1107a884c935SJens Wiklander
1108a884c935SJens Wiklander f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
1109a884c935SJens Wiklander if (f & TEE_MATTR_UW)
1110a884c935SJens Wiklander f |= TEE_MATTR_PW;
1111d5ad7ccfSJens Wiklander mattr = get_region_mattr(f);
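	/*
	 * Worked example: flags == TEE_MATTR_UW gives
	 * f = TEE_MATTR_UW | TEE_MATTR_UR | TEE_MATTR_PR | TEE_MATTR_PW,
	 * that is, user read/write implies privileged read/write before
	 * the flags are translated into mapping attributes.
	 */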
1112a884c935SJens Wiklander
1113f16a8545SJens Wiklander exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1114a884c935SJens Wiklander
1115a884c935SJens Wiklander while (s) {
11166b743a2dSJens Wiklander if (!reg) {
1117a884c935SJens Wiklander ret = false;
1118a884c935SJens Wiklander goto out;
1119a884c935SJens Wiklander }
11206b743a2dSJens Wiklander s2 = MIN(reg->size, s);
1121a884c935SJens Wiklander b += s2;
1122a884c935SJens Wiklander s -= s2;
1123a884c935SJens Wiklander
1124d5ad7ccfSJens Wiklander if (reg->flags == f)
1125d5ad7ccfSJens Wiklander goto next_region;
11262e84663dSJens Wiklander
1127a884c935SJens Wiklander TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1128d5ad7ccfSJens Wiklander if (!pmem_is_covered_by_region(pmem, reg))
1129a884c935SJens Wiklander continue;
1130b83c0d5fSJens Wiklander
1131d5ad7ccfSJens Wiklander tblidx = pmem_get_region_tblidx(pmem, reg);
11325ca851ecSJens Wiklander tblidx_get_entry(tblidx, NULL, &a);
1133a884c935SJens Wiklander if (a == f)
1134a884c935SJens Wiklander continue;
11355ca851ecSJens Wiklander tblidx_set_entry(tblidx, 0, 0);
11365ca851ecSJens Wiklander tblidx_tlbi_entry(tblidx);
113797f9e0ddSJens Wiklander
113853a68c38SJens Wiklander pmem->flags &= ~PMEM_FLAG_HIDDEN;
113953a68c38SJens Wiklander if (pmem_is_dirty(pmem))
11402e84663dSJens Wiklander f2 = mattr;
114153a68c38SJens Wiklander else
11422e84663dSJens Wiklander f2 = mattr & ~(TEE_MATTR_UW | TEE_MATTR_PW);
11435ca851ecSJens Wiklander tblidx_set_entry(tblidx, get_pmem_pa(pmem), f2);
1144b83c0d5fSJens Wiklander if (!(a & TEE_MATTR_VALID_BLOCK))
11454a3f6ad0SJens Wiklander pgt_inc_used_entries(tblidx.pgt);
114605348de0SJens Wiklander /*
114705348de0SJens Wiklander * Make sure the table update is visible before
114805348de0SJens Wiklander * continuing.
114905348de0SJens Wiklander */
115005348de0SJens Wiklander dsb_ishst();
115197f9e0ddSJens Wiklander
1152b83c0d5fSJens Wiklander /*
1153b83c0d5fSJens Wiklander * There's a problem if this page is already shared: we
1154b83c0d5fSJens Wiklander * would need to invalidate the icache for each context
1155b83c0d5fSJens Wiklander * in which it is shared. In practice this never
1156b83c0d5fSJens Wiklander * happens.
1157b83c0d5fSJens Wiklander */
115897f9e0ddSJens Wiklander if (flags & TEE_MATTR_UX) {
11595ca851ecSJens Wiklander void *va = (void *)tblidx2va(tblidx);
1160b83c0d5fSJens Wiklander
1161b83c0d5fSJens Wiklander /* Assert that the pmem isn't shared. */
116253716c0cSJens Wiklander assert(same_context(pmem));
116397f9e0ddSJens Wiklander
1164a5fef52bSJens Wiklander dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1165c4a57390SJens Wiklander icache_inv_user_range(va, SMALL_PAGE_SIZE);
116697f9e0ddSJens Wiklander }
1167a884c935SJens Wiklander }
1168a884c935SJens Wiklander
1169d5ad7ccfSJens Wiklander reg->flags = f;
1170d5ad7ccfSJens Wiklander next_region:
1171d5ad7ccfSJens Wiklander reg = TAILQ_NEXT(reg, link);
1172a884c935SJens Wiklander }
1173a884c935SJens Wiklander
1174a884c935SJens Wiklander ret = true;
1175a884c935SJens Wiklander out:
1176a257edb4SJens Wiklander pager_unlock(exceptions);
1177a884c935SJens Wiklander return ret;
1178a884c935SJens Wiklander }
11791936dfc7SJens Wiklander
1180d5ad7ccfSJens Wiklander DECLARE_KEEP_PAGER(tee_pager_set_um_region_attr);
1181a884c935SJens Wiklander #endif /*CFG_PAGED_USER_TA*/
1182a884c935SJens Wiklander
1183b83c0d5fSJens Wiklander void tee_pager_invalidate_fobj(struct fobj *fobj)
1184b83c0d5fSJens Wiklander {
1185b83c0d5fSJens Wiklander struct tee_pager_pmem *pmem;
1186b83c0d5fSJens Wiklander uint32_t exceptions;
1187b83c0d5fSJens Wiklander
1188b83c0d5fSJens Wiklander exceptions = pager_lock_check_stack(64);
1189b83c0d5fSJens Wiklander
1190afe47fe8SJens Wiklander TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
1191afe47fe8SJens Wiklander if (pmem->fobj == fobj)
1192afe47fe8SJens Wiklander pmem_clear(pmem);
1193b83c0d5fSJens Wiklander
1194b83c0d5fSJens Wiklander pager_unlock(exceptions);
1195b83c0d5fSJens Wiklander }
11963639b55fSJerome Forissier DECLARE_KEEP_PAGER(tee_pager_invalidate_fobj);
1197b83c0d5fSJens Wiklander
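/*
 * Returns the pmem backing the page at @va in @reg, or NULL if that page
 * currently has no physical page assigned. The lookup is a linear scan
 * keyed on (fobj, fobj page index).
 */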
1198d5ad7ccfSJens Wiklander static struct tee_pager_pmem *pmem_find(struct vm_paged_region *reg, vaddr_t va)
119953a68c38SJens Wiklander {
120053a68c38SJens Wiklander struct tee_pager_pmem *pmem = NULL;
12015ca851ecSJens Wiklander size_t fobj_pgidx = 0;
12025ca851ecSJens Wiklander
1203d5ad7ccfSJens Wiklander assert(va >= reg->base && va < (reg->base + reg->size));
1204d5ad7ccfSJens Wiklander fobj_pgidx = (va - reg->base) / SMALL_PAGE_SIZE + reg->fobj_pgoffs;
120553a68c38SJens Wiklander
120653a68c38SJens Wiklander TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
1207d5ad7ccfSJens Wiklander if (pmem->fobj == reg->fobj && pmem->fobj_pgidx == fobj_pgidx)
120853a68c38SJens Wiklander return pmem;
120953a68c38SJens Wiklander
121053a68c38SJens Wiklander return NULL;
121153a68c38SJens Wiklander }
121253a68c38SJens Wiklander
1213d5ad7ccfSJens Wiklander static bool tee_pager_unhide_page(struct vm_paged_region *reg, vaddr_t page_va)
1214abe38974SJens Wiklander {
1215d5ad7ccfSJens Wiklander struct tblidx tblidx = region_va2tblidx(reg, page_va);
1216d5ad7ccfSJens Wiklander struct tee_pager_pmem *pmem = pmem_find(reg, page_va);
1217d5ad7ccfSJens Wiklander uint32_t a = get_region_mattr(reg->flags);
1218b83c0d5fSJens Wiklander uint32_t attr = 0;
1219c4a57390SJens Wiklander paddr_t pa = 0;
1220aa06d687SJens Wiklander
1221b83c0d5fSJens Wiklander if (!pmem)
1222b83c0d5fSJens Wiklander return false;
1223b83c0d5fSJens Wiklander
12245ca851ecSJens Wiklander tblidx_get_entry(tblidx, NULL, &attr);
1225b83c0d5fSJens Wiklander if (attr & TEE_MATTR_VALID_BLOCK)
1226aa06d687SJens Wiklander return false;
1227092a2b76SJens Wiklander
1228c4a57390SJens Wiklander /*
1229c4a57390SJens Wiklander * The page is hidden, or not mapped yet. Unhide the page and
1230c4a57390SJens Wiklander * move it to the tail.
1231c4a57390SJens Wiklander *
1232c4a57390SJens Wiklander * Since the page isn't mapped there doesn't exist a valid TLB entry
1233c4a57390SJens Wiklander * for this address, so no TLB invalidation is required after setting
1234c4a57390SJens Wiklander * the new entry. A DSB is needed though, to make the write visible.
1235c4a57390SJens Wiklander *
1236c4a57390SJens Wiklander * For user executable pages it's more complicated. Those pages can
1237c4a57390SJens Wiklander * be shared between multiple TA mappings and thus populated by
1238c4a57390SJens Wiklander * another TA. The reference manual states that:
1239c4a57390SJens Wiklander *
1240c4a57390SJens Wiklander * "instruction cache maintenance is required only after writing
1241c4a57390SJens Wiklander * new data to a physical address that holds an instruction."
1242c4a57390SJens Wiklander *
1243c4a57390SJens Wiklander * So for hidden pages we would not need to invalidate i-cache, but
1244c4a57390SJens Wiklander * for newly populated pages we do. Since we don't know which, we
1245c4a57390SJens Wiklander * have to assume the worst and always invalidate the i-cache. We
1246c4a57390SJens Wiklander * don't need to clean the d-cache though, since that has already
1247c4a57390SJens Wiklander * been done earlier.
1248c4a57390SJens Wiklander *
1249c4a57390SJens Wiklander * Additional bookkeeping to tell if the i-cache invalidation is
1250c4a57390SJens Wiklander * needed or not is left as a future optimization.
1251c4a57390SJens Wiklander */
12528c9d9445SEtienne Carriere
1253aa06d687SJens Wiklander /* If it's not a dirty block, then it should be read only. */
125453a68c38SJens Wiklander if (!pmem_is_dirty(pmem))
1255092a2b76SJens Wiklander a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1256a884c935SJens Wiklander
1257c4a57390SJens Wiklander pa = get_pmem_pa(pmem);
125853a68c38SJens Wiklander pmem->flags &= ~PMEM_FLAG_HIDDEN;
1259d5ad7ccfSJens Wiklander if (reg->flags & TEE_MATTR_UX) {
12605ca851ecSJens Wiklander void *va = (void *)tblidx2va(tblidx);
1261c4a57390SJens Wiklander
1262c4a57390SJens Wiklander /* Set a temporary read-only mapping */
1263c4a57390SJens Wiklander assert(!(a & (TEE_MATTR_UW | TEE_MATTR_PW)));
12645ca851ecSJens Wiklander tblidx_set_entry(tblidx, pa, a & ~TEE_MATTR_UX);
126584c40296SJens Wiklander dsb_ishst();
1266abe38974SJens Wiklander
1267c4a57390SJens Wiklander icache_inv_user_range(va, SMALL_PAGE_SIZE);
1268c4a57390SJens Wiklander
1269c4a57390SJens Wiklander /* Set the final mapping */
12705ca851ecSJens Wiklander tblidx_set_entry(tblidx, pa, a);
12715ca851ecSJens Wiklander tblidx_tlbi_entry(tblidx);
1272c4a57390SJens Wiklander } else {
12735ca851ecSJens Wiklander tblidx_set_entry(tblidx, pa, a);
1274c4a57390SJens Wiklander dsb_ishst();
1275c4a57390SJens Wiklander }
12765ca851ecSJens Wiklander pgt_inc_used_entries(tblidx.pgt);
1277c4a57390SJens Wiklander
1278abe38974SJens Wiklander TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1279abe38974SJens Wiklander TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
128093074435SPascal Brand incr_hidden_hits();
1281abe38974SJens Wiklander return true;
1282abe38974SJens Wiklander }
1283abe38974SJens Wiklander
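/*
 * Unmaps ("hides") up to TEE_PAGER_NHIDE pages from the head of the pmem
 * list, that is, the pages which have gone the longest without a fault.
 * The next access to a hidden page faults and tee_pager_unhide_page()
 * moves it to the tail again, which gives a cheap approximation of LRU
 * ordering when a page has to be evicted.
 */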
1284abe38974SJens Wiklander static void tee_pager_hide_pages(void)
1285abe38974SJens Wiklander {
1286b83c0d5fSJens Wiklander struct tee_pager_pmem *pmem = NULL;
1287abe38974SJens Wiklander size_t n = 0;
1288abe38974SJens Wiklander
1289abe38974SJens Wiklander TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1290abe38974SJens Wiklander if (n >= TEE_PAGER_NHIDE)
1291abe38974SJens Wiklander break;
1292abe38974SJens Wiklander n++;
129339d1f75cSPascal Brand
1294b83c0d5fSJens Wiklander /* we cannot hide pages when pmem->fobj is not defined. */
1295b83c0d5fSJens Wiklander if (!pmem->fobj)
129639d1f75cSPascal Brand continue;
129739d1f75cSPascal Brand
129853a68c38SJens Wiklander if (pmem_is_hidden(pmem))
1299abe38974SJens Wiklander continue;
1300abe38974SJens Wiklander
130153a68c38SJens Wiklander pmem->flags |= PMEM_FLAG_HIDDEN;
1302b83c0d5fSJens Wiklander pmem_unmap(pmem, NULL);
130340c2618fSEtienne Carriere }
1304abe38974SJens Wiklander }
130539d1f75cSPascal Brand
13069438dbdbSJens Wiklander static unsigned int __maybe_unused
1307d5ad7ccfSJens Wiklander num_regions_with_pmem(struct tee_pager_pmem *pmem)
13089438dbdbSJens Wiklander {
1309d5ad7ccfSJens Wiklander struct vm_paged_region *reg = NULL;
13109438dbdbSJens Wiklander unsigned int num_matches = 0;
13119438dbdbSJens Wiklander
1312d5ad7ccfSJens Wiklander TAILQ_FOREACH(reg, &pmem->fobj->regions, fobj_link)
1313d5ad7ccfSJens Wiklander if (pmem_is_covered_by_region(pmem, reg))
13149438dbdbSJens Wiklander num_matches++;
13159438dbdbSJens Wiklander
13169438dbdbSJens Wiklander return num_matches;
13179438dbdbSJens Wiklander }
13189438dbdbSJens Wiklander
131939d1f75cSPascal Brand /*
132039d1f75cSPascal Brand * Find the mapped pmem, unmap it and move it back to the pageable list.
132139d1f75cSPascal Brand * Return false if the page was not mapped, true if it was.
132239d1f75cSPascal Brand */
1323d5ad7ccfSJens Wiklander static bool tee_pager_release_one_phys(struct vm_paged_region *reg,
1324a884c935SJens Wiklander vaddr_t page_va)
132539d1f75cSPascal Brand {
13265ca851ecSJens Wiklander struct tee_pager_pmem *pmem = NULL;
13275ca851ecSJens Wiklander struct tblidx tblidx = { };
13285ca851ecSJens Wiklander size_t fobj_pgidx = 0;
13295ca851ecSJens Wiklander
1330d5ad7ccfSJens Wiklander assert(page_va >= reg->base && page_va < (reg->base + reg->size));
1331d5ad7ccfSJens Wiklander fobj_pgidx = (page_va - reg->base) / SMALL_PAGE_SIZE +
1332d5ad7ccfSJens Wiklander reg->fobj_pgoffs;
133339d1f75cSPascal Brand
1334092a2b76SJens Wiklander TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1335d5ad7ccfSJens Wiklander if (pmem->fobj != reg->fobj || pmem->fobj_pgidx != fobj_pgidx)
133639d1f75cSPascal Brand continue;
133739d1f75cSPascal Brand
1338b83c0d5fSJens Wiklander /*
13399438dbdbSJens Wiklander * Locked pages may not be shared. We're asserting that the
1340d5ad7ccfSJens Wiklander * number of regions using this pmem is one and only one as
13419438dbdbSJens Wiklander * we're about to unmap it.
1342b83c0d5fSJens Wiklander */
1343d5ad7ccfSJens Wiklander assert(num_regions_with_pmem(pmem) == 1);
1344b83c0d5fSJens Wiklander
1345d5ad7ccfSJens Wiklander tblidx = pmem_get_region_tblidx(pmem, reg);
13465ca851ecSJens Wiklander tblidx_set_entry(tblidx, 0, 0);
13475ca851ecSJens Wiklander pgt_dec_used_entries(tblidx.pgt);
1348092a2b76SJens Wiklander TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1349afe47fe8SJens Wiklander pmem_clear(pmem);
135039d1f75cSPascal Brand tee_pager_npages++;
135193074435SPascal Brand set_npages();
135239d1f75cSPascal Brand TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
135393074435SPascal Brand incr_zi_released();
135439d1f75cSPascal Brand return true;
135539d1f75cSPascal Brand }
135639d1f75cSPascal Brand
135739d1f75cSPascal Brand return false;
135839d1f75cSPascal Brand }
1359abe38974SJens Wiklander
136013616e88SJens Wiklander static void pager_deploy_page(struct tee_pager_pmem *pmem,
1361d5ad7ccfSJens Wiklander struct vm_paged_region *reg, vaddr_t page_va,
136213616e88SJens Wiklander bool clean_user_cache, bool writable)
136313616e88SJens Wiklander {
1364d5ad7ccfSJens Wiklander struct tblidx tblidx = region_va2tblidx(reg, page_va);
1365d5ad7ccfSJens Wiklander uint32_t attr = get_region_mattr(reg->flags);
136613616e88SJens Wiklander struct core_mmu_table_info *ti = NULL;
136713616e88SJens Wiklander uint8_t *va_alias = pmem->va_alias;
136813616e88SJens Wiklander paddr_t pa = get_pmem_pa(pmem);
136913616e88SJens Wiklander unsigned int idx_alias = 0;
137013616e88SJens Wiklander uint32_t attr_alias = 0;
137113616e88SJens Wiklander paddr_t pa_alias = 0;
137213616e88SJens Wiklander
137313616e88SJens Wiklander /* Ensure we are allowed to write to aliased virtual page */
137413616e88SJens Wiklander ti = find_table_info((vaddr_t)va_alias);
137513616e88SJens Wiklander idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
137613616e88SJens Wiklander core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
137713616e88SJens Wiklander if (!(attr_alias & TEE_MATTR_PW)) {
137813616e88SJens Wiklander attr_alias |= TEE_MATTR_PW;
137913616e88SJens Wiklander core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
1380fe16b87bSAlvin Chang tlbi_va_allasid((vaddr_t)va_alias);
138113616e88SJens Wiklander }
138213616e88SJens Wiklander
138313616e88SJens Wiklander asan_tag_access(va_alias, va_alias + SMALL_PAGE_SIZE);
138413616e88SJens Wiklander if (fobj_load_page(pmem->fobj, pmem->fobj_pgidx, va_alias)) {
138513616e88SJens Wiklander EMSG("PH 0x%" PRIxVA " failed", page_va);
138613616e88SJens Wiklander panic();
138713616e88SJens Wiklander }
1388d5ad7ccfSJens Wiklander switch (reg->type) {
1389d5ad7ccfSJens Wiklander case PAGED_REGION_TYPE_RO:
139013616e88SJens Wiklander TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
139113616e88SJens Wiklander incr_ro_hits();
139213616e88SJens Wiklander /* Forbid write to aliases for read-only (maybe exec) pages */
139313616e88SJens Wiklander attr_alias &= ~TEE_MATTR_PW;
139413616e88SJens Wiklander core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
1395fe16b87bSAlvin Chang tlbi_va_allasid((vaddr_t)va_alias);
139613616e88SJens Wiklander break;
1397d5ad7ccfSJens Wiklander case PAGED_REGION_TYPE_RW:
139813616e88SJens Wiklander TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
139913616e88SJens Wiklander if (writable && (attr & (TEE_MATTR_PW | TEE_MATTR_UW)))
140013616e88SJens Wiklander pmem->flags |= PMEM_FLAG_DIRTY;
140113616e88SJens Wiklander incr_rw_hits();
140213616e88SJens Wiklander break;
1403d5ad7ccfSJens Wiklander case PAGED_REGION_TYPE_LOCK:
140413616e88SJens Wiklander /* Move page to lock list */
140513616e88SJens Wiklander if (tee_pager_npages <= 0)
140613616e88SJens Wiklander panic("Running out of pages");
140713616e88SJens Wiklander tee_pager_npages--;
140813616e88SJens Wiklander set_npages();
140913616e88SJens Wiklander TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
141013616e88SJens Wiklander break;
141113616e88SJens Wiklander default:
141213616e88SJens Wiklander panic();
141313616e88SJens Wiklander }
141413616e88SJens Wiklander asan_tag_no_access(va_alias, va_alias + SMALL_PAGE_SIZE);
141513616e88SJens Wiklander
141613616e88SJens Wiklander if (!writable)
141713616e88SJens Wiklander attr &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
141813616e88SJens Wiklander
141913616e88SJens Wiklander /*
142013616e88SJens Wiklander * We've updated the page using the aliased mapping and
142113616e88SJens Wiklander * some cache maintenance is now needed if it's an
142213616e88SJens Wiklander * executable page.
142313616e88SJens Wiklander *
142413616e88SJens Wiklander * Since the d-cache is a Physically-indexed,
142513616e88SJens Wiklander * physically-tagged (PIPT) cache we can clean either the
142613616e88SJens Wiklander * aliased address or the real virtual address. In this
142713616e88SJens Wiklander * case we choose the real virtual address.
142813616e88SJens Wiklander *
142913616e88SJens Wiklander * The i-cache can also be PIPT, but may be something else
143013616e88SJens Wiklander * too like VIPT. The current code requires the caches to
143113616e88SJens Wiklander * implement the IVIPT extension, that is:
143213616e88SJens Wiklander * "instruction cache maintenance is required only after
143313616e88SJens Wiklander * writing new data to a physical address that holds an
143413616e88SJens Wiklander * instruction."
143513616e88SJens Wiklander *
143613616e88SJens Wiklander * To portably invalidate the icache the page has to
143713616e88SJens Wiklander * be mapped at the final virtual address but not
143813616e88SJens Wiklander * executable.
143913616e88SJens Wiklander */
1440d5ad7ccfSJens Wiklander if (reg->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
144113616e88SJens Wiklander uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
144213616e88SJens Wiklander TEE_MATTR_PW | TEE_MATTR_UW;
144313616e88SJens Wiklander void *va = (void *)page_va;
144413616e88SJens Wiklander
144513616e88SJens Wiklander /* Set a temporary read-only mapping */
14465ca851ecSJens Wiklander tblidx_set_entry(tblidx, pa, attr & ~mask);
14475ca851ecSJens Wiklander tblidx_tlbi_entry(tblidx);
144813616e88SJens Wiklander
144913616e88SJens Wiklander dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
145013616e88SJens Wiklander if (clean_user_cache)
145113616e88SJens Wiklander icache_inv_user_range(va, SMALL_PAGE_SIZE);
145213616e88SJens Wiklander else
145313616e88SJens Wiklander icache_inv_range(va, SMALL_PAGE_SIZE);
145413616e88SJens Wiklander
145513616e88SJens Wiklander /* Set the final mapping */
14565ca851ecSJens Wiklander tblidx_set_entry(tblidx, pa, attr);
14575ca851ecSJens Wiklander tblidx_tlbi_entry(tblidx);
145813616e88SJens Wiklander } else {
14595ca851ecSJens Wiklander tblidx_set_entry(tblidx, pa, attr);
146013616e88SJens Wiklander /*
146113616e88SJens Wiklander * No need to flush TLB for this entry, it was
146213616e88SJens Wiklander * invalid. We should use a barrier though, to make
146313616e88SJens Wiklander * sure that the change is visible.
146413616e88SJens Wiklander */
146513616e88SJens Wiklander dsb_ishst();
146613616e88SJens Wiklander }
14675ca851ecSJens Wiklander pgt_inc_used_entries(tblidx.pgt);
146813616e88SJens Wiklander
146913616e88SJens Wiklander FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
147013616e88SJens Wiklander }
147113616e88SJens Wiklander
1472afe47fe8SJens Wiklander static void make_dirty_page(struct tee_pager_pmem *pmem,
1473d5ad7ccfSJens Wiklander struct vm_paged_region *reg, struct tblidx tblidx,
14745ca851ecSJens Wiklander paddr_t pa)
1475afe47fe8SJens Wiklander {
1476d5ad7ccfSJens Wiklander assert(reg->flags & (TEE_MATTR_UW | TEE_MATTR_PW));
1477afe47fe8SJens Wiklander assert(!(pmem->flags & PMEM_FLAG_DIRTY));
1478afe47fe8SJens Wiklander
14795ca851ecSJens Wiklander FMSG("Dirty %#"PRIxVA, tblidx2va(tblidx));
1480afe47fe8SJens Wiklander pmem->flags |= PMEM_FLAG_DIRTY;
1481d5ad7ccfSJens Wiklander tblidx_set_entry(tblidx, pa, get_region_mattr(reg->flags));
14825ca851ecSJens Wiklander tblidx_tlbi_entry(tblidx);
1483afe47fe8SJens Wiklander }
1484afe47fe8SJens Wiklander
148513616e88SJens Wiklander /*
148613616e88SJens Wiklander * This function takes a reference to a page (@fobj + fobj_pgidx) and makes
148713616e88SJens Wiklander * the corresponding IV available.
148813616e88SJens Wiklander *
148913616e88SJens Wiklander * If the page needs to be saved, the IV must be writable, so the page
149013616e88SJens Wiklander * holding the IV is made dirty. If the page is only to be verified,
149113616e88SJens Wiklander * it's enough that the page holding the IV is read-only, so it
149213616e88SJens Wiklander * doesn't have to be made dirty.
149313616e88SJens Wiklander *
149413616e88SJens Wiklander * This function depends on pager_spare_pmem pointing to a free pmem when
149513616e88SJens Wiklander * entered. In case the page holding the needed IV isn't mapped this spare
149613616e88SJens Wiklander * pmem is used to map the page. If this function has used pager_spare_pmem
149713616e88SJens Wiklander * and assigned it to NULL it must be reassigned with a new free pmem
149813616e88SJens Wiklander * before this function can be called again.
149913616e88SJens Wiklander */
150013616e88SJens Wiklander static void make_iv_available(struct fobj *fobj, unsigned int fobj_pgidx,
150113616e88SJens Wiklander bool writable)
1502abe38974SJens Wiklander {
1503d5ad7ccfSJens Wiklander struct vm_paged_region *reg = pager_iv_region;
150413616e88SJens Wiklander struct tee_pager_pmem *pmem = NULL;
15055ca851ecSJens Wiklander struct tblidx tblidx = { };
150613616e88SJens Wiklander vaddr_t page_va = 0;
150713616e88SJens Wiklander uint32_t attr = 0;
150813616e88SJens Wiklander paddr_t pa = 0;
1509abe38974SJens Wiklander
151013616e88SJens Wiklander page_va = fobj_get_iv_vaddr(fobj, fobj_pgidx) & ~SMALL_PAGE_MASK;
1511b757e307SJens Wiklander if (!IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) || !page_va) {
1512b757e307SJens Wiklander assert(!page_va);
151313616e88SJens Wiklander return;
1514b757e307SJens Wiklander }
151513616e88SJens Wiklander
1516d5ad7ccfSJens Wiklander assert(reg && reg->type == PAGED_REGION_TYPE_RW);
151713616e88SJens Wiklander assert(pager_spare_pmem);
1518d5ad7ccfSJens Wiklander assert(core_is_buffer_inside(page_va, 1, reg->base, reg->size));
151913616e88SJens Wiklander
1520d5ad7ccfSJens Wiklander tblidx = region_va2tblidx(reg, page_va);
152113616e88SJens Wiklander /*
152213616e88SJens Wiklander * We don't care if tee_pager_unhide_page() succeeds or not, we're
152313616e88SJens Wiklander * still checking the attributes afterwards.
152413616e88SJens Wiklander */
1525d5ad7ccfSJens Wiklander tee_pager_unhide_page(reg, page_va);
15265ca851ecSJens Wiklander tblidx_get_entry(tblidx, &pa, &attr);
152713616e88SJens Wiklander if (!(attr & TEE_MATTR_VALID_BLOCK)) {
152813616e88SJens Wiklander /*
152913616e88SJens Wiklander * We're using the spare pmem to map the IV corresponding
153013616e88SJens Wiklander * to another page.
153113616e88SJens Wiklander */
153213616e88SJens Wiklander pmem = pager_spare_pmem;
153313616e88SJens Wiklander pager_spare_pmem = NULL;
1534d5ad7ccfSJens Wiklander pmem_assign_fobj_page(pmem, reg, page_va);
153513616e88SJens Wiklander
153613616e88SJens Wiklander if (writable)
153713616e88SJens Wiklander pmem->flags |= PMEM_FLAG_DIRTY;
153813616e88SJens Wiklander
1539d5ad7ccfSJens Wiklander pager_deploy_page(pmem, reg, page_va,
154013616e88SJens Wiklander false /*!clean_user_cache*/, writable);
154113616e88SJens Wiklander } else if (writable && !(attr & TEE_MATTR_PW)) {
1542d5ad7ccfSJens Wiklander pmem = pmem_find(reg, page_va);
154313616e88SJens Wiklander /* Note that pa is valid since TEE_MATTR_VALID_BLOCK is set */
1544d5ad7ccfSJens Wiklander make_dirty_page(pmem, reg, tblidx, pa);
154513616e88SJens Wiklander }
154613616e88SJens Wiklander }
154713616e88SJens Wiklander
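/*
 * Resolves a page fault in @reg: picks a pmem, loads the page into it via
 * the alias mapping and maps it at the faulting address. A sketch of the
 * spare pmem protocol used together with make_iv_available():
 *
 * 1. Take the oldest pmem, saving its content first if it's dirty.
 * 2. Call make_iv_available(), which may consume pager_spare_pmem.
 * 3. If the spare was consumed, donate the pmem freed in step 1 as the
 *    new spare and restart from step 1.
 */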
1548d5ad7ccfSJens Wiklander static void pager_get_page(struct vm_paged_region *reg, struct abort_info *ai,
154913616e88SJens Wiklander bool clean_user_cache)
155013616e88SJens Wiklander {
155113616e88SJens Wiklander vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1552d5ad7ccfSJens Wiklander struct tblidx tblidx = region_va2tblidx(reg, page_va);
155313616e88SJens Wiklander struct tee_pager_pmem *pmem = NULL;
155413616e88SJens Wiklander bool writable = false;
155513616e88SJens Wiklander uint32_t attr = 0;
155613616e88SJens Wiklander
155713616e88SJens Wiklander /*
155813616e88SJens Wiklander * Get a pmem to load code and data into, also make sure
155913616e88SJens Wiklander * the corresponding IV page is available.
156013616e88SJens Wiklander */
156113616e88SJens Wiklander while (true) {
1562abe38974SJens Wiklander pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1563abe38974SJens Wiklander if (!pmem) {
1564092a2b76SJens Wiklander EMSG("No pmem entries");
156513616e88SJens Wiklander abort_print(ai);
156613616e88SJens Wiklander panic();
1567abe38974SJens Wiklander }
1568b83c0d5fSJens Wiklander
1569b83c0d5fSJens Wiklander if (pmem->fobj) {
1570b83c0d5fSJens Wiklander pmem_unmap(pmem, NULL);
157113616e88SJens Wiklander if (pmem_is_dirty(pmem)) {
157213616e88SJens Wiklander uint8_t *va = pmem->va_alias;
157313616e88SJens Wiklander
157413616e88SJens Wiklander make_iv_available(pmem->fobj, pmem->fobj_pgidx,
157513616e88SJens Wiklander true /*writable*/);
157613616e88SJens Wiklander asan_tag_access(va, va + SMALL_PAGE_SIZE);
157713616e88SJens Wiklander if (fobj_save_page(pmem->fobj, pmem->fobj_pgidx,
157813616e88SJens Wiklander pmem->va_alias))
157913616e88SJens Wiklander panic("fobj_save_page");
158013616e88SJens Wiklander asan_tag_no_access(va, va + SMALL_PAGE_SIZE);
158113616e88SJens Wiklander
158213616e88SJens Wiklander pmem_clear(pmem);
158313616e88SJens Wiklander
158413616e88SJens Wiklander /*
158513616e88SJens Wiklander * If the spare pmem was used by
158613616e88SJens Wiklander * make_iv_available() we need to replace
158713616e88SJens Wiklander * it with the just freed pmem.
158813616e88SJens Wiklander *
158913616e88SJens Wiklander * See make_iv_available() for details.
159013616e88SJens Wiklander */
1591b757e307SJens Wiklander if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) &&
1592b757e307SJens Wiklander !pager_spare_pmem) {
159313616e88SJens Wiklander TAILQ_REMOVE(&tee_pager_pmem_head,
159413616e88SJens Wiklander pmem, link);
159513616e88SJens Wiklander pager_spare_pmem = pmem;
159613616e88SJens Wiklander pmem = NULL;
159713616e88SJens Wiklander }
159813616e88SJens Wiklander
159913616e88SJens Wiklander /*
160013616e88SJens Wiklander * Check if the needed virtual page was
160113616e88SJens Wiklander * made available as a side effect of the
160213616e88SJens Wiklander * call to make_iv_available() above. If so
160313616e88SJens Wiklander * we're done.
160413616e88SJens Wiklander */
16055ca851ecSJens Wiklander tblidx_get_entry(tblidx, NULL, &attr);
160613616e88SJens Wiklander if (attr & TEE_MATTR_VALID_BLOCK)
160713616e88SJens Wiklander return;
160813616e88SJens Wiklander
160913616e88SJens Wiklander /*
161013616e88SJens Wiklander * The freed pmem was used to replace the
161113616e88SJens Wiklander * consumed pager_spare_pmem above. Restart
161213616e88SJens Wiklander * to find another pmem.
161313616e88SJens Wiklander */
161413616e88SJens Wiklander if (!pmem)
161513616e88SJens Wiklander continue;
161613616e88SJens Wiklander }
1617abe38974SJens Wiklander }
1618abe38974SJens Wiklander
1619abe38974SJens Wiklander TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1620afe47fe8SJens Wiklander pmem_clear(pmem);
162113616e88SJens Wiklander
1622d5ad7ccfSJens Wiklander pmem_assign_fobj_page(pmem, reg, page_va);
162313616e88SJens Wiklander make_iv_available(pmem->fobj, pmem->fobj_pgidx,
162413616e88SJens Wiklander false /*!writable*/);
1625b757e307SJens Wiklander if (!IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) || pager_spare_pmem)
162613616e88SJens Wiklander break;
162713616e88SJens Wiklander
162813616e88SJens Wiklander /*
162913616e88SJens Wiklander * The spare pmem was used by make_iv_available(). We need
163013616e88SJens Wiklander * to replace it with the just freed pmem. And get another
163113616e88SJens Wiklander * pmem.
163213616e88SJens Wiklander *
163313616e88SJens Wiklander * See make_iv_available() for details.
163413616e88SJens Wiklander */
163513616e88SJens Wiklander pmem_clear(pmem);
163613616e88SJens Wiklander pager_spare_pmem = pmem;
1637abe38974SJens Wiklander }
1638abe38974SJens Wiklander
163913616e88SJens Wiklander /*
1640d5ad7ccfSJens Wiklander * PAGED_REGION_TYPE_LOCK pages are always writable while
164113616e88SJens Wiklander * PAGED_REGION_TYPE_RO pages are never writable.
164213616e88SJens Wiklander *
1643d5ad7ccfSJens Wiklander * Pages from PAGED_REGION_TYPE_RW start read-only to make it
164413616e88SJens Wiklander * possible to tell when they are updated and should be tagged
164513616e88SJens Wiklander * as dirty.
164613616e88SJens Wiklander */
1647d5ad7ccfSJens Wiklander if (reg->type == PAGED_REGION_TYPE_LOCK ||
1648d5ad7ccfSJens Wiklander (reg->type == PAGED_REGION_TYPE_RW && abort_is_write_fault(ai)))
164913616e88SJens Wiklander writable = true;
165013616e88SJens Wiklander else
165113616e88SJens Wiklander writable = false;
165213616e88SJens Wiklander
1653d5ad7ccfSJens Wiklander pager_deploy_page(pmem, reg, page_va, clean_user_cache, writable);
165421106ea2SJens Wiklander }
1655abe38974SJens Wiklander
1656d5ad7ccfSJens Wiklander static bool pager_update_permissions(struct vm_paged_region *reg,
1657a884c935SJens Wiklander struct abort_info *ai, bool *handled)
165821106ea2SJens Wiklander {
1659d5ad7ccfSJens Wiklander struct tblidx tblidx = region_va2tblidx(reg, ai->va);
166053a68c38SJens Wiklander struct tee_pager_pmem *pmem = NULL;
166153a68c38SJens Wiklander uint32_t attr = 0;
166253a68c38SJens Wiklander paddr_t pa = 0;
166321106ea2SJens Wiklander
1664a884c935SJens Wiklander *handled = false;
1665a884c935SJens Wiklander
16665ca851ecSJens Wiklander tblidx_get_entry(tblidx, &pa, &attr);
166721106ea2SJens Wiklander
166821106ea2SJens Wiklander /* Not mapped */
166921106ea2SJens Wiklander if (!(attr & TEE_MATTR_VALID_BLOCK))
167021106ea2SJens Wiklander return false;
167121106ea2SJens Wiklander
167221106ea2SJens Wiklander /* Not readable, should not happen */
1673a884c935SJens Wiklander if (abort_is_user_exception(ai)) {
1674a884c935SJens Wiklander if (!(attr & TEE_MATTR_UR))
1675a884c935SJens Wiklander return true;
1676a884c935SJens Wiklander } else {
167721106ea2SJens Wiklander if (!(attr & TEE_MATTR_PR)) {
167829e63291SJens Wiklander abort_print_error(ai);
167921106ea2SJens Wiklander panic();
168021106ea2SJens Wiklander }
1681a884c935SJens Wiklander }
168221106ea2SJens Wiklander
168321106ea2SJens Wiklander switch (core_mmu_get_fault_type(ai->fault_descr)) {
168421106ea2SJens Wiklander case CORE_MMU_FAULT_TRANSLATION:
168521106ea2SJens Wiklander case CORE_MMU_FAULT_READ_PERMISSION:
1686a884c935SJens Wiklander if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1687a884c935SJens Wiklander /* Check attempting to execute from an NOX page */
1688a884c935SJens Wiklander if (abort_is_user_exception(ai)) {
1689a884c935SJens Wiklander if (!(attr & TEE_MATTR_UX))
1690a884c935SJens Wiklander return true;
1691a884c935SJens Wiklander } else {
1692a884c935SJens Wiklander if (!(attr & TEE_MATTR_PX)) {
169329e63291SJens Wiklander abort_print_error(ai);
169421106ea2SJens Wiklander panic();
169521106ea2SJens Wiklander }
1696a884c935SJens Wiklander }
1697a884c935SJens Wiklander }
169821106ea2SJens Wiklander /* Since the page is mapped now it's OK */
1699a884c935SJens Wiklander break;
170021106ea2SJens Wiklander case CORE_MMU_FAULT_WRITE_PERMISSION:
1701a884c935SJens Wiklander /* Check attempting to write to an RO page */
1702d5ad7ccfSJens Wiklander pmem = pmem_find(reg, ai->va);
170353a68c38SJens Wiklander if (!pmem)
170453a68c38SJens Wiklander panic();
1705a884c935SJens Wiklander if (abort_is_user_exception(ai)) {
1706d5ad7ccfSJens Wiklander if (!(reg->flags & TEE_MATTR_UW))
1707a884c935SJens Wiklander return true;
1708afe47fe8SJens Wiklander if (!(attr & TEE_MATTR_UW))
1709d5ad7ccfSJens Wiklander make_dirty_page(pmem, reg, tblidx, pa);
1710a884c935SJens Wiklander } else {
1711d5ad7ccfSJens Wiklander if (!(reg->flags & TEE_MATTR_PW)) {
171229e63291SJens Wiklander abort_print_error(ai);
171321106ea2SJens Wiklander panic();
171421106ea2SJens Wiklander }
1715afe47fe8SJens Wiklander if (!(attr & TEE_MATTR_PW))
1716d5ad7ccfSJens Wiklander make_dirty_page(pmem, reg, tblidx, pa);
1717a884c935SJens Wiklander }
1718092a2b76SJens Wiklander /* Since the permissions have been updated it's OK now */
1719a884c935SJens Wiklander break;
172021106ea2SJens Wiklander default:
172121106ea2SJens Wiklander /* Some fault we can't deal with */
1722a884c935SJens Wiklander if (abort_is_user_exception(ai))
1723a884c935SJens Wiklander return true;
172429e63291SJens Wiklander abort_print_error(ai);
172521106ea2SJens Wiklander panic();
172621106ea2SJens Wiklander }
1727a884c935SJens Wiklander *handled = true;
1728a884c935SJens Wiklander return true;
1729abe38974SJens Wiklander }
1730abe38974SJens Wiklander
173104c205f6SJens Wiklander #ifdef CFG_TEE_CORE_DEBUG
173204c205f6SJens Wiklander static void stat_handle_fault(void)
173304c205f6SJens Wiklander {
173404c205f6SJens Wiklander static size_t num_faults;
173504c205f6SJens Wiklander static size_t min_npages = SIZE_MAX;
173604c205f6SJens Wiklander static size_t total_min_npages = SIZE_MAX;
173704c205f6SJens Wiklander
173804c205f6SJens Wiklander num_faults++;
173904c205f6SJens Wiklander if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
174004c205f6SJens Wiklander DMSG("nfaults %zu npages %zu (min %zu)",
174104c205f6SJens Wiklander num_faults, tee_pager_npages, min_npages);
174204c205f6SJens Wiklander min_npages = tee_pager_npages; /* reset */
174304c205f6SJens Wiklander }
174404c205f6SJens Wiklander if (tee_pager_npages < min_npages)
174504c205f6SJens Wiklander min_npages = tee_pager_npages;
174604c205f6SJens Wiklander if (tee_pager_npages < total_min_npages)
174704c205f6SJens Wiklander total_min_npages = tee_pager_npages;
174804c205f6SJens Wiklander }
174904c205f6SJens Wiklander #else
175004c205f6SJens Wiklander static void stat_handle_fault(void)
175104c205f6SJens Wiklander {
175204c205f6SJens Wiklander }
175304c205f6SJens Wiklander #endif
175404c205f6SJens Wiklander
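/*
 * Returns true if the pager resolved the abort (the page was paged in,
 * unhidden, or its permissions were updated, possibly by another core).
 * Returns false if the faulting address isn't inside a paged region or
 * the access is a genuine user-space violation, in which case the caller
 * handles the abort, typically by panicking the offending TA.
 */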
175579c1dec7SJens Wiklander bool tee_pager_handle_fault(struct abort_info *ai)
1756abe38974SJens Wiklander {
1757d5ad7ccfSJens Wiklander struct vm_paged_region *reg;
1758abe38974SJens Wiklander vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
175921106ea2SJens Wiklander uint32_t exceptions;
176079c1dec7SJens Wiklander bool ret;
1761c4a57390SJens Wiklander bool clean_user_cache = false;
1762abe38974SJens Wiklander
1763abe38974SJens Wiklander #ifdef TEE_PAGER_DEBUG_PRINT
1764c0bc8d0eSJens Wiklander if (!abort_is_user_exception(ai))
176529e63291SJens Wiklander abort_print(ai);
1766abe38974SJens Wiklander #endif
1767abe38974SJens Wiklander
176821106ea2SJens Wiklander /*
176921106ea2SJens Wiklander * We're updating pages that can affect several active CPUs at a
177021106ea2SJens Wiklander * time below. We end up here because a thread tries to access some
177121106ea2SJens Wiklander * memory that isn't available. We have to be careful when making
177221106ea2SJens Wiklander * that memory available as other threads may succeed in accessing
177321106ea2SJens Wiklander * that address the moment after we've made it available.
177421106ea2SJens Wiklander *
177521106ea2SJens Wiklander * That means that we can't just map the memory and populate the
177621106ea2SJens Wiklander * page, instead we use the aliased mapping to populate the page
177721106ea2SJens Wiklander * and once everything is ready we map it.
177821106ea2SJens Wiklander */
17793078da83SJens Wiklander exceptions = pager_lock(ai);
178021106ea2SJens Wiklander
178104c205f6SJens Wiklander stat_handle_fault();
178204c205f6SJens Wiklander
1783ff3dc840SJens Wiklander /* check if the access is valid */
1784a884c935SJens Wiklander if (abort_is_user_exception(ai)) {
1785d5ad7ccfSJens Wiklander reg = find_uta_region(ai->va);
1786c4a57390SJens Wiklander clean_user_cache = true;
1787a884c935SJens Wiklander } else {
1788d5ad7ccfSJens Wiklander reg = find_region(&core_vm_regions, ai->va);
1789d5ad7ccfSJens Wiklander if (!reg) {
1790d5ad7ccfSJens Wiklander reg = find_uta_region(ai->va);
1791c4a57390SJens Wiklander clean_user_cache = true;
1792c4a57390SJens Wiklander }
1793a884c935SJens Wiklander }
1794d5ad7ccfSJens Wiklander if (!reg || !reg->pgt_array[0]) {
179579c1dec7SJens Wiklander ret = false;
179679c1dec7SJens Wiklander goto out;
1797ff3dc840SJens Wiklander }
1798ff3dc840SJens Wiklander
1799d5ad7ccfSJens Wiklander if (tee_pager_unhide_page(reg, page_va))
180013616e88SJens Wiklander goto out_success;
180121106ea2SJens Wiklander
180221106ea2SJens Wiklander /*
180321106ea2SJens Wiklander * The page wasn't hidden, but some other core may have
1804092a2b76SJens Wiklander * updated the table entry before we got here or we need
1805092a2b76SJens Wiklander * to make a read-only page read-write (dirty).
180621106ea2SJens Wiklander */
1807d5ad7ccfSJens Wiklander if (pager_update_permissions(reg, ai, &ret)) {
180821106ea2SJens Wiklander /*
1809a884c935SJens Wiklander * Nothing more to do with the abort. The problem
1810a884c935SJens Wiklander * could already have been dealt with by another
1811a884c935SJens Wiklander * core, or if ret is false the TA will be panicked.
181221106ea2SJens Wiklander */
181321106ea2SJens Wiklander goto out;
181421106ea2SJens Wiklander }
181521106ea2SJens Wiklander
1816d5ad7ccfSJens Wiklander pager_get_page(reg, ai, clean_user_cache);
1817abe38974SJens Wiklander
181813616e88SJens Wiklander out_success:
1819abe38974SJens Wiklander tee_pager_hide_pages();
182079c1dec7SJens Wiklander ret = true;
182121106ea2SJens Wiklander out:
1822a257edb4SJens Wiklander pager_unlock(exceptions);
182379c1dec7SJens Wiklander return ret;
1824abe38974SJens Wiklander }
1825abe38974SJens Wiklander
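/*
 * Donates the physical pages backing [vaddr, vaddr + npages *
 * SMALL_PAGE_SIZE) to the pager. With @unmap true the pages are unmapped
 * and added to the pool of pageable pages, with @unmap false they remain
 * mapped and are assigned to the core region covering them. Already
 * unmapped entries in the range are skipped.
 */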
1826abe38974SJens Wiklander void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1827abe38974SJens Wiklander {
18285ca851ecSJens Wiklander size_t n = 0;
1829abe38974SJens Wiklander
1830abe38974SJens Wiklander DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1831abe38974SJens Wiklander vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1832abe38974SJens Wiklander
1833abe38974SJens Wiklander /* setup memory */
1834abe38974SJens Wiklander for (n = 0; n < npages; n++) {
18355ca851ecSJens Wiklander struct core_mmu_table_info *ti = NULL;
18365ca851ecSJens Wiklander struct tee_pager_pmem *pmem = NULL;
18372ffdd194SJens Wiklander vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
18385ca851ecSJens Wiklander struct tblidx tblidx = { };
18395ca851ecSJens Wiklander unsigned int pgidx = 0;
18405ca851ecSJens Wiklander paddr_t pa = 0;
18415ca851ecSJens Wiklander uint32_t attr = 0;
1842abe38974SJens Wiklander
1843b2087a20SJens Wiklander ti = find_table_info(va);
1844b2087a20SJens Wiklander pgidx = core_mmu_va2idx(ti, va);
1845a884c935SJens Wiklander /*
1846a884c935SJens Wiklander * Note that we can only support adding pages in the
1847a884c935SJens Wiklander * valid range of this table info, currently not a problem.
1848a884c935SJens Wiklander */
1849fb4595abSJens Wiklander core_mmu_get_entry(ti, pgidx, &pa, &attr);
1850abe38974SJens Wiklander
1851abe38974SJens Wiklander /* Ignore unmapped pages/blocks */
1852abe38974SJens Wiklander if (!(attr & TEE_MATTR_VALID_BLOCK))
1853abe38974SJens Wiklander continue;
1854abe38974SJens Wiklander
185553a68c38SJens Wiklander pmem = calloc(1, sizeof(struct tee_pager_pmem));
1856d13278b8SEtienne Carriere if (!pmem)
18578c9d9445SEtienne Carriere panic("out of mem");
185813616e88SJens Wiklander pmem_clear(pmem);
1859abe38974SJens Wiklander
186021106ea2SJens Wiklander pmem->va_alias = pager_add_alias_page(pa);
1861abe38974SJens Wiklander
1862abe38974SJens Wiklander if (unmap) {
1863fb4595abSJens Wiklander core_mmu_set_entry(ti, pgidx, 0, 0);
1864b2087a20SJens Wiklander pgt_dec_used_entries(find_core_pgt(va));
186521106ea2SJens Wiklander } else {
1866d5ad7ccfSJens Wiklander struct vm_paged_region *reg = NULL;
1867b83c0d5fSJens Wiklander
186821106ea2SJens Wiklander /*
1869d5ad7ccfSJens Wiklander * The page is still mapped, let's assign the region
187021106ea2SJens Wiklander * and update the protection bits accordingly.
187121106ea2SJens Wiklander */
1872d5ad7ccfSJens Wiklander reg = find_region(&core_vm_regions, va);
1873d5ad7ccfSJens Wiklander assert(reg);
1874d5ad7ccfSJens Wiklander pmem_assign_fobj_page(pmem, reg, va);
1875d5ad7ccfSJens Wiklander tblidx = pmem_get_region_tblidx(pmem, reg);
18765ca851ecSJens Wiklander assert(tblidx.pgt == find_core_pgt(va));
1877f7f7b639SJens Wiklander assert(pa == get_pmem_pa(pmem));
18785ca851ecSJens Wiklander tblidx_set_entry(tblidx, pa,
1879d5ad7ccfSJens Wiklander get_region_mattr(reg->flags));
1880abe38974SJens Wiklander }
188121106ea2SJens Wiklander
1882b757e307SJens Wiklander if (unmap && IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) &&
1883b757e307SJens Wiklander !pager_spare_pmem) {
188413616e88SJens Wiklander pager_spare_pmem = pmem;
188513616e88SJens Wiklander } else {
1886abe38974SJens Wiklander tee_pager_npages++;
188793074435SPascal Brand incr_npages_all();
188893074435SPascal Brand set_npages();
1889abe38974SJens Wiklander TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1890abe38974SJens Wiklander }
189113616e88SJens Wiklander }
1892abe38974SJens Wiklander
1893f0d0c301SEtienne Carriere /*
1894f0d0c301SEtienne Carriere * As this is done during init, invalidate all TLBs once instead of
1895f0d0c301SEtienne Carriere * targeting only the modified entries.
1896f0d0c301SEtienne Carriere */
1897f0d0c301SEtienne Carriere tlbi_all();
1898abe38974SJens Wiklander }
189939d1f75cSPascal Brand
1900a884c935SJens Wiklander #ifdef CFG_PAGED_USER_TA
1901a884c935SJens Wiklander static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1902a884c935SJens Wiklander {
1903a884c935SJens Wiklander struct pgt *p = pgt;
1904a884c935SJens Wiklander
1905a884c935SJens Wiklander while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1906a884c935SJens Wiklander p = SLIST_NEXT(p, link);
1907a884c935SJens Wiklander return p;
1908a884c935SJens Wiklander }
1909a884c935SJens Wiklander
19101936dfc7SJens Wiklander void tee_pager_assign_um_tables(struct user_mode_ctx *uctx)
1911a884c935SJens Wiklander {
1912d5ad7ccfSJens Wiklander struct vm_paged_region *reg = NULL;
1913867d3c7dSJens Wiklander struct pgt *pgt = NULL;
19144a3f6ad0SJens Wiklander size_t n = 0;
1915a884c935SJens Wiklander
1916d5ad7ccfSJens Wiklander if (!uctx->regions)
1917867d3c7dSJens Wiklander return;
1918867d3c7dSJens Wiklander
1919e17e7a56SJens Wiklander pgt = SLIST_FIRST(&uctx->pgt_cache);
1920d5ad7ccfSJens Wiklander TAILQ_FOREACH(reg, uctx->regions, link) {
1921d5ad7ccfSJens Wiklander for (n = 0; n < get_pgt_count(reg->base, reg->size); n++) {
1922d5ad7ccfSJens Wiklander vaddr_t va = reg->base + CORE_MMU_PGDIR_SIZE * n;
19234a3f6ad0SJens Wiklander struct pgt *p __maybe_unused = find_pgt(pgt, va);
19244a3f6ad0SJens Wiklander
1925d5ad7ccfSJens Wiklander if (!reg->pgt_array[n])
1926d5ad7ccfSJens Wiklander reg->pgt_array[n] = p;
1927a884c935SJens Wiklander else
1928d5ad7ccfSJens Wiklander assert(reg->pgt_array[n] == p);
19294a3f6ad0SJens Wiklander }
1930a884c935SJens Wiklander }
1931a884c935SJens Wiklander }
1932a884c935SJens Wiklander
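/*
 * Called before a translation table is released for reuse: unmaps every
 * pmem entry mapped through @pgt and clears all references to @pgt from
 * the pgt_array of the owning context's regions.
 */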
1933a884c935SJens Wiklander void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1934a884c935SJens Wiklander {
193555e64140SJens Wiklander struct tee_pager_pmem *pmem = NULL;
1936d5ad7ccfSJens Wiklander struct vm_paged_region *reg = NULL;
1937d5ad7ccfSJens Wiklander struct vm_paged_region_head *regions = NULL;
1938f16a8545SJens Wiklander uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
19394a3f6ad0SJens Wiklander size_t n = 0;
1940a884c935SJens Wiklander
1941a884c935SJens Wiklander if (!pgt->num_used_entries)
1942a884c935SJens Wiklander goto out;
1943a884c935SJens Wiklander
1944a884c935SJens Wiklander TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1945b83c0d5fSJens Wiklander if (pmem->fobj)
1946b83c0d5fSJens Wiklander pmem_unmap(pmem, pgt);
1947a884c935SJens Wiklander }
1948a884c935SJens Wiklander assert(!pgt->num_used_entries);
1949a884c935SJens Wiklander
1950a884c935SJens Wiklander out:
1951d5ad7ccfSJens Wiklander regions = to_user_mode_ctx(pgt->ctx)->regions;
1952d5ad7ccfSJens Wiklander if (regions) {
1953d5ad7ccfSJens Wiklander TAILQ_FOREACH(reg, regions, link) {
1954d5ad7ccfSJens Wiklander for (n = 0; n < get_pgt_count(reg->base, reg->size);
19554a3f6ad0SJens Wiklander n++) {
1956d5ad7ccfSJens Wiklander if (reg->pgt_array[n] == pgt) {
1957d5ad7ccfSJens Wiklander reg->pgt_array[n] = NULL;
19584a3f6ad0SJens Wiklander break;
19594a3f6ad0SJens Wiklander }
19604a3f6ad0SJens Wiklander }
1961a884c935SJens Wiklander }
196255e64140SJens Wiklander }
1963a884c935SJens Wiklander
1964a257edb4SJens Wiklander pager_unlock(exceptions);
1965a884c935SJens Wiklander }
19663639b55fSJerome Forissier DECLARE_KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1967a884c935SJens Wiklander #endif /*CFG_PAGED_USER_TA*/
1968a884c935SJens Wiklander
1969092a2b76SJens Wiklander void tee_pager_release_phys(void *addr, size_t size)
197039d1f75cSPascal Brand {
197139d1f75cSPascal Brand bool unmapped = false;
1972092a2b76SJens Wiklander vaddr_t va = (vaddr_t)addr;
1973092a2b76SJens Wiklander vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1974092a2b76SJens Wiklander vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1975d5ad7ccfSJens Wiklander struct vm_paged_region *reg;
1976a884c935SJens Wiklander uint32_t exceptions;
197739d1f75cSPascal Brand
197864ec106bSJens Wiklander if (end <= begin)
1979a884c935SJens Wiklander return;
1980a884c935SJens Wiklander
198187d96185SJens Wiklander exceptions = pager_lock_check_stack(128);
1982ff3dc840SJens Wiklander
1983e8193433SJens Wiklander for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
1984d5ad7ccfSJens Wiklander reg = find_region(&core_vm_regions, va);
1985d5ad7ccfSJens Wiklander if (!reg)
1986e8193433SJens Wiklander panic();
1987d5ad7ccfSJens Wiklander unmapped |= tee_pager_release_one_phys(reg, va);
1988e8193433SJens Wiklander }
198939d1f75cSPascal Brand
199039d1f75cSPascal Brand if (unmapped)
1991fe16b87bSAlvin Chang tlbi_va_range(begin, end - begin, SMALL_PAGE_SIZE);
199239d1f75cSPascal Brand
1993a257edb4SJens Wiklander pager_unlock(exceptions);
199439d1f75cSPascal Brand }
19953639b55fSJerome Forissier DECLARE_KEEP_PAGER(tee_pager_release_phys);
199639d1f75cSPascal Brand
19977513149eSJens Wiklander void *tee_pager_alloc(size_t size)
199839d1f75cSPascal Brand {
199971e2b567SJens Wiklander tee_mm_entry_t *mm = NULL;
200071e2b567SJens Wiklander uint8_t *smem = NULL;
200171e2b567SJens Wiklander size_t num_pages = 0;
200271e2b567SJens Wiklander struct fobj *fobj = NULL;
200339d1f75cSPascal Brand
200439d1f75cSPascal Brand if (!size)
200539d1f75cSPascal Brand return NULL;
200639d1f75cSPascal Brand
2007*9b0ee59dSJens Wiklander mm = tee_mm_alloc(&core_virt_mem_pool, ROUNDUP(size, SMALL_PAGE_SIZE));
200839d1f75cSPascal Brand if (!mm)
200939d1f75cSPascal Brand return NULL;
201039d1f75cSPascal Brand
2011f16a8545SJens Wiklander smem = (uint8_t *)tee_mm_get_smem(mm);
201271e2b567SJens Wiklander num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
201371e2b567SJens Wiklander fobj = fobj_locked_paged_alloc(num_pages);
201471e2b567SJens Wiklander if (!fobj) {
201571e2b567SJens Wiklander tee_mm_free(mm);
201671e2b567SJens Wiklander return NULL;
201771e2b567SJens Wiklander }
201871e2b567SJens Wiklander
2019d5ad7ccfSJens Wiklander tee_pager_add_core_region((vaddr_t)smem, PAGED_REGION_TYPE_LOCK, fobj);
202071e2b567SJens Wiklander fobj_put(fobj);
202171e2b567SJens Wiklander
202271e2b567SJens Wiklander asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE);
202339d1f75cSPascal Brand
2024f16a8545SJens Wiklander return smem;
202539d1f75cSPascal Brand }
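/*
 * Illustrative use (hypothetical caller): allocate a two page lock-paged
 * buffer. The backing pages are paged in on first access, moved to the
 * locked list, and can be returned to the pageable pool again with
 * tee_pager_release_phys(buf, 2 * SMALL_PAGE_SIZE).
 *
 *   void *buf = tee_pager_alloc(2 * SMALL_PAGE_SIZE);
 */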
202613616e88SJens Wiklander
2027d5ad7ccfSJens Wiklander vaddr_t tee_pager_init_iv_region(struct fobj *fobj)
202813616e88SJens Wiklander {
202913616e88SJens Wiklander tee_mm_entry_t *mm = NULL;
203013616e88SJens Wiklander uint8_t *smem = NULL;
203113616e88SJens Wiklander
2032d5ad7ccfSJens Wiklander assert(!pager_iv_region);
203313616e88SJens Wiklander
2034*9b0ee59dSJens Wiklander mm = tee_mm_alloc(&core_virt_mem_pool,
2035*9b0ee59dSJens Wiklander fobj->num_pages * SMALL_PAGE_SIZE);
203613616e88SJens Wiklander if (!mm)
203713616e88SJens Wiklander panic();
203813616e88SJens Wiklander
203913616e88SJens Wiklander smem = (uint8_t *)tee_mm_get_smem(mm);
2040d5ad7ccfSJens Wiklander tee_pager_add_core_region((vaddr_t)smem, PAGED_REGION_TYPE_RW, fobj);
204113616e88SJens Wiklander fobj_put(fobj);
204213616e88SJens Wiklander
204313616e88SJens Wiklander asan_tag_access(smem, smem + fobj->num_pages * SMALL_PAGE_SIZE);
204413616e88SJens Wiklander
2045d5ad7ccfSJens Wiklander pager_iv_region = find_region(&core_vm_regions, (vaddr_t)smem);
2046d5ad7ccfSJens Wiklander assert(pager_iv_region && pager_iv_region->fobj == fobj);
204713616e88SJens Wiklander
204813616e88SJens Wiklander return (vaddr_t)smem;
204913616e88SJens Wiklander }