xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 43be6453dd3e98d39721c8bc6725416772f4205c)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <io.h>
10 #include <keep.h>
11 #include <kernel/abort.h>
12 #include <kernel/asan.h>
13 #include <kernel/cache_helpers.h>
14 #include <kernel/linker.h>
15 #include <kernel/panic.h>
16 #include <kernel/spinlock.h>
17 #include <kernel/tee_misc.h>
18 #include <kernel/tee_ta_manager.h>
19 #include <kernel/thread.h>
20 #include <kernel/tlb_helpers.h>
21 #include <kernel/user_mode_ctx.h>
22 #include <mm/core_memprot.h>
23 #include <mm/fobj.h>
24 #include <mm/tee_mm.h>
25 #include <mm/tee_pager.h>
26 #include <stdlib.h>
27 #include <sys/queue.h>
28 #include <tee_api_defines.h>
29 #include <trace.h>
30 #include <types_ext.h>
31 #include <utee_defines.h>
32 #include <util.h>
33 
34 
35 static struct tee_pager_area_head tee_pager_area_head =
36 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
37 
38 #define INVALID_PGIDX		UINT_MAX
39 #define PMEM_FLAG_DIRTY		BIT(0)
40 #define PMEM_FLAG_HIDDEN	BIT(1)
41 
42 /*
43  * struct tee_pager_pmem - Represents a physical page used for paging.
44  *
45  * @flags	flags defined by PMEM_FLAG_* above
46  * @fobj_pgidx	index of the page in the @fobj
47  * @fobj	File object of which a page is made visible.
48  * @va_alias	Virtual address where the physical page is always aliased.
49  *		Used during remapping of the page when the content needs to
50  *		be updated before it's available at the new location.
51  */
52 struct tee_pager_pmem {
53 	unsigned int flags;
54 	unsigned int fobj_pgidx;
55 	struct fobj *fobj;
56 	void *va_alias;
57 	TAILQ_ENTRY(tee_pager_pmem) link;
58 };
59 
60 /* The list of physical pages. The first page in the list is the oldest */
61 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
62 
63 static struct tee_pager_pmem_head tee_pager_pmem_head =
64 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
65 
66 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
67 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
68 
69 /* Max number of pages to hide in one call to tee_pager_hide_pages() */
70 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
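
/*
 * Illustrative example: with, say, 90 registered physical pages,
 * TEE_PAGER_NHIDE evaluates to 30, so tee_pager_hide_pages() below hides
 * at most 30 pages per invocation.
 */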
71 
72 /* Number of registered physical pages, used when hiding pages. */
73 static size_t tee_pager_npages;
74 
75 #ifdef CFG_WITH_STATS
76 static struct tee_pager_stats pager_stats;
77 
78 static inline void incr_ro_hits(void)
79 {
80 	pager_stats.ro_hits++;
81 }
82 
83 static inline void incr_rw_hits(void)
84 {
85 	pager_stats.rw_hits++;
86 }
87 
88 static inline void incr_hidden_hits(void)
89 {
90 	pager_stats.hidden_hits++;
91 }
92 
93 static inline void incr_zi_released(void)
94 {
95 	pager_stats.zi_released++;
96 }
97 
98 static inline void incr_npages_all(void)
99 {
100 	pager_stats.npages_all++;
101 }
102 
103 static inline void set_npages(void)
104 {
105 	pager_stats.npages = tee_pager_npages;
106 }
107 
108 void tee_pager_get_stats(struct tee_pager_stats *stats)
109 {
110 	*stats = pager_stats;
111 
112 	pager_stats.hidden_hits = 0;
113 	pager_stats.ro_hits = 0;
114 	pager_stats.rw_hits = 0;
115 	pager_stats.zi_released = 0;
116 }
117 
118 #else /* CFG_WITH_STATS */
119 static inline void incr_ro_hits(void) { }
120 static inline void incr_rw_hits(void) { }
121 static inline void incr_hidden_hits(void) { }
122 static inline void incr_zi_released(void) { }
123 static inline void incr_npages_all(void) { }
124 static inline void set_npages(void) { }
125 
126 void tee_pager_get_stats(struct tee_pager_stats *stats)
127 {
128 	memset(stats, 0, sizeof(struct tee_pager_stats));
129 }
130 #endif /* CFG_WITH_STATS */
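
/*
 * Sketch of a hypothetical consumer of the counters above. Note that with
 * CFG_WITH_STATS enabled tee_pager_get_stats() also resets the hit
 * counters:
 *
 *	struct tee_pager_stats stats = { };
 *
 *	tee_pager_get_stats(&stats);
 *	... inspect stats.ro_hits, stats.rw_hits, stats.hidden_hits ...
 */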
131 
132 #define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
133 #define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
134 #define TBL_SHIFT	SMALL_PAGE_SHIFT
135 
136 #define EFFECTIVE_VA_SIZE \
137 	(ROUNDUP(VCORE_START_VA + TEE_RAM_VA_SIZE, CORE_MMU_PGDIR_SIZE) - \
138 	 ROUNDDOWN(VCORE_START_VA, CORE_MMU_PGDIR_SIZE))
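
/*
 * Worked example, assuming the common LPAE-style configuration of 4 KiB
 * small pages and a 2 MiB CORE_MMU_PGDIR_SIZE: each pager_table then
 * covers 2 MiB with TBL_NUM_ENTRIES == 512 entries, and EFFECTIVE_VA_SIZE
 * rounds the [VCORE_START_VA, VCORE_START_VA + TEE_RAM_VA_SIZE) range
 * outwards to whole 2 MiB granules, which tee_pager_early_init() below
 * divides by CORE_MMU_PGDIR_SIZE to size the pager_tables array.
 */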
139 
140 static struct pager_table {
141 	struct pgt pgt;
142 	struct core_mmu_table_info tbl_info;
143 } *pager_tables;
144 static unsigned int num_pager_tables;
145 
146 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
147 
148 /* Defines the range of the alias area */
149 static tee_mm_entry_t *pager_alias_area;
150 /*
151  * Physical pages are added in a stack-like fashion to the alias area.
152  * @pager_alias_next_free gives the address of the next free entry if
153  * @pager_alias_next_free is != 0
154  */
155 static uintptr_t pager_alias_next_free;
156 
157 #ifdef CFG_TEE_CORE_DEBUG
158 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
159 
160 static uint32_t pager_lock_dldetect(const char *func, const int line,
161 				    struct abort_info *ai)
162 {
163 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
164 	unsigned int retries = 0;
165 	unsigned int reminder = 0;
166 
167 	while (!cpu_spin_trylock(&pager_spinlock)) {
168 		retries++;
169 		if (!retries) {
170 			/* wrapped, time to report */
171 			trace_printf(func, line, TRACE_ERROR, true,
172 				     "possible spinlock deadlock reminder %u",
173 				     reminder);
174 			if (reminder < UINT_MAX)
175 				reminder++;
176 			if (ai)
177 				abort_print(ai);
178 		}
179 	}
180 
181 	return exceptions;
182 }
183 #else
184 static uint32_t pager_lock(struct abort_info __unused *ai)
185 {
186 	return cpu_spin_lock_xsave(&pager_spinlock);
187 }
188 #endif
189 
190 static uint32_t pager_lock_check_stack(size_t stack_size)
191 {
192 	if (stack_size) {
193 		int8_t buf[stack_size];
194 		size_t n;
195 
196 		/*
197 		 * Make sure to touch all pages of the stack that we expect
198 		 * to use with this lock held. We need to take any potential
199 		 * page faults before the lock is taken or we'll deadlock
200 		 * the pager. The pages that are populated in this way will
201 		 * eventually be released at certain save transitions of
202 		 * the thread.
203 		 */
204 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
205 			io_write8((vaddr_t)buf + n, 1);
206 		io_write8((vaddr_t)buf + stack_size - 1, 1);
207 	}
208 
209 	return pager_lock(NULL);
210 }
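
/*
 * Sketch of the intended call pattern (hypothetical caller; the 64-byte
 * stack estimate is only an example, callers in this file use anything
 * from 8 bytes up to SMALL_PAGE_SIZE):
 *
 *	uint32_t exceptions = pager_lock_check_stack(64);
 *
 *	... update pager data structures, no page faults allowed here ...
 *
 *	pager_unlock(exceptions);
 */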
211 
212 static void pager_unlock(uint32_t exceptions)
213 {
214 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
215 }
216 
217 void *tee_pager_phys_to_virt(paddr_t pa)
218 {
219 	struct core_mmu_table_info ti;
220 	unsigned idx;
221 	uint32_t a;
222 	paddr_t p;
223 	vaddr_t v;
224 	size_t n;
225 
226 	/*
227 	 * Most addresses are mapped linearly, so try that first if possible.
228 	 */
229 	if (!tee_pager_get_table_info(pa, &ti))
230 		return NULL; /* impossible pa */
231 	idx = core_mmu_va2idx(&ti, pa);
232 	core_mmu_get_entry(&ti, idx, &p, &a);
233 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
234 		return (void *)core_mmu_idx2va(&ti, idx);
235 
236 	n = 0;
237 	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
238 	while (true) {
239 		while (idx < TBL_NUM_ENTRIES) {
240 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
241 			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
242 				return NULL;
243 
244 			core_mmu_get_entry(&pager_tables[n].tbl_info,
245 					   idx, &p, &a);
246 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
247 				return (void *)v;
248 			idx++;
249 		}
250 
251 		n++;
252 		if (n >= num_pager_tables)
253 			return NULL;
254 		idx = 0;
255 	}
256 
257 	return NULL;
258 }
259 
260 static bool pmem_is_hidden(struct tee_pager_pmem *pmem)
261 {
262 	return pmem->flags & PMEM_FLAG_HIDDEN;
263 }
264 
265 static bool pmem_is_dirty(struct tee_pager_pmem *pmem)
266 {
267 	return pmem->flags & PMEM_FLAG_DIRTY;
268 }
269 
270 static bool pmem_is_covered_by_area(struct tee_pager_pmem *pmem,
271 				    struct tee_pager_area *area)
272 {
273 	if (pmem->fobj != area->fobj)
274 		return false;
275 	if (pmem->fobj_pgidx < area->fobj_pgoffs)
276 		return false;
277 	if ((pmem->fobj_pgidx - area->fobj_pgoffs) >=
278 	    (area->size >> SMALL_PAGE_SHIFT))
279 		return false;
280 
281 	return true;
282 }
283 
284 static size_t pmem_get_area_tblidx(struct tee_pager_pmem *pmem,
285 				   struct tee_pager_area *area)
286 {
287 	size_t tbloffs = (area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT;
288 
289 	return pmem->fobj_pgidx - area->fobj_pgoffs + tbloffs;
290 }
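
/*
 * Worked example with made-up numbers: an area whose base lies 0x3000
 * bytes into its 2 MiB translation table has tbloffs == 3. If the area
 * starts at fobj_pgoffs == 0, a pmem holding fobj page index 2 (the third
 * page of the fobj) ends up at table index 3 + 2 == 5, i.e. five small
 * pages into that table.
 */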
291 
292 static struct pager_table *find_pager_table_may_fail(vaddr_t va)
293 {
294 	size_t n;
295 	const vaddr_t mask = CORE_MMU_PGDIR_MASK;
296 
297 	if (!pager_tables)
298 		return NULL;
299 
300 	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
301 	    CORE_MMU_PGDIR_SHIFT;
302 	if (n >= num_pager_tables)
303 		return NULL;
304 
305 	assert(va >= pager_tables[n].tbl_info.va_base &&
306 	       va <= (pager_tables[n].tbl_info.va_base | mask));
307 
308 	return pager_tables + n;
309 }
310 
311 static struct pager_table *find_pager_table(vaddr_t va)
312 {
313 	struct pager_table *pt = find_pager_table_may_fail(va);
314 
315 	assert(pt);
316 	return pt;
317 }
318 
319 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
320 {
321 	struct pager_table *pt = find_pager_table_may_fail(va);
322 
323 	if (!pt)
324 		return false;
325 
326 	*ti = pt->tbl_info;
327 	return true;
328 }
329 
330 static struct core_mmu_table_info *find_table_info(vaddr_t va)
331 {
332 	return &find_pager_table(va)->tbl_info;
333 }
334 
335 static struct pgt *find_core_pgt(vaddr_t va)
336 {
337 	return &find_pager_table(va)->pgt;
338 }
339 
340 void tee_pager_set_alias_area(tee_mm_entry_t *mm)
341 {
342 	struct pager_table *pt;
343 	unsigned idx;
344 	vaddr_t smem = tee_mm_get_smem(mm);
345 	size_t nbytes = tee_mm_get_bytes(mm);
346 	vaddr_t v;
347 	uint32_t a = 0;
348 
349 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
350 
351 	assert(!pager_alias_area);
352 	pager_alias_area = mm;
353 	pager_alias_next_free = smem;
354 
355 	/* Clear all mappings in the alias area */
356 	pt = find_pager_table(smem);
357 	idx = core_mmu_va2idx(&pt->tbl_info, smem);
358 	while (pt <= (pager_tables + num_pager_tables - 1)) {
359 		while (idx < TBL_NUM_ENTRIES) {
360 			v = core_mmu_idx2va(&pt->tbl_info, idx);
361 			if (v >= (smem + nbytes))
362 				goto out;
363 
364 			core_mmu_get_entry(&pt->tbl_info, idx, NULL, &a);
365 			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
366 			if (a & TEE_MATTR_VALID_BLOCK)
367 				pgt_dec_used_entries(&pt->pgt);
368 			idx++;
369 		}
370 
371 		pt++;
372 		idx = 0;
373 	}
374 
375 out:
376 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
377 }
378 
379 static size_t tbl_usage_count(struct core_mmu_table_info *ti)
380 {
381 	size_t n;
382 	uint32_t a = 0;
383 	size_t usage = 0;
384 
385 	for (n = 0; n < ti->num_entries; n++) {
386 		core_mmu_get_entry(ti, n, NULL, &a);
387 		if (a & TEE_MATTR_VALID_BLOCK)
388 			usage++;
389 	}
390 	return usage;
391 }
392 
393 static void area_get_entry(struct tee_pager_area *area, size_t idx,
394 			   paddr_t *pa, uint32_t *attr)
395 {
396 	assert(area->pgt);
397 	assert(idx < TBL_NUM_ENTRIES);
398 	core_mmu_get_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
399 }
400 
401 static void area_set_entry(struct tee_pager_area *area, size_t idx,
402 			   paddr_t pa, uint32_t attr)
403 {
404 	assert(area->pgt);
405 	assert(idx < TBL_NUM_ENTRIES);
406 	core_mmu_set_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
407 }
408 
409 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
410 {
411 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
412 }
413 
414 static vaddr_t area_idx2va(struct tee_pager_area *area, size_t idx)
415 {
416 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
417 }
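
/*
 * Continuing the made-up example above: with the area base 0x3000 bytes
 * into its 2 MiB table, a fault address 0x5000 bytes into that table maps
 * to index 5 via area_va2idx(), and area_idx2va(area, 5) maps back to the
 * same page-aligned virtual address. The index is relative to the start
 * of the translation table, not to area->base, which makes it directly
 * comparable with pmem_get_area_tblidx() above.
 */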
418 
419 static void area_tlbi_entry(struct tee_pager_area *area, size_t idx)
420 {
421 	vaddr_t va = area_idx2va(area, idx);
422 
423 #if defined(CFG_PAGED_USER_TA)
424 	assert(area->pgt);
425 	if (area->pgt->ctx) {
426 		uint32_t asid = to_user_mode_ctx(area->pgt->ctx)->vm_info.asid;
427 
428 		tlbi_mva_asid(va, asid);
429 		return;
430 	}
431 #endif
432 	tlbi_mva_allasid(va);
433 }
434 
435 static void pmem_unmap(struct tee_pager_pmem *pmem, struct pgt *only_this_pgt)
436 {
437 	struct tee_pager_area *area = NULL;
438 	size_t tblidx = 0;
439 	uint32_t a = 0;
440 
441 	TAILQ_FOREACH(area, &pmem->fobj->areas, fobj_link) {
442 		/*
443 		 * If only_this_pgt points to a pgt then the pgt of this
444 		 * area has to match or we'll skip over it.
445 		 */
446 		if (only_this_pgt && area->pgt != only_this_pgt)
447 			continue;
448 		if (!area->pgt || !pmem_is_covered_by_area(pmem, area))
449 			continue;
450 		tblidx = pmem_get_area_tblidx(pmem, area);
451 		area_get_entry(area, tblidx, NULL, &a);
452 		if (a & TEE_MATTR_VALID_BLOCK) {
453 			area_set_entry(area, tblidx, 0, 0);
454 			pgt_dec_used_entries(area->pgt);
455 			area_tlbi_entry(area, tblidx);
456 		}
457 	}
458 }
459 
460 void tee_pager_early_init(void)
461 {
462 	size_t n = 0;
463 
464 	num_pager_tables = EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE;
465 	pager_tables = calloc(num_pager_tables, sizeof(*pager_tables));
466 	if (!pager_tables)
467 		panic("Cannot allocate pager_tables");
468 
469 	/*
470 	 * Note that this depends on add_pager_vaspace() adding vaspace
471 	 * after the end of memory.
472 	 */
473 	for (n = 0; n < num_pager_tables; n++) {
474 		if (!core_mmu_find_table(NULL, VCORE_START_VA +
475 					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
476 					 &pager_tables[n].tbl_info))
477 			panic("can't find mmu tables");
478 
479 		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
480 			panic("Unsupported page size in translation table");
481 		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
482 		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);
483 
484 		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
485 		pgt_set_used_entries(&pager_tables[n].pgt,
486 				tbl_usage_count(&pager_tables[n].tbl_info));
487 	}
488 }
489 
490 static void *pager_add_alias_page(paddr_t pa)
491 {
492 	unsigned idx;
493 	struct core_mmu_table_info *ti;
494 	/* Alias pages mapped read-only; write access is enabled when needed */
495 	uint32_t attr = TEE_MATTR_VALID_BLOCK |
496 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
497 			TEE_MATTR_SECURE | TEE_MATTR_PR;
498 
499 	DMSG("0x%" PRIxPA, pa);
500 
501 	ti = find_table_info(pager_alias_next_free);
502 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
503 	core_mmu_set_entry(ti, idx, pa, attr);
504 	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
505 	pager_alias_next_free += SMALL_PAGE_SIZE;
506 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
507 				      tee_mm_get_bytes(pager_alias_area)))
508 		pager_alias_next_free = 0;
509 	return (void *)core_mmu_idx2va(ti, idx);
510 }
511 
512 static void area_insert(struct tee_pager_area_head *head,
513 			struct tee_pager_area *area,
514 			struct tee_pager_area *a_prev)
515 {
516 	uint32_t exceptions = pager_lock_check_stack(8);
517 
518 	if (a_prev)
519 		TAILQ_INSERT_AFTER(head, a_prev, area, link);
520 	else
521 		TAILQ_INSERT_HEAD(head, area, link);
522 	TAILQ_INSERT_TAIL(&area->fobj->areas, area, fobj_link);
523 
524 	pager_unlock(exceptions);
525 }
526 KEEP_PAGER(area_insert);
527 
528 void tee_pager_add_core_area(vaddr_t base, enum tee_pager_area_type type,
529 			     struct fobj *fobj)
530 {
531 	struct tee_pager_area *area = NULL;
532 	uint32_t flags = 0;
533 	size_t fobj_pgoffs = 0;
534 	vaddr_t b = base;
535 	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
536 	size_t s2 = 0;
537 
538 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : type %d", base, base + s, type);
539 
540 	if (base & SMALL_PAGE_MASK || !s) {
541 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, s);
542 		panic();
543 	}
544 
545 	switch (type) {
546 	case PAGER_AREA_TYPE_RO:
547 		flags = TEE_MATTR_PRX;
548 		break;
549 	case PAGER_AREA_TYPE_RW:
550 	case PAGER_AREA_TYPE_LOCK:
551 		flags = TEE_MATTR_PRW;
552 		break;
553 	default:
554 		panic();
555 	}
556 
557 	if (!fobj)
558 		panic();
559 
560 	while (s) {
561 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
562 		area = calloc(1, sizeof(*area));
563 		if (!area)
564 			panic("alloc_area");
565 
566 		area->fobj = fobj_get(fobj);
567 		area->fobj_pgoffs = fobj_pgoffs;
568 		area->type = type;
569 		area->pgt = find_core_pgt(b);
570 		area->base = b;
571 		area->size = s2;
572 		area->flags = flags;
573 		area_insert(&tee_pager_area_head, area, NULL);
574 
575 		b += s2;
576 		s -= s2;
577 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
578 	}
579 }
580 
581 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
582 					vaddr_t va)
583 {
584 	struct tee_pager_area *area;
585 
586 	if (!areas)
587 		return NULL;
588 
589 	TAILQ_FOREACH(area, areas, link) {
590 		if (core_is_buffer_inside(va, 1, area->base, area->size))
591 			return area;
592 	}
593 	return NULL;
594 }
595 
596 #ifdef CFG_PAGED_USER_TA
597 static struct tee_pager_area *find_uta_area(vaddr_t va)
598 {
599 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
600 
601 	if (!is_user_mode_ctx(ctx))
602 		return NULL;
603 	return find_area(to_user_mode_ctx(ctx)->areas, va);
604 }
605 #else
606 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
607 {
608 	return NULL;
609 }
610 #endif /*CFG_PAGED_USER_TA*/
611 
612 
613 static uint32_t get_area_mattr(uint32_t area_flags)
614 {
615 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
616 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
617 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
618 
619 	return attr;
620 }
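
/*
 * Example, using only attribute bits already present in this file: for a
 * PAGER_AREA_TYPE_RO core area (flags == TEE_MATTR_PRX), get_area_mattr()
 * yields TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE | the cached memory
 * attribute | TEE_MATTR_PRX, which is what tee_pager_handle_fault()
 * programs into the small-page entry once the page has been loaded.
 */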
621 
622 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
623 {
624 	struct core_mmu_table_info *ti;
625 	paddr_t pa;
626 	unsigned idx;
627 
628 	ti = find_table_info((vaddr_t)pmem->va_alias);
629 	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
630 	core_mmu_get_entry(ti, idx, &pa, NULL);
631 	return pa;
632 }
633 
634 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
635 			void *va_alias)
636 {
637 	size_t fobj_pgoffs = ((page_va - area->base) >> SMALL_PAGE_SHIFT) +
638 			     area->fobj_pgoffs;
639 	struct core_mmu_table_info *ti;
640 	uint32_t attr_alias;
641 	paddr_t pa_alias;
642 	unsigned int idx_alias;
643 
644 	/* Ensure we are allowed to write to the aliased virtual page */
645 	ti = find_table_info((vaddr_t)va_alias);
646 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
647 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
648 	if (!(attr_alias & TEE_MATTR_PW)) {
649 		attr_alias |= TEE_MATTR_PW;
650 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
651 		tlbi_mva_allasid((vaddr_t)va_alias);
652 	}
653 
654 	asan_tag_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
655 	if (fobj_load_page(area->fobj, fobj_pgoffs, va_alias)) {
656 		EMSG("PH 0x%" PRIxVA " failed", page_va);
657 		panic();
658 	}
659 	switch (area->type) {
660 	case PAGER_AREA_TYPE_RO:
661 		incr_ro_hits();
662 		/* Forbid write to aliases for read-only (maybe exec) pages */
663 		attr_alias &= ~TEE_MATTR_PW;
664 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
665 		tlbi_mva_allasid((vaddr_t)va_alias);
666 		break;
667 	case PAGER_AREA_TYPE_RW:
668 		incr_rw_hits();
669 		break;
670 	case PAGER_AREA_TYPE_LOCK:
671 		break;
672 	default:
673 		panic();
674 	}
675 	asan_tag_no_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
676 }
677 
678 static void tee_pager_save_page(struct tee_pager_pmem *pmem)
679 {
680 	if (pmem_is_dirty(pmem)) {
681 		asan_tag_access(pmem->va_alias,
682 				(uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
683 		if (fobj_save_page(pmem->fobj, pmem->fobj_pgidx,
684 				   pmem->va_alias))
685 			panic("fobj_save_page");
686 		asan_tag_no_access(pmem->va_alias,
687 				   (uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
688 	}
689 }
690 
691 #ifdef CFG_PAGED_USER_TA
692 static void unlink_area(struct tee_pager_area_head *area_head,
693 			struct tee_pager_area *area)
694 {
695 	uint32_t exceptions = pager_lock_check_stack(64);
696 
697 	TAILQ_REMOVE(area_head, area, link);
698 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
699 
700 	pager_unlock(exceptions);
701 }
702 KEEP_PAGER(unlink_area);
703 
704 static void free_area(struct tee_pager_area *area)
705 {
706 	fobj_put(area->fobj);
707 	free(area);
708 }
709 
710 static TEE_Result pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base,
711 				    struct fobj *fobj, uint32_t prot)
712 {
713 	struct tee_pager_area *a_prev = NULL;
714 	struct tee_pager_area *area = NULL;
715 	vaddr_t b = base;
716 	size_t fobj_pgoffs = 0;
717 	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
718 
719 	if (!uctx->areas) {
720 		uctx->areas = malloc(sizeof(*uctx->areas));
721 		if (!uctx->areas)
722 			return TEE_ERROR_OUT_OF_MEMORY;
723 		TAILQ_INIT(uctx->areas);
724 	}
725 
726 	area = TAILQ_FIRST(uctx->areas);
727 	while (area) {
728 		if (core_is_buffer_intersect(b, s, area->base,
729 					     area->size))
730 			return TEE_ERROR_BAD_PARAMETERS;
731 		if (b < area->base)
732 			break;
733 		a_prev = area;
734 		area = TAILQ_NEXT(area, link);
735 	}
736 
737 	while (s) {
738 		size_t s2;
739 
740 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
741 		area = calloc(1, sizeof(*area));
742 		if (!area)
743 			return TEE_ERROR_OUT_OF_MEMORY;
744 
745 		/* Table info will be set when the context is activated. */
746 		area->fobj = fobj_get(fobj);
747 		area->fobj_pgoffs = fobj_pgoffs;
748 		area->type = PAGER_AREA_TYPE_RW;
749 		area->base = b;
750 		area->size = s2;
751 		area->flags = prot;
752 
753 		area_insert(uctx->areas, area, a_prev);
754 
755 		a_prev = area;
756 		b += s2;
757 		s -= s2;
758 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
759 	}
760 
761 	return TEE_SUCCESS;
762 }
763 
764 TEE_Result tee_pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base,
765 				 struct fobj *fobj, uint32_t prot)
766 {
767 	TEE_Result res = TEE_SUCCESS;
768 	struct thread_specific_data *tsd = thread_get_tsd();
769 	struct tee_pager_area *area = NULL;
770 	struct core_mmu_table_info dir_info = { NULL };
771 
772 	if (&uctx->ctx != tsd->ctx) {
773 		/*
774 		 * Changes are to a context that isn't active. Just add the
775 		 * areas; page tables will be dealt with later.
776 		 */
777 		return pager_add_um_area(uctx, base, fobj, prot);
778 	}
779 
780 	/*
781 	 * Assign page tables before adding areas to be able to tell which
782 	 * are newly added and should be removed in case of failure.
783 	 */
784 	tee_pager_assign_um_tables(uctx);
785 	res = pager_add_um_area(uctx, base, fobj, prot);
786 	if (res) {
787 		struct tee_pager_area *next_a;
788 
789 		/* Remove all added areas */
790 		TAILQ_FOREACH_SAFE(area, uctx->areas, link, next_a) {
791 			if (!area->pgt) {
792 				unlink_area(uctx->areas, area);
793 				free_area(area);
794 			}
795 		}
796 		return res;
797 	}
798 
799 	/*
800 	 * Assign page tables to the new areas and make sure that the page
801 	 * tables are registered in the upper table.
802 	 */
803 	tee_pager_assign_um_tables(uctx);
804 	core_mmu_get_user_pgdir(&dir_info);
805 	TAILQ_FOREACH(area, uctx->areas, link) {
806 		paddr_t pa;
807 		size_t idx;
808 		uint32_t attr;
809 
810 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
811 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
812 
813 		/*
814 		 * Check if the page table is already used; if it is, it's
815 		 * already registered.
816 		 */
817 		if (area->pgt->num_used_entries) {
818 			assert(attr & TEE_MATTR_TABLE);
819 			assert(pa == virt_to_phys(area->pgt->tbl));
820 			continue;
821 		}
822 
823 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
824 		pa = virt_to_phys(area->pgt->tbl);
825 		assert(pa);
826 		/*
827 		 * Note that the update of the table entry is guaranteed to
828 		 * be atomic.
829 		 */
830 		core_mmu_set_entry(&dir_info, idx, pa, attr);
831 	}
832 
833 	return TEE_SUCCESS;
834 }
835 
836 static void split_area(struct tee_pager_area_head *area_head,
837 		       struct tee_pager_area *area, struct tee_pager_area *a2,
838 		       vaddr_t va)
839 {
840 	uint32_t exceptions = pager_lock_check_stack(64);
841 	size_t diff = va - area->base;
842 
843 	a2->fobj = fobj_get(area->fobj);
844 	a2->fobj_pgoffs = area->fobj_pgoffs + diff / SMALL_PAGE_SIZE;
845 	a2->type = area->type;
846 	a2->flags = area->flags;
847 	a2->base = va;
848 	a2->size = area->size - diff;
849 	a2->pgt = area->pgt;
850 	area->size = diff;
851 
852 	TAILQ_INSERT_AFTER(area_head, area, a2, link);
853 	TAILQ_INSERT_AFTER(&area->fobj->areas, area, a2, fobj_link);
854 
855 	pager_unlock(exceptions);
856 }
857 KEEP_PAGER(split_area);
858 
859 TEE_Result tee_pager_split_um_region(struct user_mode_ctx *uctx, vaddr_t va)
860 {
861 	struct tee_pager_area *area = NULL;
862 	struct tee_pager_area *a2 = NULL;
863 
864 	if (va & SMALL_PAGE_MASK)
865 		return TEE_ERROR_BAD_PARAMETERS;
866 
867 	TAILQ_FOREACH(area, uctx->areas, link) {
868 		if (va == area->base || va == area->base + area->size)
869 			return TEE_SUCCESS;
870 		if (va > area->base && va < area->base + area->size) {
871 			a2 = calloc(1, sizeof(*a2));
872 			if (!a2)
873 				return TEE_ERROR_OUT_OF_MEMORY;
874 			split_area(uctx->areas, area, a2, va);
875 			return TEE_SUCCESS;
876 		}
877 	}
878 
879 	return TEE_SUCCESS;
880 }
881 
882 static void merge_area_with_next(struct tee_pager_area_head *area_head,
883 				 struct tee_pager_area *a,
884 				 struct tee_pager_area *a_next)
885 {
886 	uint32_t exceptions = pager_lock_check_stack(64);
887 
888 	TAILQ_REMOVE(area_head, a_next, link);
889 	TAILQ_REMOVE(&a_next->fobj->areas, a_next, fobj_link);
890 	a->size += a_next->size;
891 
892 	pager_unlock(exceptions);
893 }
894 KEEP_PAGER(merge_area_with_next);
895 
896 void tee_pager_merge_um_region(struct user_mode_ctx *uctx, vaddr_t va,
897 			       size_t len)
898 {
899 	struct tee_pager_area *a_next = NULL;
900 	struct tee_pager_area *a = NULL;
901 
902 	if ((va | len) & SMALL_PAGE_MASK)
903 		return;
904 
905 	for (a = TAILQ_FIRST(uctx->areas);; a = a_next) {
906 		a_next = TAILQ_NEXT(a, link);
907 		if (!a_next)
908 			return;
909 
910 		/* Try merging with the area just before va */
911 		if (a->base + a->size < va)
912 			continue;
913 
914 		/*
915 		 * If a->base is well past our range we're done.
916 		 * Note that if it's just the page after our range we'll
917 		 * try to merge.
918 		 */
919 		if (a->base > va + len)
920 			return;
921 
922 		if (a->base + a->size != a_next->base)
923 			continue;
924 		if (a->fobj != a_next->fobj || a->type != a_next->type ||
925 		    a->flags != a_next->flags || a->pgt != a_next->pgt)
926 			continue;
927 		if (a->fobj_pgoffs + a->size / SMALL_PAGE_SIZE !=
928 		    a_next->fobj_pgoffs)
929 			continue;
930 
931 		merge_area_with_next(uctx->areas, a, a_next);
932 		free_area(a_next);
933 		a_next = a;
934 	}
935 }
936 
937 static void rem_area(struct tee_pager_area_head *area_head,
938 		     struct tee_pager_area *area)
939 {
940 	struct tee_pager_pmem *pmem;
941 	size_t last_pgoffs = area->fobj_pgoffs +
942 			     (area->size >> SMALL_PAGE_SHIFT) - 1;
943 	uint32_t exceptions;
944 	size_t idx = 0;
945 	uint32_t a = 0;
946 
947 	exceptions = pager_lock_check_stack(64);
948 
949 	TAILQ_REMOVE(area_head, area, link);
950 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
951 
952 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
953 		if (pmem->fobj != area->fobj ||
954 		    pmem->fobj_pgidx < area->fobj_pgoffs ||
955 		    pmem->fobj_pgidx > last_pgoffs)
956 			continue;
957 
958 		idx = pmem_get_area_tblidx(pmem, area);
959 		area_get_entry(area, idx, NULL, &a);
960 		if (!(a & TEE_MATTR_VALID_BLOCK))
961 			continue;
962 
963 		area_set_entry(area, idx, 0, 0);
964 		area_tlbi_entry(area, idx);
965 		pgt_dec_used_entries(area->pgt);
966 	}
967 
968 	pager_unlock(exceptions);
969 
970 	free_area(area);
971 }
972 KEEP_PAGER(rem_area);
973 
974 void tee_pager_rem_um_region(struct user_mode_ctx *uctx, vaddr_t base,
975 			     size_t size)
976 {
977 	struct tee_pager_area *area;
978 	struct tee_pager_area *next_a;
979 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
980 
981 	TAILQ_FOREACH_SAFE(area, uctx->areas, link, next_a) {
982 		if (core_is_buffer_inside(area->base, area->size, base, s))
983 			rem_area(uctx->areas, area);
984 	}
985 	tlbi_asid(uctx->vm_info.asid);
986 }
987 
988 void tee_pager_rem_um_areas(struct user_mode_ctx *uctx)
989 {
990 	struct tee_pager_area *area = NULL;
991 
992 	if (!uctx->areas)
993 		return;
994 
995 	while (true) {
996 		area = TAILQ_FIRST(uctx->areas);
997 		if (!area)
998 			break;
999 		unlink_area(uctx->areas, area);
1000 		free_area(area);
1001 	}
1002 
1003 	free(uctx->areas);
1004 }
1005 
1006 static bool __maybe_unused same_context(struct tee_pager_pmem *pmem)
1007 {
1008 	struct tee_pager_area *a = TAILQ_FIRST(&pmem->fobj->areas);
1009 	void *ctx = a->pgt->ctx;
1010 
1011 	do {
1012 		a = TAILQ_NEXT(a, fobj_link);
1013 		if (!a)
1014 			return true;
1015 	} while (a->pgt->ctx == ctx);
1016 
1017 	return false;
1018 }
1019 
1020 bool tee_pager_set_um_area_attr(struct user_mode_ctx *uctx, vaddr_t base,
1021 				size_t size, uint32_t flags)
1022 {
1023 	bool ret = false;
1024 	vaddr_t b = base;
1025 	size_t s = size;
1026 	size_t s2 = 0;
1027 	struct tee_pager_area *area = find_area(uctx->areas, b);
1028 	uint32_t exceptions = 0;
1029 	struct tee_pager_pmem *pmem = NULL;
1030 	uint32_t a = 0;
1031 	uint32_t f = 0;
1032 	uint32_t mattr = 0;
1033 	uint32_t f2 = 0;
1034 	size_t tblidx = 0;
1035 
1036 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
1037 	if (f & TEE_MATTR_UW)
1038 		f |= TEE_MATTR_PW;
1039 	mattr = get_area_mattr(f);
1040 
1041 	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1042 
1043 	while (s) {
1044 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
1045 		if (!area || area->base != b || area->size != s2) {
1046 			ret = false;
1047 			goto out;
1048 		}
1049 		b += s2;
1050 		s -= s2;
1051 
1052 		if (area->flags == f)
1053 			goto next_area;
1054 
1055 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1056 			if (!pmem_is_covered_by_area(pmem, area))
1057 				continue;
1058 
1059 			tblidx = pmem_get_area_tblidx(pmem, area);
1060 			area_get_entry(area, tblidx, NULL, &a);
1061 			if (a == f)
1062 				continue;
1063 			area_set_entry(area, tblidx, 0, 0);
1064 			area_tlbi_entry(area, tblidx);
1065 
1066 			pmem->flags &= ~PMEM_FLAG_HIDDEN;
1067 			if (pmem_is_dirty(pmem))
1068 				f2 = mattr;
1069 			else
1070 				f2 = mattr & ~(TEE_MATTR_UW | TEE_MATTR_PW);
1071 			area_set_entry(area, tblidx, get_pmem_pa(pmem), f2);
1072 			if (!(a & TEE_MATTR_VALID_BLOCK))
1073 				pgt_inc_used_entries(area->pgt);
1074 			/*
1075 			 * Make sure the table update is visible before
1076 			 * continuing.
1077 			 */
1078 			dsb_ishst();
1079 
1080 			/*
1081 			 * There's a problem if this page is already shared:
1082 			 * we would need an icache invalidation for each context
1083 			 * in which it is shared. In practice this will
1084 			 * never happen.
1085 			 */
1086 			if (flags & TEE_MATTR_UX) {
1087 				void *va = (void *)area_idx2va(area, tblidx);
1088 
1089 				/* Assert that the pmem isn't shared. */
1090 				assert(same_context(pmem));
1091 
1092 				dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1093 				icache_inv_user_range(va, SMALL_PAGE_SIZE);
1094 			}
1095 		}
1096 
1097 		area->flags = f;
1098 next_area:
1099 		area = TAILQ_NEXT(area, link);
1100 	}
1101 
1102 	ret = true;
1103 out:
1104 	pager_unlock(exceptions);
1105 	return ret;
1106 }
1107 
1108 KEEP_PAGER(tee_pager_set_um_area_attr);
1109 #endif /*CFG_PAGED_USER_TA*/
1110 
1111 void tee_pager_invalidate_fobj(struct fobj *fobj)
1112 {
1113 	struct tee_pager_pmem *pmem;
1114 	uint32_t exceptions;
1115 
1116 	exceptions = pager_lock_check_stack(64);
1117 
1118 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1119 		if (pmem->fobj == fobj) {
1120 			pmem->fobj = NULL;
1121 			pmem->fobj_pgidx = INVALID_PGIDX;
1122 		}
1123 	}
1124 
1125 	pager_unlock(exceptions);
1126 }
1127 KEEP_PAGER(tee_pager_invalidate_fobj);
1128 
1129 static struct tee_pager_pmem *pmem_find(struct tee_pager_area *area,
1130 					unsigned int tblidx)
1131 {
1132 	struct tee_pager_pmem *pmem = NULL;
1133 
1134 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
1135 		if (pmem->fobj == area->fobj &&
1136 		    pmem_get_area_tblidx(pmem, area) == tblidx)
1137 			return pmem;
1138 
1139 	return NULL;
1140 }
1141 
1142 static bool tee_pager_unhide_page(struct tee_pager_area *area,
1143 				  unsigned int tblidx)
1144 {
1145 	struct tee_pager_pmem *pmem = pmem_find(area, tblidx);
1146 	uint32_t a = get_area_mattr(area->flags);
1147 	uint32_t attr = 0;
1148 	paddr_t pa = 0;
1149 
1150 	if (!pmem)
1151 		return false;
1152 
1153 	area_get_entry(area, tblidx, NULL, &attr);
1154 	if (attr & TEE_MATTR_VALID_BLOCK)
1155 		return false;
1156 
1157 	/*
1158 	 * The page is hidden, or not mapped yet. Unhide the page and
1159 	 * move it to the tail.
1160 	 *
1161 	 * Since the page isn't mapped, there is no valid TLB entry
1162 	 * for this address, so no TLB invalidation is required after setting
1163 	 * the new entry. A DSB is needed though, to make the write visible.
1164 	 *
1165 	 * For user executable pages it's more complicated. Those pages can
1166 	 * be shared between multiple TA mappings and thus populated by
1167 	 * another TA. The reference manual states that:
1168 	 *
1169 	 * "instruction cache maintenance is required only after writing
1170 	 * new data to a physical address that holds an instruction."
1171 	 *
1172 	 * So for hidden pages we would not need to invalidate i-cache, but
1173 	 * for newly populated pages we do. Since we don't know which, we
1174 	 * have to assume the worst and always invalidate the i-cache. We
1175 	 * don't need to clean the d-cache though, since that has already
1176 	 * been done earlier.
1177 	 *
1178 	 * Additional bookkeeping to tell if the i-cache invalidation is
1179 	 * needed or not is left as a future optimization.
1180 	 */
1181 
1182 	/* If it's not a dirty block, then it should be read only. */
1183 	if (!pmem_is_dirty(pmem))
1184 		a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1185 
1186 	pa = get_pmem_pa(pmem);
1187 	pmem->flags &= ~PMEM_FLAG_HIDDEN;
1188 	if (area->flags & TEE_MATTR_UX) {
1189 		void *va = (void *)area_idx2va(area, tblidx);
1190 
1191 		/* Set a temporary read-only mapping */
1192 		assert(!(a & (TEE_MATTR_UW | TEE_MATTR_PW)));
1193 		area_set_entry(area, tblidx, pa, a & ~TEE_MATTR_UX);
1194 		dsb_ishst();
1195 
1196 		icache_inv_user_range(va, SMALL_PAGE_SIZE);
1197 
1198 		/* Set the final mapping */
1199 		area_set_entry(area, tblidx, pa, a);
1200 		area_tlbi_entry(area, tblidx);
1201 	} else {
1202 		area_set_entry(area, tblidx, pa, a);
1203 		dsb_ishst();
1204 	}
1205 	pgt_inc_used_entries(area->pgt);
1206 
1207 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1208 	TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1209 	incr_hidden_hits();
1210 	return true;
1211 }
1212 
1213 static void tee_pager_hide_pages(void)
1214 {
1215 	struct tee_pager_pmem *pmem = NULL;
1216 	size_t n = 0;
1217 
1218 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1219 		if (n >= TEE_PAGER_NHIDE)
1220 			break;
1221 		n++;
1222 
1223 		/* We cannot hide pages when pmem->fobj is not set */
1224 		if (!pmem->fobj)
1225 			continue;
1226 
1227 		if (pmem_is_hidden(pmem))
1228 			continue;
1229 
1230 		pmem->flags |= PMEM_FLAG_HIDDEN;
1231 		pmem_unmap(pmem, NULL);
1232 	}
1233 }
1234 
1235 static unsigned int __maybe_unused
1236 num_areas_with_pmem(struct tee_pager_pmem *pmem)
1237 {
1238 	struct tee_pager_area *a = NULL;
1239 	unsigned int num_matches = 0;
1240 
1241 	TAILQ_FOREACH(a, &pmem->fobj->areas, fobj_link)
1242 		if (pmem_is_covered_by_area(pmem, a))
1243 			num_matches++;
1244 
1245 	return num_matches;
1246 }
1247 
1248 /*
1249  * Find mapped pmem, unmap it and move it to the pageable pmem list.
1250  * Return false if the page was not mapped, and true if it was mapped.
1251  */
1252 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1253 				       vaddr_t page_va)
1254 {
1255 	struct tee_pager_pmem *pmem;
1256 	size_t tblidx = 0;
1257 	size_t pgidx = area_va2idx(area, page_va) + area->fobj_pgoffs -
1258 		       ((area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT);
1259 
1260 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1261 		if (pmem->fobj != area->fobj || pmem->fobj_pgidx != pgidx)
1262 			continue;
1263 
1264 		/*
1265 		 * Locked pages may not be shared. We're asserting that the
1266 		 * number of areas using this pmem is one and only one as
1267 		 * we're about to unmap it.
1268 		 */
1269 		assert(num_areas_with_pmem(pmem) == 1);
1270 
1271 		tblidx = pmem_get_area_tblidx(pmem, area);
1272 		area_set_entry(area, tblidx, 0, 0);
1273 		pgt_dec_used_entries(area->pgt);
1274 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1275 		pmem->fobj = NULL;
1276 		pmem->fobj_pgidx = INVALID_PGIDX;
1277 		tee_pager_npages++;
1278 		set_npages();
1279 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1280 		incr_zi_released();
1281 		return true;
1282 	}
1283 
1284 	return false;
1285 }
1286 
1287 /* Finds the oldest page and unmaps it from all tables */
1288 static struct tee_pager_pmem *tee_pager_get_page(enum tee_pager_area_type at)
1289 {
1290 	struct tee_pager_pmem *pmem;
1291 
1292 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1293 	if (!pmem) {
1294 		EMSG("No pmem entries");
1295 		return NULL;
1296 	}
1297 
1298 	if (pmem->fobj) {
1299 		pmem_unmap(pmem, NULL);
1300 		tee_pager_save_page(pmem);
1301 	}
1302 
1303 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1304 	pmem->fobj = NULL;
1305 	pmem->fobj_pgidx = INVALID_PGIDX;
1306 	pmem->flags = 0;
1307 	if (at == PAGER_AREA_TYPE_LOCK) {
1308 		/* Move page to lock list */
1309 		if (tee_pager_npages <= 0)
1310 			panic("running out of pages");
1311 		tee_pager_npages--;
1312 		set_npages();
1313 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1314 	} else {
1315 		/* move page to back */
1316 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1317 	}
1318 
1319 	return pmem;
1320 }
1321 
1322 static bool pager_update_permissions(struct tee_pager_area *area,
1323 			struct abort_info *ai, bool *handled)
1324 {
1325 	unsigned int pgidx = area_va2idx(area, ai->va);
1326 	struct tee_pager_pmem *pmem = NULL;
1327 	uint32_t attr = 0;
1328 	paddr_t pa = 0;
1329 
1330 	*handled = false;
1331 
1332 	area_get_entry(area, pgidx, &pa, &attr);
1333 
1334 	/* Not mapped */
1335 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1336 		return false;
1337 
1338 	/* Not readable, should not happen */
1339 	if (abort_is_user_exception(ai)) {
1340 		if (!(attr & TEE_MATTR_UR))
1341 			return true;
1342 	} else {
1343 		if (!(attr & TEE_MATTR_PR)) {
1344 			abort_print_error(ai);
1345 			panic();
1346 		}
1347 	}
1348 
1349 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1350 	case CORE_MMU_FAULT_TRANSLATION:
1351 	case CORE_MMU_FAULT_READ_PERMISSION:
1352 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1353 			/* Check for execution from a non-executable page */
1354 			if (abort_is_user_exception(ai)) {
1355 				if (!(attr & TEE_MATTR_UX))
1356 					return true;
1357 			} else {
1358 				if (!(attr & TEE_MATTR_PX)) {
1359 					abort_print_error(ai);
1360 					panic();
1361 				}
1362 			}
1363 		}
1364 		/* Since the page is mapped now it's OK */
1365 		break;
1366 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1367 		/* Check for an attempt to write to a read-only page */
1368 		pmem = pmem_find(area, pgidx);
1369 		if (!pmem)
1370 			panic();
1371 		if (abort_is_user_exception(ai)) {
1372 			if (!(area->flags & TEE_MATTR_UW))
1373 				return true;
1374 			if (!(attr & TEE_MATTR_UW)) {
1375 				FMSG("Dirty %p",
1376 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1377 				pmem->flags |= PMEM_FLAG_DIRTY;
1378 				area_set_entry(area, pgidx, pa,
1379 					       get_area_mattr(area->flags));
1380 				area_tlbi_entry(area, pgidx);
1381 			}
1382 
1383 		} else {
1384 			if (!(area->flags & TEE_MATTR_PW)) {
1385 				abort_print_error(ai);
1386 				panic();
1387 			}
1388 			if (!(attr & TEE_MATTR_PW)) {
1389 				FMSG("Dirty %p",
1390 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1391 				pmem->flags |= PMEM_FLAG_DIRTY;
1392 				area_set_entry(area, pgidx, pa,
1393 					       get_area_mattr(area->flags));
1394 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1395 			}
1396 		}
1397 		/* Since the permissions have been updated it's OK now */
1398 		break;
1399 	default:
1400 		/* Some fault we can't deal with */
1401 		if (abort_is_user_exception(ai))
1402 			return true;
1403 		abort_print_error(ai);
1404 		panic();
1405 	}
1406 	*handled = true;
1407 	return true;
1408 }
1409 
1410 #ifdef CFG_TEE_CORE_DEBUG
1411 static void stat_handle_fault(void)
1412 {
1413 	static size_t num_faults;
1414 	static size_t min_npages = SIZE_MAX;
1415 	static size_t total_min_npages = SIZE_MAX;
1416 
1417 	num_faults++;
1418 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1419 		DMSG("nfaults %zu npages %zu (min %zu)",
1420 		     num_faults, tee_pager_npages, min_npages);
1421 		min_npages = tee_pager_npages; /* reset */
1422 	}
1423 	if (tee_pager_npages < min_npages)
1424 		min_npages = tee_pager_npages;
1425 	if (tee_pager_npages < total_min_npages)
1426 		total_min_npages = tee_pager_npages;
1427 }
1428 #else
1429 static void stat_handle_fault(void)
1430 {
1431 }
1432 #endif
1433 
1434 bool tee_pager_handle_fault(struct abort_info *ai)
1435 {
1436 	struct tee_pager_area *area;
1437 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1438 	uint32_t exceptions;
1439 	bool ret;
1440 	bool clean_user_cache = false;
1441 
1442 #ifdef TEE_PAGER_DEBUG_PRINT
1443 	if (!abort_is_user_exception(ai))
1444 		abort_print(ai);
1445 #endif
1446 
1447 	/*
1448 	 * We're updating pages that can affect several active CPUs at a
1449 	 * time below. We end up here because a thread tries to access some
1450 	 * memory that isn't available. We have to be careful when making
1451 	 * that memory available as other threads may succeed in accessing
1452 	 * that address the moment after we've made it available.
1453 	 *
1454 	 * That means that we can't just map the memory and populate the
1455 	 * page, instead we use the aliased mapping to populate the page
1456 	 * and once everything is ready we map it.
1457 	 */
1458 	exceptions = pager_lock(ai);
1459 
1460 	stat_handle_fault();
1461 
1462 	/* check if the access is valid */
1463 	if (abort_is_user_exception(ai)) {
1464 		area = find_uta_area(ai->va);
1465 		clean_user_cache = true;
1466 	} else {
1467 		area = find_area(&tee_pager_area_head, ai->va);
1468 		if (!area) {
1469 			area = find_uta_area(ai->va);
1470 			clean_user_cache = true;
1471 		}
1472 	}
1473 	if (!area || !area->pgt) {
1474 		ret = false;
1475 		goto out;
1476 	}
1477 
1478 	if (!tee_pager_unhide_page(area, area_va2idx(area, page_va))) {
1479 		struct tee_pager_pmem *pmem = NULL;
1480 		uint32_t attr = 0;
1481 		paddr_t pa = 0;
1482 		size_t tblidx = 0;
1483 
1484 		/*
1485 		 * The page wasn't hidden, but some other core may have
1486 		 * updated the table entry before we got here or we need
1487 		 * to make a read-only page read-write (dirty).
1488 		 */
1489 		if (pager_update_permissions(area, ai, &ret)) {
1490 			/*
1491 			 * Nothing more to do with the abort. The problem
1492 			 * could already have been dealt with by another
1493 			 * core, or if ret is false the TA will be panicked.
1494 			 */
1495 			goto out;
1496 		}
1497 
1498 		pmem = tee_pager_get_page(area->type);
1499 		if (!pmem) {
1500 			abort_print(ai);
1501 			panic();
1502 		}
1503 
1504 		/* load page code & data */
1505 		tee_pager_load_page(area, page_va, pmem->va_alias);
1506 
1507 
1508 		pmem->fobj = area->fobj;
1509 		pmem->fobj_pgidx = area_va2idx(area, page_va) +
1510 				   area->fobj_pgoffs -
1511 				   ((area->base & CORE_MMU_PGDIR_MASK) >>
1512 					SMALL_PAGE_SHIFT);
1513 		tblidx = pmem_get_area_tblidx(pmem, area);
1514 		attr = get_area_mattr(area->flags);
1515 		/*
1516 		 * Pages from PAGER_AREA_TYPE_RW start read-only to be
1517 		 * able to tell when they are updated and should be tagged
1518 		 * as dirty.
1519 		 */
1520 		if (area->type == PAGER_AREA_TYPE_RW)
1521 			attr &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1522 		pa = get_pmem_pa(pmem);
1523 
1524 		/*
1525 		 * We've updated the page using the aliased mapping and
1526 		 * some cache maintenance is now needed if it's an
1527 		 * executable page.
1528 		 *
1529 		 * Since the d-cache is a Physically-indexed,
1530 		 * physically-tagged (PIPT) cache we can clean either the
1531 		 * aliased address or the real virtual address. In this
1532 		 * case we choose the real virtual address.
1533 		 *
1534 		 * The i-cache can also be PIPT, but may be something else
1535 		 * too like VIPT. The current code requires the caches to
1536 		 * implement the IVIPT extension, that is:
1537 		 * "instruction cache maintenance is required only after
1538 		 * writing new data to a physical address that holds an
1539 		 * instruction."
1540 		 *
1541 		 * To portably invalidate the icache the page has to
1542 		 * be mapped at the final virtual address but not
1543 		 * executable.
1544 		 */
1545 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1546 			uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1547 					TEE_MATTR_PW | TEE_MATTR_UW;
1548 			void *va = (void *)page_va;
1549 
1550 			/* Set a temporary read-only mapping */
1551 			area_set_entry(area, tblidx, pa, attr & ~mask);
1552 			area_tlbi_entry(area, tblidx);
1553 
1554 			dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1555 			if (clean_user_cache)
1556 				icache_inv_user_range(va, SMALL_PAGE_SIZE);
1557 			else
1558 				icache_inv_range(va, SMALL_PAGE_SIZE);
1559 
1560 			/* Set the final mapping */
1561 			area_set_entry(area, tblidx, pa, attr);
1562 			area_tlbi_entry(area, tblidx);
1563 		} else {
1564 			area_set_entry(area, tblidx, pa, attr);
1565 			/*
1566 			 * No need to flush TLB for this entry, it was
1567 			 * invalid. We should use a barrier though, to make
1568 			 * sure that the change is visible.
1569 			 */
1570 			dsb_ishst();
1571 		}
1572 		pgt_inc_used_entries(area->pgt);
1573 
1574 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1575 
1576 	}
1577 
1578 	tee_pager_hide_pages();
1579 	ret = true;
1580 out:
1581 	pager_unlock(exceptions);
1582 	return ret;
1583 }
1584 
1585 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1586 {
1587 	size_t n;
1588 
1589 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1590 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1591 
1592 	/* setup memory */
1593 	for (n = 0; n < npages; n++) {
1594 		struct core_mmu_table_info *ti;
1595 		struct tee_pager_pmem *pmem;
1596 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1597 		unsigned int pgidx;
1598 		paddr_t pa;
1599 		uint32_t attr;
1600 
1601 		ti = find_table_info(va);
1602 		pgidx = core_mmu_va2idx(ti, va);
1603 		/*
1604 		 * Note that we can only support adding pages in the
1605 		 * valid range of this table info, currently not a problem.
1606 		 */
1607 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1608 
1609 		/* Ignore unmapped pages/blocks */
1610 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1611 			continue;
1612 
1613 		pmem = calloc(1, sizeof(struct tee_pager_pmem));
1614 		if (!pmem)
1615 			panic("out of mem");
1616 
1617 		pmem->va_alias = pager_add_alias_page(pa);
1618 
1619 		if (unmap) {
1620 			pmem->fobj = NULL;
1621 			pmem->fobj_pgidx = INVALID_PGIDX;
1622 			core_mmu_set_entry(ti, pgidx, 0, 0);
1623 			pgt_dec_used_entries(find_core_pgt(va));
1624 		} else {
1625 			struct tee_pager_area *area = NULL;
1626 
1627 			/*
1628 			 * The page is still mapped, let's assign the area
1629 			 * and update the protection bits accordingly.
1630 			 */
1631 			area = find_area(&tee_pager_area_head, va);
1632 			assert(area && area->pgt == find_core_pgt(va));
1633 			pmem->fobj = area->fobj;
1634 			pmem->fobj_pgidx = pgidx + area->fobj_pgoffs -
1635 					   ((area->base &
1636 							CORE_MMU_PGDIR_MASK) >>
1637 						SMALL_PAGE_SHIFT);
1638 			assert(pgidx == pmem_get_area_tblidx(pmem, area));
1639 			assert(pa == get_pmem_pa(pmem));
1640 			area_set_entry(area, pgidx, pa,
1641 				       get_area_mattr(area->flags));
1642 		}
1643 
1644 		tee_pager_npages++;
1645 		incr_npages_all();
1646 		set_npages();
1647 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1648 	}
1649 
1650 	/*
1651 	 * As this is done at init time, invalidate all TLBs once instead of
1652 	 * targeting only the modified entries.
1653 	 */
1654 	tlbi_all();
1655 }
1656 
1657 #ifdef CFG_PAGED_USER_TA
1658 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1659 {
1660 	struct pgt *p = pgt;
1661 
1662 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1663 		p = SLIST_NEXT(p, link);
1664 	return p;
1665 }
1666 
1667 void tee_pager_assign_um_tables(struct user_mode_ctx *uctx)
1668 {
1669 	struct tee_pager_area *area = NULL;
1670 	struct pgt *pgt = NULL;
1671 
1672 	if (!uctx->areas)
1673 		return;
1674 
1675 	pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1676 	TAILQ_FOREACH(area, uctx->areas, link) {
1677 		if (!area->pgt)
1678 			area->pgt = find_pgt(pgt, area->base);
1679 		else
1680 			assert(area->pgt == find_pgt(pgt, area->base));
1681 		if (!area->pgt)
1682 			panic();
1683 	}
1684 }
1685 
1686 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1687 {
1688 	struct tee_pager_pmem *pmem = NULL;
1689 	struct tee_pager_area *area = NULL;
1690 	struct tee_pager_area_head *areas = NULL;
1691 	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1692 
1693 	if (!pgt->num_used_entries)
1694 		goto out;
1695 
1696 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1697 		if (pmem->fobj)
1698 			pmem_unmap(pmem, pgt);
1699 	}
1700 	assert(!pgt->num_used_entries);
1701 
1702 out:
1703 	areas = to_user_ta_ctx(pgt->ctx)->uctx.areas;
1704 	if (areas) {
1705 		TAILQ_FOREACH(area, areas, link) {
1706 			if (area->pgt == pgt)
1707 				area->pgt = NULL;
1708 		}
1709 	}
1710 
1711 	pager_unlock(exceptions);
1712 }
1713 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1714 #endif /*CFG_PAGED_USER_TA*/
1715 
1716 void tee_pager_release_phys(void *addr, size_t size)
1717 {
1718 	bool unmapped = false;
1719 	vaddr_t va = (vaddr_t)addr;
1720 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1721 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1722 	struct tee_pager_area *area;
1723 	uint32_t exceptions;
1724 
1725 	if (end <= begin)
1726 		return;
1727 
1728 	exceptions = pager_lock_check_stack(128);
1729 
1730 	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
1731 		area = find_area(&tee_pager_area_head, va);
1732 		if (!area)
1733 			panic();
1734 		unmapped |= tee_pager_release_one_phys(area, va);
1735 	}
1736 
1737 	if (unmapped)
1738 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1739 
1740 	pager_unlock(exceptions);
1741 }
1742 KEEP_PAGER(tee_pager_release_phys);
1743 
1744 void *tee_pager_alloc(size_t size)
1745 {
1746 	tee_mm_entry_t *mm = NULL;
1747 	uint8_t *smem = NULL;
1748 	size_t num_pages = 0;
1749 	struct fobj *fobj = NULL;
1750 
1751 	if (!size)
1752 		return NULL;
1753 
1754 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1755 	if (!mm)
1756 		return NULL;
1757 
1758 	smem = (uint8_t *)tee_mm_get_smem(mm);
1759 	num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
1760 	fobj = fobj_locked_paged_alloc(num_pages);
1761 	if (!fobj) {
1762 		tee_mm_free(mm);
1763 		return NULL;
1764 	}
1765 
1766 	tee_pager_add_core_area((vaddr_t)smem, PAGER_AREA_TYPE_LOCK, fobj);
1767 	fobj_put(fobj);
1768 
1769 	asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE);
1770 
1771 	return smem;
1772 }
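
/*
 * Minimal usage sketch of tee_pager_alloc() together with
 * tee_pager_release_phys(); the caller and the three-page size are
 * hypothetical and error handling is elided:
 *
 *	void *buf = tee_pager_alloc(3 * SMALL_PAGE_SIZE);
 *
 *	if (buf) {
 *		memset(buf, 0, 3 * SMALL_PAGE_SIZE);
 *		... pages are faulted in on first access and then pinned
 *		    on the lock list while in use ...
 *		tee_pager_release_phys(buf, 3 * SMALL_PAGE_SIZE);
 *	}
 *
 * After tee_pager_release_phys() the physical pages go back to the
 * pageable pool and the buffer contents are not preserved.
 */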
1773