xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision afe47fe821332bd6e462d9aefae0379b634c03d2)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2021, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <io.h>
10 #include <keep.h>
11 #include <kernel/abort.h>
12 #include <kernel/asan.h>
13 #include <kernel/cache_helpers.h>
14 #include <kernel/linker.h>
15 #include <kernel/panic.h>
16 #include <kernel/spinlock.h>
17 #include <kernel/tee_misc.h>
18 #include <kernel/tee_ta_manager.h>
19 #include <kernel/thread.h>
20 #include <kernel/tlb_helpers.h>
21 #include <kernel/user_mode_ctx.h>
22 #include <mm/core_memprot.h>
23 #include <mm/fobj.h>
24 #include <mm/tee_mm.h>
25 #include <mm/tee_pager.h>
26 #include <stdlib.h>
27 #include <sys/queue.h>
28 #include <tee_api_defines.h>
29 #include <trace.h>
30 #include <types_ext.h>
31 #include <utee_defines.h>
32 #include <util.h>
33 
34 
35 static struct tee_pager_area_head tee_pager_area_head =
36 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
37 
38 #define INVALID_PGIDX		UINT_MAX
39 #define PMEM_FLAG_DIRTY		BIT(0)
40 #define PMEM_FLAG_HIDDEN	BIT(1)
41 
42 /*
43  * struct tee_pager_pmem - Represents a physical page used for paging.
44  *
45  * @flags	Flags defined by PMEM_FLAG_* above
46  * @fobj_pgidx	Index of the page in the @fobj
47  * @fobj	File object of which a page is made visible.
48  * @va_alias	Virtual address where the physical page is always aliased.
49  *		Used during remapping of the page when the content needs to
50  *		be updated before it's available at the new location.
51  */
52 struct tee_pager_pmem {
53 	unsigned int flags;
54 	unsigned int fobj_pgidx;
55 	struct fobj *fobj;
56 	void *va_alias;
57 	TAILQ_ENTRY(tee_pager_pmem) link;
58 };
59 
60 /* The list of physical pages. The first page in the list is the oldest */
61 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
62 
63 static struct tee_pager_pmem_head tee_pager_pmem_head =
64 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
65 
66 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
67 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
68 
69 /* Maximum number of pages to hide at a time, one third of all paged pages */
70 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
71 
72 /* Number of registered physical pages, used when hiding pages. */
73 static size_t tee_pager_npages;
74 
75 #ifdef CFG_WITH_STATS
76 static struct tee_pager_stats pager_stats;
77 
78 static inline void incr_ro_hits(void)
79 {
80 	pager_stats.ro_hits++;
81 }
82 
83 static inline void incr_rw_hits(void)
84 {
85 	pager_stats.rw_hits++;
86 }
87 
88 static inline void incr_hidden_hits(void)
89 {
90 	pager_stats.hidden_hits++;
91 }
92 
93 static inline void incr_zi_released(void)
94 {
95 	pager_stats.zi_released++;
96 }
97 
98 static inline void incr_npages_all(void)
99 {
100 	pager_stats.npages_all++;
101 }
102 
103 static inline void set_npages(void)
104 {
105 	pager_stats.npages = tee_pager_npages;
106 }
107 
108 void tee_pager_get_stats(struct tee_pager_stats *stats)
109 {
110 	*stats = pager_stats;
111 
112 	pager_stats.hidden_hits = 0;
113 	pager_stats.ro_hits = 0;
114 	pager_stats.rw_hits = 0;
115 	pager_stats.zi_released = 0;
116 }
117 
118 #else /* CFG_WITH_STATS */
119 static inline void incr_ro_hits(void) { }
120 static inline void incr_rw_hits(void) { }
121 static inline void incr_hidden_hits(void) { }
122 static inline void incr_zi_released(void) { }
123 static inline void incr_npages_all(void) { }
124 static inline void set_npages(void) { }
125 
126 void tee_pager_get_stats(struct tee_pager_stats *stats)
127 {
128 	memset(stats, 0, sizeof(struct tee_pager_stats));
129 }
130 #endif /* CFG_WITH_STATS */
131 
132 #define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
133 #define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
134 #define TBL_SHIFT	SMALL_PAGE_SHIFT
135 
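/*
 * Virtual address range covered by the pager tables: the core VA
 * window starting at VCORE_START_VA and spanning TEE_RAM_VA_SIZE,
 * expanded outwards to CORE_MMU_PGDIR_SIZE boundaries so that each
 * struct pager_table below covers one complete translation table of
 * TBL_NUM_ENTRIES small pages.
 */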
136 #define EFFECTIVE_VA_SIZE \
137 	(ROUNDUP(VCORE_START_VA + TEE_RAM_VA_SIZE, CORE_MMU_PGDIR_SIZE) - \
138 	 ROUNDDOWN(VCORE_START_VA, CORE_MMU_PGDIR_SIZE))
139 
140 static struct pager_table {
141 	struct pgt pgt;
142 	struct core_mmu_table_info tbl_info;
143 } *pager_tables;
144 static unsigned int num_pager_tables;
145 
146 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
147 
148 /* Defines the range of the alias area */
149 static tee_mm_entry_t *pager_alias_area;
150 /*
151  * Physical pages are added to the alias area in a stack-like fashion.
152  * @pager_alias_next_free gives the address of the next free entry if
153  * @pager_alias_next_free is != 0.
154  */
155 static uintptr_t pager_alias_next_free;
156 
157 #ifdef CFG_TEE_CORE_DEBUG
158 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
159 
160 static uint32_t pager_lock_dldetect(const char *func, const int line,
161 				    struct abort_info *ai)
162 {
163 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
164 	unsigned int retries = 0;
165 	unsigned int reminder = 0;
166 
167 	while (!cpu_spin_trylock(&pager_spinlock)) {
168 		retries++;
169 		if (!retries) {
170 			/* wrapped, time to report */
171 			trace_printf(func, line, TRACE_ERROR, true,
172 				     "possible spinlock deadlock reminder %u",
173 				     reminder);
174 			if (reminder < UINT_MAX)
175 				reminder++;
176 			if (ai)
177 				abort_print(ai);
178 		}
179 	}
180 
181 	return exceptions;
182 }
183 #else
184 static uint32_t pager_lock(struct abort_info __unused *ai)
185 {
186 	return cpu_spin_lock_xsave(&pager_spinlock);
187 }
188 #endif
189 
190 static uint32_t pager_lock_check_stack(size_t stack_size)
191 {
192 	if (stack_size) {
193 		int8_t buf[stack_size];
194 		size_t n;
195 
196 		/*
197 		 * Make sure to touch all pages of the stack that we expect
198 		 * to use with this lock held. We need to take any potential
199 		 * page faults before the lock is taken or we'll deadlock
200 		 * the pager. The pages that are populated in this way will
201 		 * eventually be released at certain save transitions of
202 		 * the thread.
203 		 */
204 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
205 			io_write8((vaddr_t)buf + n, 1);
206 		io_write8((vaddr_t)buf + stack_size - 1, 1);
207 	}
208 
209 	return pager_lock(NULL);
210 }
211 
212 static void pager_unlock(uint32_t exceptions)
213 {
214 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
215 }
216 
217 void *tee_pager_phys_to_virt(paddr_t pa)
218 {
219 	struct core_mmu_table_info ti;
220 	unsigned idx;
221 	uint32_t a;
222 	paddr_t p;
223 	vaddr_t v;
224 	size_t n;
225 
226 	/*
227 	 * Most addresses are mapped linearly, try that first if possible.
228 	 */
229 	if (!tee_pager_get_table_info(pa, &ti))
230 		return NULL; /* impossible pa */
231 	idx = core_mmu_va2idx(&ti, pa);
232 	core_mmu_get_entry(&ti, idx, &p, &a);
233 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
234 		return (void *)core_mmu_idx2va(&ti, idx);
235 
236 	n = 0;
237 	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
238 	while (true) {
239 		while (idx < TBL_NUM_ENTRIES) {
240 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
241 			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
242 				return NULL;
243 
244 			core_mmu_get_entry(&pager_tables[n].tbl_info,
245 					   idx, &p, &a);
246 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
247 				return (void *)v;
248 			idx++;
249 		}
250 
251 		n++;
252 		if (n >= num_pager_tables)
253 			return NULL;
254 		idx = 0;
255 	}
256 
257 	return NULL;
258 }
259 
260 static bool pmem_is_hidden(struct tee_pager_pmem *pmem)
261 {
262 	return pmem->flags & PMEM_FLAG_HIDDEN;
263 }
264 
265 static bool pmem_is_dirty(struct tee_pager_pmem *pmem)
266 {
267 	return pmem->flags & PMEM_FLAG_DIRTY;
268 }
269 
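/*
 * Returns true if @pmem currently holds a page within @area's window
 * into the fobj, that is, the page belongs to the same fobj and falls
 * inside [fobj_pgoffs, fobj_pgoffs + size in pages).
 */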
270 static bool pmem_is_covered_by_area(struct tee_pager_pmem *pmem,
271 				    struct tee_pager_area *area)
272 {
273 	if (pmem->fobj != area->fobj)
274 		return false;
275 	if (pmem->fobj_pgidx < area->fobj_pgoffs)
276 		return false;
277 	if ((pmem->fobj_pgidx - area->fobj_pgoffs) >=
278 	    (area->size >> SMALL_PAGE_SHIFT))
279 		return false;
280 
281 	return true;
282 }
283 
284 static size_t pmem_get_area_tblidx(struct tee_pager_pmem *pmem,
285 				   struct tee_pager_area *area)
286 {
287 	size_t tbloffs = (area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT;
288 
289 	return pmem->fobj_pgidx - area->fobj_pgoffs + tbloffs;
290 }
291 
292 static struct pager_table *find_pager_table_may_fail(vaddr_t va)
293 {
294 	size_t n;
295 	const vaddr_t mask = CORE_MMU_PGDIR_MASK;
296 
297 	if (!pager_tables)
298 		return NULL;
299 
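	/*
	 * Index of the page directory covering va, relative to the
	 * first pager table: round va down to a CORE_MMU_PGDIR_SIZE
	 * boundary and count whole page directories from the first
	 * table's base address.
	 */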
300 	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
301 	    CORE_MMU_PGDIR_SHIFT;
302 	if (n >= num_pager_tables)
303 		return NULL;
304 
305 	assert(va >= pager_tables[n].tbl_info.va_base &&
306 	       va <= (pager_tables[n].tbl_info.va_base | mask));
307 
308 	return pager_tables + n;
309 }
310 
311 static struct pager_table *find_pager_table(vaddr_t va)
312 {
313 	struct pager_table *pt = find_pager_table_may_fail(va);
314 
315 	assert(pt);
316 	return pt;
317 }
318 
319 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
320 {
321 	struct pager_table *pt = find_pager_table_may_fail(va);
322 
323 	if (!pt)
324 		return false;
325 
326 	*ti = pt->tbl_info;
327 	return true;
328 }
329 
330 static struct core_mmu_table_info *find_table_info(vaddr_t va)
331 {
332 	return &find_pager_table(va)->tbl_info;
333 }
334 
335 static struct pgt *find_core_pgt(vaddr_t va)
336 {
337 	return &find_pager_table(va)->pgt;
338 }
339 
340 void tee_pager_set_alias_area(tee_mm_entry_t *mm)
341 {
342 	struct pager_table *pt;
343 	unsigned idx;
344 	vaddr_t smem = tee_mm_get_smem(mm);
345 	size_t nbytes = tee_mm_get_bytes(mm);
346 	vaddr_t v;
347 	uint32_t a = 0;
348 
349 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
350 
351 	assert(!pager_alias_area);
352 	pager_alias_area = mm;
353 	pager_alias_next_free = smem;
354 
355 	/* Clear all mappings in the alias area */
356 	pt = find_pager_table(smem);
357 	idx = core_mmu_va2idx(&pt->tbl_info, smem);
358 	while (pt <= (pager_tables + num_pager_tables - 1)) {
359 		while (idx < TBL_NUM_ENTRIES) {
360 			v = core_mmu_idx2va(&pt->tbl_info, idx);
361 			if (v >= (smem + nbytes))
362 				goto out;
363 
364 			core_mmu_get_entry(&pt->tbl_info, idx, NULL, &a);
365 			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
366 			if (a & TEE_MATTR_VALID_BLOCK)
367 				pgt_dec_used_entries(&pt->pgt);
368 			idx++;
369 		}
370 
371 		pt++;
372 		idx = 0;
373 	}
374 
375 out:
376 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
377 }
378 
379 static size_t tbl_usage_count(struct core_mmu_table_info *ti)
380 {
381 	size_t n;
382 	uint32_t a = 0;
383 	size_t usage = 0;
384 
385 	for (n = 0; n < ti->num_entries; n++) {
386 		core_mmu_get_entry(ti, n, NULL, &a);
387 		if (a & TEE_MATTR_VALID_BLOCK)
388 			usage++;
389 	}
390 	return usage;
391 }
392 
393 static void area_get_entry(struct tee_pager_area *area, size_t idx,
394 			   paddr_t *pa, uint32_t *attr)
395 {
396 	assert(area->pgt);
397 	assert(idx < TBL_NUM_ENTRIES);
398 	core_mmu_get_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
399 }
400 
401 static void area_set_entry(struct tee_pager_area *area, size_t idx,
402 			   paddr_t pa, uint32_t attr)
403 {
404 	assert(area->pgt);
405 	assert(idx < TBL_NUM_ENTRIES);
406 	core_mmu_set_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
407 }
408 
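/*
 * area_va2idx()/area_idx2va() translate between a virtual address and
 * its index in the translation table holding the area. The index is
 * relative to the CORE_MMU_PGDIR_SIZE aligned base of the table, not
 * to area->base, matching what pmem_get_area_tblidx() returns.
 */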
409 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
410 {
411 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
412 }
413 
414 static vaddr_t area_idx2va(struct tee_pager_area *area, size_t idx)
415 {
416 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
417 }
418 
419 static void area_tlbi_page_va(struct tee_pager_area *area, vaddr_t va)
420 {
421 #if defined(CFG_PAGED_USER_TA)
422 	assert(area->pgt);
423 	if (area->pgt->ctx) {
424 		uint32_t asid = to_user_mode_ctx(area->pgt->ctx)->vm_info.asid;
425 
426 		tlbi_mva_asid(va, asid);
427 		return;
428 	}
429 #endif
430 	tlbi_mva_allasid(va);
431 }
432 
433 static void area_tlbi_entry(struct tee_pager_area *area, size_t idx)
434 {
435 	area_tlbi_page_va(area, area_idx2va(area, idx));
436 }
437 
438 static void pmem_clear(struct tee_pager_pmem *pmem)
439 {
440 	pmem->fobj = NULL;
441 	pmem->fobj_pgidx = INVALID_PGIDX;
442 	pmem->flags = 0;
443 }
444 
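/*
 * Removes the translation table entries for @pmem in every area that
 * currently maps it. If @only_this_pgt is non-NULL, only areas using
 * that particular translation table are unmapped, as done from
 * tee_pager_pgt_save_and_release_entries().
 */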
445 static void pmem_unmap(struct tee_pager_pmem *pmem, struct pgt *only_this_pgt)
446 {
447 	struct tee_pager_area *area = NULL;
448 	size_t tblidx = 0;
449 	uint32_t a = 0;
450 
451 	TAILQ_FOREACH(area, &pmem->fobj->areas, fobj_link) {
452 		/*
453 		 * If only_this_pgt points to a pgt then the pgt of this
454 		 * area has to match or we'll skip over it.
455 		 */
456 		if (only_this_pgt && area->pgt != only_this_pgt)
457 			continue;
458 		if (!area->pgt || !pmem_is_covered_by_area(pmem, area))
459 			continue;
460 		tblidx = pmem_get_area_tblidx(pmem, area);
461 		area_get_entry(area, tblidx, NULL, &a);
462 		if (a & TEE_MATTR_VALID_BLOCK) {
463 			area_set_entry(area, tblidx, 0, 0);
464 			pgt_dec_used_entries(area->pgt);
465 			area_tlbi_entry(area, tblidx);
466 		}
467 	}
468 }
469 
470 void tee_pager_early_init(void)
471 {
472 	size_t n = 0;
473 
474 	num_pager_tables = EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE;
475 	pager_tables = calloc(num_pager_tables, sizeof(*pager_tables));
476 	if (!pager_tables)
477 		panic("Cannot allocate pager_tables");
478 
479 	/*
480 	 * Note that this depends on add_pager_vaspace() adding vaspace
481 	 * after the end of memory.
482 	 */
483 	for (n = 0; n < num_pager_tables; n++) {
484 		if (!core_mmu_find_table(NULL, VCORE_START_VA +
485 					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
486 					 &pager_tables[n].tbl_info))
487 			panic("can't find mmu tables");
488 
489 		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
490 			panic("Unsupported page size in translation table");
491 		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
492 		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);
493 
494 		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
495 		pgt_set_used_entries(&pager_tables[n].pgt,
496 				tbl_usage_count(&pager_tables[n].tbl_info));
497 	}
498 }
499 
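/*
 * Maps the physical page @pa at the next free slot in the alias area
 * and returns the alias virtual address. @pager_alias_next_free is
 * set to 0 once the last slot has been handed out.
 */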
500 static void *pager_add_alias_page(paddr_t pa)
501 {
502 	unsigned idx;
503 	struct core_mmu_table_info *ti;
504 	/* Alias pages are mapped without write permission: enabled at runtime when needed */
505 	uint32_t attr = TEE_MATTR_VALID_BLOCK |
506 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
507 			TEE_MATTR_SECURE | TEE_MATTR_PR;
508 
509 	DMSG("0x%" PRIxPA, pa);
510 
511 	ti = find_table_info(pager_alias_next_free);
512 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
513 	core_mmu_set_entry(ti, idx, pa, attr);
514 	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
515 	pager_alias_next_free += SMALL_PAGE_SIZE;
516 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
517 				      tee_mm_get_bytes(pager_alias_area)))
518 		pager_alias_next_free = 0;
519 	return (void *)core_mmu_idx2va(ti, idx);
520 }
521 
522 static void area_insert(struct tee_pager_area_head *head,
523 			struct tee_pager_area *area,
524 			struct tee_pager_area *a_prev)
525 {
526 	uint32_t exceptions = pager_lock_check_stack(8);
527 
528 	if (a_prev)
529 		TAILQ_INSERT_AFTER(head, a_prev, area, link);
530 	else
531 		TAILQ_INSERT_HEAD(head, area, link);
532 	TAILQ_INSERT_TAIL(&area->fobj->areas, area, fobj_link);
533 
534 	pager_unlock(exceptions);
535 }
536 DECLARE_KEEP_PAGER(area_insert);
537 
538 void tee_pager_add_core_area(vaddr_t base, enum tee_pager_area_type type,
539 			     struct fobj *fobj)
540 {
541 	struct tee_pager_area *area = NULL;
542 	uint32_t flags = 0;
543 	size_t fobj_pgoffs = 0;
544 	vaddr_t b = base;
545 	size_t s = 0;
546 	size_t s2 = 0;
547 
548 	assert(fobj);
549 	s = fobj->num_pages * SMALL_PAGE_SIZE;
550 
551 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : type %d", base, base + s, type);
552 
553 	if (base & SMALL_PAGE_MASK || !s) {
554 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, s);
555 		panic();
556 	}
557 
558 	switch (type) {
559 	case PAGER_AREA_TYPE_RO:
560 		flags = TEE_MATTR_PRX;
561 		break;
562 	case PAGER_AREA_TYPE_RW:
563 	case PAGER_AREA_TYPE_LOCK:
564 		flags = TEE_MATTR_PRW;
565 		break;
566 	default:
567 		panic();
568 	}
569 
570 	while (s) {
571 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
572 		area = calloc(1, sizeof(*area));
573 		if (!area)
574 			panic("alloc_area");
575 
576 		area->fobj = fobj_get(fobj);
577 		area->fobj_pgoffs = fobj_pgoffs;
578 		area->type = type;
579 		area->pgt = find_core_pgt(b);
580 		area->base = b;
581 		area->size = s2;
582 		area->flags = flags;
583 		area_insert(&tee_pager_area_head, area, NULL);
584 
585 		b += s2;
586 		s -= s2;
587 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
588 	}
589 }
590 
591 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
592 					vaddr_t va)
593 {
594 	struct tee_pager_area *area;
595 
596 	if (!areas)
597 		return NULL;
598 
599 	TAILQ_FOREACH(area, areas, link) {
600 		if (core_is_buffer_inside(va, 1, area->base, area->size))
601 			return area;
602 	}
603 	return NULL;
604 }
605 
606 #ifdef CFG_PAGED_USER_TA
607 static struct tee_pager_area *find_uta_area(vaddr_t va)
608 {
609 	struct ts_ctx *ctx = thread_get_tsd()->ctx;
610 
611 	if (!is_user_mode_ctx(ctx))
612 		return NULL;
613 	return find_area(to_user_mode_ctx(ctx)->areas, va);
614 }
615 #else
616 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
617 {
618 	return NULL;
619 }
620 #endif /*CFG_PAGED_USER_TA*/
621 
622 
623 static uint32_t get_area_mattr(uint32_t area_flags)
624 {
625 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
626 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
627 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
628 
629 	return attr;
630 }
631 
632 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
633 {
634 	struct core_mmu_table_info *ti;
635 	paddr_t pa;
636 	unsigned idx;
637 
638 	ti = find_table_info((vaddr_t)pmem->va_alias);
639 	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
640 	core_mmu_get_entry(ti, idx, &pa, NULL);
641 	return pa;
642 }
643 
644 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
645 			void *va_alias)
646 {
647 	size_t fobj_pgoffs = ((page_va - area->base) >> SMALL_PAGE_SHIFT) +
648 			     area->fobj_pgoffs;
649 	struct core_mmu_table_info *ti;
650 	uint32_t attr_alias;
651 	paddr_t pa_alias;
652 	unsigned int idx_alias;
653 
654 	/* Ensure we are allowed to write to the aliased virtual page */
655 	ti = find_table_info((vaddr_t)va_alias);
656 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
657 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
658 	if (!(attr_alias & TEE_MATTR_PW)) {
659 		attr_alias |= TEE_MATTR_PW;
660 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
661 		tlbi_mva_allasid((vaddr_t)va_alias);
662 	}
663 
664 	asan_tag_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
665 	if (fobj_load_page(area->fobj, fobj_pgoffs, va_alias)) {
666 		EMSG("PH 0x%" PRIxVA " failed", page_va);
667 		panic();
668 	}
669 	switch (area->type) {
670 	case PAGER_AREA_TYPE_RO:
671 		incr_ro_hits();
672 		/* Forbid write to aliases for read-only (maybe exec) pages */
673 		attr_alias &= ~TEE_MATTR_PW;
674 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
675 		tlbi_mva_allasid((vaddr_t)va_alias);
676 		break;
677 	case PAGER_AREA_TYPE_RW:
678 		incr_rw_hits();
679 		break;
680 	case PAGER_AREA_TYPE_LOCK:
681 		break;
682 	default:
683 		panic();
684 	}
685 	asan_tag_no_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
686 }
687 
688 static void tee_pager_save_page(struct tee_pager_pmem *pmem)
689 {
690 	if (pmem_is_dirty(pmem)) {
691 		asan_tag_access(pmem->va_alias,
692 				(uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
693 		if (fobj_save_page(pmem->fobj, pmem->fobj_pgidx,
694 				   pmem->va_alias))
695 			panic("fobj_save_page");
696 		asan_tag_no_access(pmem->va_alias,
697 				   (uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
698 	}
699 }
700 
701 #ifdef CFG_PAGED_USER_TA
702 static void unlink_area(struct tee_pager_area_head *area_head,
703 			struct tee_pager_area *area)
704 {
705 	uint32_t exceptions = pager_lock_check_stack(64);
706 
707 	TAILQ_REMOVE(area_head, area, link);
708 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
709 
710 	pager_unlock(exceptions);
711 }
712 DECLARE_KEEP_PAGER(unlink_area);
713 
714 static void free_area(struct tee_pager_area *area)
715 {
716 	fobj_put(area->fobj);
717 	free(area);
718 }
719 
720 static TEE_Result pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base,
721 				    struct fobj *fobj, uint32_t prot)
722 {
723 	struct tee_pager_area *a_prev = NULL;
724 	struct tee_pager_area *area = NULL;
725 	vaddr_t b = base;
726 	size_t fobj_pgoffs = 0;
727 	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
728 
729 	if (!uctx->areas) {
730 		uctx->areas = malloc(sizeof(*uctx->areas));
731 		if (!uctx->areas)
732 			return TEE_ERROR_OUT_OF_MEMORY;
733 		TAILQ_INIT(uctx->areas);
734 	}
735 
736 	area = TAILQ_FIRST(uctx->areas);
737 	while (area) {
738 		if (core_is_buffer_intersect(b, s, area->base,
739 					     area->size))
740 			return TEE_ERROR_BAD_PARAMETERS;
741 		if (b < area->base)
742 			break;
743 		a_prev = area;
744 		area = TAILQ_NEXT(area, link);
745 	}
746 
747 	while (s) {
748 		size_t s2;
749 
750 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
751 		area = calloc(1, sizeof(*area));
752 		if (!area)
753 			return TEE_ERROR_OUT_OF_MEMORY;
754 
755 		/* Table info will be set when the context is activated. */
756 		area->fobj = fobj_get(fobj);
757 		area->fobj_pgoffs = fobj_pgoffs;
758 		area->type = PAGER_AREA_TYPE_RW;
759 		area->base = b;
760 		area->size = s2;
761 		area->flags = prot;
762 
763 		area_insert(uctx->areas, area, a_prev);
764 
765 		a_prev = area;
766 		b += s2;
767 		s -= s2;
768 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
769 	}
770 
771 	return TEE_SUCCESS;
772 }
773 
774 TEE_Result tee_pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base,
775 				 struct fobj *fobj, uint32_t prot)
776 {
777 	TEE_Result res = TEE_SUCCESS;
778 	struct thread_specific_data *tsd = thread_get_tsd();
779 	struct tee_pager_area *area = NULL;
780 	struct core_mmu_table_info dir_info = { NULL };
781 
782 	if (uctx->ts_ctx != tsd->ctx) {
783 		/*
784 		 * Changes are to a utc that isn't active. Just add the
785 		 * areas; page tables will be dealt with later.
786 		 */
787 		return pager_add_um_area(uctx, base, fobj, prot);
788 	}
789 
790 	/*
791 	 * Assign page tables before adding areas to be able to tell which
792 	 * are newly added and should be removed in case of failure.
793 	 */
794 	tee_pager_assign_um_tables(uctx);
795 	res = pager_add_um_area(uctx, base, fobj, prot);
796 	if (res) {
797 		struct tee_pager_area *next_a;
798 
799 		/* Remove all added areas */
800 		TAILQ_FOREACH_SAFE(area, uctx->areas, link, next_a) {
801 			if (!area->pgt) {
802 				unlink_area(uctx->areas, area);
803 				free_area(area);
804 			}
805 		}
806 		return res;
807 	}
808 
809 	/*
810 	 * Assign page tables to the new areas and make sure that the page
811 	 * tables are registered in the upper table.
812 	 */
813 	tee_pager_assign_um_tables(uctx);
814 	core_mmu_get_user_pgdir(&dir_info);
815 	TAILQ_FOREACH(area, uctx->areas, link) {
816 		paddr_t pa;
817 		size_t idx;
818 		uint32_t attr;
819 
820 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
821 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
822 
823 		/*
824 		 * Check if the page table is already in use; if it is,
825 		 * it's already registered.
826 		 */
827 		if (area->pgt->num_used_entries) {
828 			assert(attr & TEE_MATTR_TABLE);
829 			assert(pa == virt_to_phys(area->pgt->tbl));
830 			continue;
831 		}
832 
833 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
834 		pa = virt_to_phys(area->pgt->tbl);
835 		assert(pa);
836 		/*
837 		 * Note that the update of the table entry is guaranteed to
838 		 * be atomic.
839 		 */
840 		core_mmu_set_entry(&dir_info, idx, pa, attr);
841 	}
842 
843 	return TEE_SUCCESS;
844 }
845 
846 static void split_area(struct tee_pager_area_head *area_head,
847 		       struct tee_pager_area *area, struct tee_pager_area *a2,
848 		       vaddr_t va)
849 {
850 	uint32_t exceptions = pager_lock_check_stack(64);
851 	size_t diff = va - area->base;
852 
853 	a2->fobj = fobj_get(area->fobj);
854 	a2->fobj_pgoffs = area->fobj_pgoffs + diff / SMALL_PAGE_SIZE;
855 	a2->type = area->type;
856 	a2->flags = area->flags;
857 	a2->base = va;
858 	a2->size = area->size - diff;
859 	a2->pgt = area->pgt;
860 	area->size = diff;
861 
862 	TAILQ_INSERT_AFTER(area_head, area, a2, link);
863 	TAILQ_INSERT_AFTER(&area->fobj->areas, area, a2, fobj_link);
864 
865 	pager_unlock(exceptions);
866 }
867 DECLARE_KEEP_PAGER(split_area);
868 
869 TEE_Result tee_pager_split_um_region(struct user_mode_ctx *uctx, vaddr_t va)
870 {
871 	struct tee_pager_area *area = NULL;
872 	struct tee_pager_area *a2 = NULL;
873 
874 	if (va & SMALL_PAGE_MASK)
875 		return TEE_ERROR_BAD_PARAMETERS;
876 
877 	TAILQ_FOREACH(area, uctx->areas, link) {
878 		if (va == area->base || va == area->base + area->size)
879 			return TEE_SUCCESS;
880 		if (va > area->base && va < area->base + area->size) {
881 			a2 = calloc(1, sizeof(*a2));
882 			if (!a2)
883 				return TEE_ERROR_OUT_OF_MEMORY;
884 			split_area(uctx->areas, area, a2, va);
885 			return TEE_SUCCESS;
886 		}
887 	}
888 
889 	return TEE_SUCCESS;
890 }
891 
892 static void merge_area_with_next(struct tee_pager_area_head *area_head,
893 				 struct tee_pager_area *a,
894 				 struct tee_pager_area *a_next)
895 {
896 	uint32_t exceptions = pager_lock_check_stack(64);
897 
898 	TAILQ_REMOVE(area_head, a_next, link);
899 	TAILQ_REMOVE(&a_next->fobj->areas, a_next, fobj_link);
900 	a->size += a_next->size;
901 
902 	pager_unlock(exceptions);
903 }
904 DECLARE_KEEP_PAGER(merge_area_with_next);
905 
906 void tee_pager_merge_um_region(struct user_mode_ctx *uctx, vaddr_t va,
907 			       size_t len)
908 {
909 	struct tee_pager_area *a_next = NULL;
910 	struct tee_pager_area *a = NULL;
911 	vaddr_t end_va = 0;
912 
913 	if ((va | len) & SMALL_PAGE_MASK)
914 		return;
915 	if (ADD_OVERFLOW(va, len, &end_va))
916 		return;
917 
918 	for (a = TAILQ_FIRST(uctx->areas);; a = a_next) {
919 		a_next = TAILQ_NEXT(a, link);
920 		if (!a_next)
921 			return;
922 
923 		/* Try merging with the area just before va */
924 		if (a->base + a->size < va)
925 			continue;
926 
927 		/*
928 		 * If a->base is well past our range we're done.
929 		 * Note that if it's just the page after our range we'll
930 		 * try to merge.
931 		 */
932 		if (a->base > end_va)
933 			return;
934 
935 		if (a->base + a->size != a_next->base)
936 			continue;
937 		if (a->fobj != a_next->fobj || a->type != a_next->type ||
938 		    a->flags != a_next->flags || a->pgt != a_next->pgt)
939 			continue;
940 		if (a->fobj_pgoffs + a->size / SMALL_PAGE_SIZE !=
941 		    a_next->fobj_pgoffs)
942 			continue;
943 
944 		merge_area_with_next(uctx->areas, a, a_next);
945 		free_area(a_next);
946 		a_next = a;
947 	}
948 }
949 
950 static void rem_area(struct tee_pager_area_head *area_head,
951 		     struct tee_pager_area *area)
952 {
953 	struct tee_pager_pmem *pmem;
954 	size_t last_pgoffs = area->fobj_pgoffs +
955 			     (area->size >> SMALL_PAGE_SHIFT) - 1;
956 	uint32_t exceptions;
957 	size_t idx = 0;
958 	uint32_t a = 0;
959 
960 	exceptions = pager_lock_check_stack(64);
961 
962 	TAILQ_REMOVE(area_head, area, link);
963 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
964 
965 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
966 		if (pmem->fobj != area->fobj ||
967 		    pmem->fobj_pgidx < area->fobj_pgoffs ||
968 		    pmem->fobj_pgidx > last_pgoffs)
969 			continue;
970 
971 		idx = pmem_get_area_tblidx(pmem, area);
972 		area_get_entry(area, idx, NULL, &a);
973 		if (!(a & TEE_MATTR_VALID_BLOCK))
974 			continue;
975 
976 		area_set_entry(area, idx, 0, 0);
977 		area_tlbi_entry(area, idx);
978 		pgt_dec_used_entries(area->pgt);
979 	}
980 
981 	pager_unlock(exceptions);
982 
983 	free_area(area);
984 }
985 DECLARE_KEEP_PAGER(rem_area);
986 
987 void tee_pager_rem_um_region(struct user_mode_ctx *uctx, vaddr_t base,
988 			     size_t size)
989 {
990 	struct tee_pager_area *area;
991 	struct tee_pager_area *next_a;
992 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
993 
994 	TAILQ_FOREACH_SAFE(area, uctx->areas, link, next_a) {
995 		if (core_is_buffer_inside(area->base, area->size, base, s))
996 			rem_area(uctx->areas, area);
997 	}
998 	tlbi_asid(uctx->vm_info.asid);
999 }
1000 
1001 void tee_pager_rem_um_areas(struct user_mode_ctx *uctx)
1002 {
1003 	struct tee_pager_area *area = NULL;
1004 
1005 	if (!uctx->areas)
1006 		return;
1007 
1008 	while (true) {
1009 		area = TAILQ_FIRST(uctx->areas);
1010 		if (!area)
1011 			break;
1012 		unlink_area(uctx->areas, area);
1013 		free_area(area);
1014 	}
1015 
1016 	free(uctx->areas);
1017 }
1018 
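/*
 * Returns true if all areas mapping pmem->fobj belong to the same
 * context. Only used in an assert() to check that a physical page
 * isn't shared between contexts before doing i-cache maintenance on
 * its user virtual address.
 */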
1019 static bool __maybe_unused same_context(struct tee_pager_pmem *pmem)
1020 {
1021 	struct tee_pager_area *a = TAILQ_FIRST(&pmem->fobj->areas);
1022 	void *ctx = a->pgt->ctx;
1023 
1024 	do {
1025 		a = TAILQ_NEXT(a, fobj_link);
1026 		if (!a)
1027 			return true;
1028 	} while (a->pgt->ctx == ctx);
1029 
1030 	return false;
1031 }
1032 
1033 bool tee_pager_set_um_area_attr(struct user_mode_ctx *uctx, vaddr_t base,
1034 				size_t size, uint32_t flags)
1035 {
1036 	bool ret = false;
1037 	vaddr_t b = base;
1038 	size_t s = size;
1039 	size_t s2 = 0;
1040 	struct tee_pager_area *area = find_area(uctx->areas, b);
1041 	uint32_t exceptions = 0;
1042 	struct tee_pager_pmem *pmem = NULL;
1043 	uint32_t a = 0;
1044 	uint32_t f = 0;
1045 	uint32_t mattr = 0;
1046 	uint32_t f2 = 0;
1047 	size_t tblidx = 0;
1048 
1049 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
1050 	if (f & TEE_MATTR_UW)
1051 		f |= TEE_MATTR_PW;
1052 	mattr = get_area_mattr(f);
1053 
1054 	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1055 
1056 	while (s) {
1057 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
1058 		if (!area || area->base != b || area->size != s2) {
1059 			ret = false;
1060 			goto out;
1061 		}
1062 		b += s2;
1063 		s -= s2;
1064 
1065 		if (area->flags == f)
1066 			goto next_area;
1067 
1068 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1069 			if (!pmem_is_covered_by_area(pmem, area))
1070 				continue;
1071 
1072 			tblidx = pmem_get_area_tblidx(pmem, area);
1073 			area_get_entry(area, tblidx, NULL, &a);
1074 			if (a == f)
1075 				continue;
1076 			area_set_entry(area, tblidx, 0, 0);
1077 			area_tlbi_entry(area, tblidx);
1078 
1079 			pmem->flags &= ~PMEM_FLAG_HIDDEN;
1080 			if (pmem_is_dirty(pmem))
1081 				f2 = mattr;
1082 			else
1083 				f2 = mattr & ~(TEE_MATTR_UW | TEE_MATTR_PW);
1084 			area_set_entry(area, tblidx, get_pmem_pa(pmem), f2);
1085 			if (!(a & TEE_MATTR_VALID_BLOCK))
1086 				pgt_inc_used_entries(area->pgt);
1087 			/*
1088 			 * Make sure the table update is visible before
1089 			 * continuing.
1090 			 */
1091 			dsb_ishst();
1092 
1093 			/*
1094 			 * There's a problem if this page is already shared:
1095 			 * we would need to do an icache invalidation for each
1096 			 * context in which it is shared. In practice this will
1097 			 * never happen.
1098 			 */
1099 			if (flags & TEE_MATTR_UX) {
1100 				void *va = (void *)area_idx2va(area, tblidx);
1101 
1102 				/* Assert that the pmem isn't shared. */
1103 				assert(same_context(pmem));
1104 
1105 				dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1106 				icache_inv_user_range(va, SMALL_PAGE_SIZE);
1107 			}
1108 		}
1109 
1110 		area->flags = f;
1111 next_area:
1112 		area = TAILQ_NEXT(area, link);
1113 	}
1114 
1115 	ret = true;
1116 out:
1117 	pager_unlock(exceptions);
1118 	return ret;
1119 }
1120 
1121 DECLARE_KEEP_PAGER(tee_pager_set_um_area_attr);
1122 #endif /*CFG_PAGED_USER_TA*/
1123 
1124 void tee_pager_invalidate_fobj(struct fobj *fobj)
1125 {
1126 	struct tee_pager_pmem *pmem;
1127 	uint32_t exceptions;
1128 
1129 	exceptions = pager_lock_check_stack(64);
1130 
1131 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
1132 		if (pmem->fobj == fobj)
1133 			pmem_clear(pmem);
1134 
1135 	pager_unlock(exceptions);
1136 }
1137 DECLARE_KEEP_PAGER(tee_pager_invalidate_fobj);
1138 
1139 static struct tee_pager_pmem *pmem_find(struct tee_pager_area *area,
1140 					unsigned int tblidx)
1141 {
1142 	struct tee_pager_pmem *pmem = NULL;
1143 
1144 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
1145 		if (pmem->fobj == area->fobj &&
1146 		    pmem_get_area_tblidx(pmem, area) == tblidx)
1147 			return pmem;
1148 
1149 	return NULL;
1150 }
1151 
1152 static bool tee_pager_unhide_page(struct tee_pager_area *area, vaddr_t page_va)
1153 {
1154 	unsigned int tblidx = area_va2idx(area, page_va);
1155 	struct tee_pager_pmem *pmem = pmem_find(area, tblidx);
1156 	uint32_t a = get_area_mattr(area->flags);
1157 	uint32_t attr = 0;
1158 	paddr_t pa = 0;
1159 
1160 	if (!pmem)
1161 		return false;
1162 
1163 	area_get_entry(area, tblidx, NULL, &attr);
1164 	if (attr & TEE_MATTR_VALID_BLOCK)
1165 		return false;
1166 
1167 	/*
1168 	 * The page is hidden, or not mapped yet. Unhide the page and
1169 	 * move it to the tail.
1170 	 *
1171 	 * Since the page isn't mapped there doesn't exist a valid TLB entry
1172 	 * for this address, so no TLB invalidation is required after setting
1173 	 * the new entry. A DSB is needed though, to make the write visible.
1174 	 *
1175 	 * For user executable pages it's more complicated. Those pages can
1176 	 * be shared between multiple TA mappings and thus populated by
1177 	 * another TA. The reference manual states that:
1178 	 *
1179 	 * "instruction cache maintenance is required only after writing
1180 	 * new data to a physical address that holds an instruction."
1181 	 *
1182 	 * So for hidden pages we would not need to invalidate i-cache, but
1183 	 * for newly populated pages we do. Since we don't know which, we
1184 	 * have to assume the worst and always invalidate the i-cache. We
1185 	 * don't need to clean the d-cache though, since that has already
1186 	 * been done earlier.
1187 	 *
1188 	 * Additional bookkeeping to tell if the i-cache invalidation is
1189 	 * needed or not is left as a future optimization.
1190 	 */
1191 
1192 	/* If it's not a dirty block, then it should be read only. */
1193 	if (!pmem_is_dirty(pmem))
1194 		a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1195 
1196 	pa = get_pmem_pa(pmem);
1197 	pmem->flags &= ~PMEM_FLAG_HIDDEN;
1198 	if (area->flags & TEE_MATTR_UX) {
1199 		void *va = (void *)area_idx2va(area, tblidx);
1200 
1201 		/* Set a temporary read-only mapping */
1202 		assert(!(a & (TEE_MATTR_UW | TEE_MATTR_PW)));
1203 		area_set_entry(area, tblidx, pa, a & ~TEE_MATTR_UX);
1204 		dsb_ishst();
1205 
1206 		icache_inv_user_range(va, SMALL_PAGE_SIZE);
1207 
1208 		/* Set the final mapping */
1209 		area_set_entry(area, tblidx, pa, a);
1210 		area_tlbi_entry(area, tblidx);
1211 	} else {
1212 		area_set_entry(area, tblidx, pa, a);
1213 		dsb_ishst();
1214 	}
1215 	pgt_inc_used_entries(area->pgt);
1216 
1217 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1218 	TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1219 	incr_hidden_hits();
1220 	return true;
1221 }
1222 
1223 static void tee_pager_hide_pages(void)
1224 {
1225 	struct tee_pager_pmem *pmem = NULL;
1226 	size_t n = 0;
1227 
1228 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1229 		if (n >= TEE_PAGER_NHIDE)
1230 			break;
1231 		n++;
1232 
1233 		/* we cannot hide pages when pmem->fobj is not defined. */
1234 		if (!pmem->fobj)
1235 			continue;
1236 
1237 		if (pmem_is_hidden(pmem))
1238 			continue;
1239 
1240 		pmem->flags |= PMEM_FLAG_HIDDEN;
1241 		pmem_unmap(pmem, NULL);
1242 	}
1243 }
1244 
1245 static unsigned int __maybe_unused
1246 num_areas_with_pmem(struct tee_pager_pmem *pmem)
1247 {
1248 	struct tee_pager_area *a = NULL;
1249 	unsigned int num_matches = 0;
1250 
1251 	TAILQ_FOREACH(a, &pmem->fobj->areas, fobj_link)
1252 		if (pmem_is_covered_by_area(pmem, a))
1253 			num_matches++;
1254 
1255 	return num_matches;
1256 }
1257 
1258 /*
1259  * Find the mapped pmem, unmap it and move it back to the pageable pmem list.
1260  * Return false if the page was not mapped, and true if it was.
1261  */
1262 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1263 				       vaddr_t page_va)
1264 {
1265 	struct tee_pager_pmem *pmem;
1266 	size_t tblidx = 0;
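	/*
	 * Index within the fobj of the page backing page_va: its index
	 * within the page directory minus the area's offset into that
	 * page directory, plus the area's offset into the fobj.
	 */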
1267 	size_t pgidx = area_va2idx(area, page_va) + area->fobj_pgoffs -
1268 		       ((area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT);
1269 
1270 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1271 		if (pmem->fobj != area->fobj || pmem->fobj_pgidx != pgidx)
1272 			continue;
1273 
1274 		/*
1275 		 * Locked pages may not be shared. We're asserting that the
1276 		 * number of areas using this pmem is one and only one as
1277 		 * we're about to unmap it.
1278 		 */
1279 		assert(num_areas_with_pmem(pmem) == 1);
1280 
1281 		tblidx = pmem_get_area_tblidx(pmem, area);
1282 		area_set_entry(area, tblidx, 0, 0);
1283 		pgt_dec_used_entries(area->pgt);
1284 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1285 		pmem_clear(pmem);
1286 		tee_pager_npages++;
1287 		set_npages();
1288 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1289 		incr_zi_released();
1290 		return true;
1291 	}
1292 
1293 	return false;
1294 }
1295 
1296 static void make_dirty_page(struct tee_pager_pmem *pmem,
1297 			    struct tee_pager_area *area, unsigned int tblidx,
1298 			    paddr_t pa, vaddr_t page_va)
1299 {
1300 	assert(area->flags & (TEE_MATTR_UW | TEE_MATTR_PW));
1301 	assert(!(pmem->flags & PMEM_FLAG_DIRTY));
1302 
1303 	FMSG("Dirty %#"PRIxVA, page_va);
1304 	pmem->flags |= PMEM_FLAG_DIRTY;
1305 	area_set_entry(area, tblidx, pa, get_area_mattr(area->flags));
1306 	area_tlbi_page_va(area, page_va);
1307 }
1308 
1309 /* Finds the oldest page and unmaps it from all tables */
1310 static struct tee_pager_pmem *tee_pager_get_page(enum tee_pager_area_type at)
1311 {
1312 	struct tee_pager_pmem *pmem;
1313 
1314 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1315 	if (!pmem) {
1316 		EMSG("No pmem entries");
1317 		return NULL;
1318 	}
1319 
1320 	if (pmem->fobj) {
1321 		pmem_unmap(pmem, NULL);
1322 		tee_pager_save_page(pmem);
1323 	}
1324 
1325 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1326 	pmem_clear(pmem);
1327 	if (at == PAGER_AREA_TYPE_LOCK) {
1328 		/* Move page to lock list */
1329 		if (tee_pager_npages <= 0)
1330 			panic("running out of page");
1331 		tee_pager_npages--;
1332 		set_npages();
1333 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1334 	} else {
1335 		/* move page to back */
1336 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1337 	}
1338 
1339 	return pmem;
1340 }
1341 
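/*
 * Returns false if the faulting page isn't mapped at all, in which
 * case the caller should page it in. Returns true when the abort has
 * been fully processed here: *handled is then true if the access is
 * now permitted (for instance, another core already mapped the page,
 * or a read-only page was marked dirty and made writable), and false
 * if it's a genuine access violation to be reported to the user TA.
 */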
1342 static bool pager_update_permissions(struct tee_pager_area *area,
1343 			struct abort_info *ai, bool *handled)
1344 {
1345 	unsigned int pgidx = area_va2idx(area, ai->va);
1346 	struct tee_pager_pmem *pmem = NULL;
1347 	uint32_t attr = 0;
1348 	paddr_t pa = 0;
1349 
1350 	*handled = false;
1351 
1352 	area_get_entry(area, pgidx, &pa, &attr);
1353 
1354 	/* Not mapped */
1355 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1356 		return false;
1357 
1358 	/* Not readable, should not happen */
1359 	if (abort_is_user_exception(ai)) {
1360 		if (!(attr & TEE_MATTR_UR))
1361 			return true;
1362 	} else {
1363 		if (!(attr & TEE_MATTR_PR)) {
1364 			abort_print_error(ai);
1365 			panic();
1366 		}
1367 	}
1368 
1369 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1370 	case CORE_MMU_FAULT_TRANSLATION:
1371 	case CORE_MMU_FAULT_READ_PERMISSION:
1372 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1373 			/* Check attempting to execute from an NOX page */
1374 			if (abort_is_user_exception(ai)) {
1375 				if (!(attr & TEE_MATTR_UX))
1376 					return true;
1377 			} else {
1378 				if (!(attr & TEE_MATTR_PX)) {
1379 					abort_print_error(ai);
1380 					panic();
1381 				}
1382 			}
1383 		}
1384 		/* Since the page is mapped now it's OK */
1385 		break;
1386 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1387 		/* Check attempting to write to an RO page */
1388 		pmem = pmem_find(area, pgidx);
1389 		if (!pmem)
1390 			panic();
1391 		if (abort_is_user_exception(ai)) {
1392 			if (!(area->flags & TEE_MATTR_UW))
1393 				return true;
1394 			if (!(attr & TEE_MATTR_UW))
1395 				make_dirty_page(pmem, area, pgidx, pa,
1396 						ai->va & ~SMALL_PAGE_MASK);
1397 
1398 		} else {
1399 			if (!(area->flags & TEE_MATTR_PW)) {
1400 				abort_print_error(ai);
1401 				panic();
1402 			}
1403 			if (!(attr & TEE_MATTR_PW))
1404 				make_dirty_page(pmem, area, pgidx, pa,
1405 						ai->va & ~SMALL_PAGE_MASK);
1406 		}
1407 		/* Since permissions have been updated it's OK now */
1408 		break;
1409 	default:
1410 		/* Some fault we can't deal with */
1411 		if (abort_is_user_exception(ai))
1412 			return true;
1413 		abort_print_error(ai);
1414 		panic();
1415 	}
1416 	*handled = true;
1417 	return true;
1418 }
1419 
1420 #ifdef CFG_TEE_CORE_DEBUG
1421 static void stat_handle_fault(void)
1422 {
1423 	static size_t num_faults;
1424 	static size_t min_npages = SIZE_MAX;
1425 	static size_t total_min_npages = SIZE_MAX;
1426 
1427 	num_faults++;
1428 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1429 		DMSG("nfaults %zu npages %zu (min %zu)",
1430 		     num_faults, tee_pager_npages, min_npages);
1431 		min_npages = tee_pager_npages; /* reset */
1432 	}
1433 	if (tee_pager_npages < min_npages)
1434 		min_npages = tee_pager_npages;
1435 	if (tee_pager_npages < total_min_npages)
1436 		total_min_npages = tee_pager_npages;
1437 }
1438 #else
1439 static void stat_handle_fault(void)
1440 {
1441 }
1442 #endif
1443 
1444 bool tee_pager_handle_fault(struct abort_info *ai)
1445 {
1446 	struct tee_pager_area *area;
1447 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1448 	uint32_t exceptions;
1449 	bool ret;
1450 	bool clean_user_cache = false;
1451 
1452 #ifdef TEE_PAGER_DEBUG_PRINT
1453 	if (!abort_is_user_exception(ai))
1454 		abort_print(ai);
1455 #endif
1456 
1457 	/*
1458 	 * We're updating pages that can affect several active CPUs at a
1459 	 * time below. We end up here because a thread tries to access some
1460 	 * memory that isn't available. We have to be careful when making
1461 	 * that memory available as other threads may succeed in accessing
1462 	 * that address the moment after we've made it available.
1463 	 *
1464 	 * That means that we can't just map the memory and populate the
1465 	 * page, instead we use the aliased mapping to populate the page
1466 	 * and once everything is ready we map it.
1467 	 */
1468 	exceptions = pager_lock(ai);
1469 
1470 	stat_handle_fault();
1471 
1472 	/* check if the access is valid */
1473 	if (abort_is_user_exception(ai)) {
1474 		area = find_uta_area(ai->va);
1475 		clean_user_cache = true;
1476 	} else {
1477 		area = find_area(&tee_pager_area_head, ai->va);
1478 		if (!area) {
1479 			area = find_uta_area(ai->va);
1480 			clean_user_cache = true;
1481 		}
1482 	}
1483 	if (!area || !area->pgt) {
1484 		ret = false;
1485 		goto out;
1486 	}
1487 
1488 	if (!tee_pager_unhide_page(area, page_va)) {
1489 		struct tee_pager_pmem *pmem = NULL;
1490 		uint32_t attr = 0;
1491 		paddr_t pa = 0;
1492 		size_t tblidx = 0;
1493 
1494 		/*
1495 		 * The page wasn't hidden, but some other core may have
1496 		 * updated the table entry before we got here or we need
1497 		 * to make a read-only page read-write (dirty).
1498 		 */
1499 		if (pager_update_permissions(area, ai, &ret)) {
1500 			/*
1501 			 * Nothing more to do with the abort. The problem
1502 			 * could already have been dealt with from another
1503 			 * core, or if ret is false the TA will be panicked.
1504 			 */
1505 			goto out;
1506 		}
1507 
1508 		pmem = tee_pager_get_page(area->type);
1509 		if (!pmem) {
1510 			abort_print(ai);
1511 			panic();
1512 		}
1513 
1514 		/* load page code & data */
1515 		tee_pager_load_page(area, page_va, pmem->va_alias);
1516 
1517 
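		/*
		 * Bind the physical page to the fobj page backing page_va:
		 * the table index of page_va minus the area's offset within
		 * its page directory, plus the area's offset into the fobj
		 * (the inverse of pmem_get_area_tblidx()).
		 */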
1518 		pmem->fobj = area->fobj;
1519 		pmem->fobj_pgidx = area_va2idx(area, page_va) +
1520 				   area->fobj_pgoffs -
1521 				   ((area->base & CORE_MMU_PGDIR_MASK) >>
1522 					SMALL_PAGE_SHIFT);
1523 		tblidx = pmem_get_area_tblidx(pmem, area);
1524 		attr = get_area_mattr(area->flags);
1525 		/*
1526 		 * Pages from PAGER_AREA_TYPE_RW start read-only to be
1527 		 * able to tell when they are updated and should be tagged
1528 		 * as dirty.
1529 		 */
1530 		if (area->type == PAGER_AREA_TYPE_RW)
1531 			attr &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1532 		pa = get_pmem_pa(pmem);
1533 
1534 		/*
1535 		 * We've updated the page using the aliased mapping and
1536 		 * some cache maintenance is now needed if it's an
1537 		 * executable page.
1538 		 *
1539 		 * Since the d-cache is a Physically-indexed,
1540 		 * physically-tagged (PIPT) cache we can clean either the
1541 		 * aliased address or the real virtual address. In this
1542 		 * case we choose the real virtual address.
1543 		 *
1544 		 * The i-cache can also be PIPT, but may be something else
1545 		 * too like VIPT. The current code requires the caches to
1546 		 * implement the IVIPT extension, that is:
1547 		 * "instruction cache maintenance is required only after
1548 		 * writing new data to a physical address that holds an
1549 		 * instruction."
1550 		 *
1551 		 * To portably invalidate the icache the page has to
1552 		 * be mapped at the final virtual address but not
1553 		 * executable.
1554 		 */
1555 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1556 			uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1557 					TEE_MATTR_PW | TEE_MATTR_UW;
1558 			void *va = (void *)page_va;
1559 
1560 			/* Set a temporary read-only mapping */
1561 			area_set_entry(area, tblidx, pa, attr & ~mask);
1562 			area_tlbi_entry(area, tblidx);
1563 
1564 			dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1565 			if (clean_user_cache)
1566 				icache_inv_user_range(va, SMALL_PAGE_SIZE);
1567 			else
1568 				icache_inv_range(va, SMALL_PAGE_SIZE);
1569 
1570 			/* Set the final mapping */
1571 			area_set_entry(area, tblidx, pa, attr);
1572 			area_tlbi_entry(area, tblidx);
1573 		} else {
1574 			area_set_entry(area, tblidx, pa, attr);
1575 			/*
1576 			 * No need to flush TLB for this entry, it was
1577 			 * invalid. We should use a barrier though, to make
1578 			 * sure that the change is visible.
1579 			 */
1580 			dsb_ishst();
1581 		}
1582 		pgt_inc_used_entries(area->pgt);
1583 
1584 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1585 
1586 	}
1587 
1588 	tee_pager_hide_pages();
1589 	ret = true;
1590 out:
1591 	pager_unlock(exceptions);
1592 	return ret;
1593 }
1594 
1595 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1596 {
1597 	size_t n;
1598 
1599 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1600 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1601 
1602 	/* setup memory */
1603 	for (n = 0; n < npages; n++) {
1604 		struct core_mmu_table_info *ti;
1605 		struct tee_pager_pmem *pmem;
1606 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1607 		unsigned int pgidx;
1608 		paddr_t pa;
1609 		uint32_t attr;
1610 
1611 		ti = find_table_info(va);
1612 		pgidx = core_mmu_va2idx(ti, va);
1613 		/*
1614 		 * Note that we can only support adding pages in the
1615 		 * valid range of this table info, currently not a problem.
1616 		 */
1617 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1618 
1619 		/* Ignore unmapped pages/blocks */
1620 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1621 			continue;
1622 
1623 		pmem = calloc(1, sizeof(struct tee_pager_pmem));
1624 		if (!pmem)
1625 			panic("out of mem");
1626 
1627 		pmem->va_alias = pager_add_alias_page(pa);
1628 
1629 		if (unmap) {
1630 			pmem_clear(pmem);
1631 			core_mmu_set_entry(ti, pgidx, 0, 0);
1632 			pgt_dec_used_entries(find_core_pgt(va));
1633 		} else {
1634 			struct tee_pager_area *area = NULL;
1635 
1636 			/*
1637 			 * The page is still mapped, let's assign the area
1638 			 * and update the protection bits accordingly.
1639 			 */
1640 			area = find_area(&tee_pager_area_head, va);
1641 			assert(area && area->pgt == find_core_pgt(va));
1642 			pmem->fobj = area->fobj;
1643 			pmem->fobj_pgidx = pgidx + area->fobj_pgoffs -
1644 					   ((area->base &
1645 							CORE_MMU_PGDIR_MASK) >>
1646 						SMALL_PAGE_SHIFT);
1647 			assert(pgidx == pmem_get_area_tblidx(pmem, area));
1648 			assert(pa == get_pmem_pa(pmem));
1649 			area_set_entry(area, pgidx, pa,
1650 				       get_area_mattr(area->flags));
1651 		}
1652 
1653 		tee_pager_npages++;
1654 		incr_npages_all();
1655 		set_npages();
1656 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1657 	}
1658 
1659 	/*
1660 	 * As this is done during init, invalidate all TLBs once instead of
1661 	 * targeting only the modified entries.
1662 	 */
1663 	tlbi_all();
1664 }
1665 
1666 #ifdef CFG_PAGED_USER_TA
1667 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1668 {
1669 	struct pgt *p = pgt;
1670 
1671 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1672 		p = SLIST_NEXT(p, link);
1673 	return p;
1674 }
1675 
1676 void tee_pager_assign_um_tables(struct user_mode_ctx *uctx)
1677 {
1678 	struct tee_pager_area *area = NULL;
1679 	struct pgt *pgt = NULL;
1680 
1681 	if (!uctx->areas)
1682 		return;
1683 
1684 	pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1685 	TAILQ_FOREACH(area, uctx->areas, link) {
1686 		if (!area->pgt)
1687 			area->pgt = find_pgt(pgt, area->base);
1688 		else
1689 			assert(area->pgt == find_pgt(pgt, area->base));
1690 		if (!area->pgt)
1691 			panic();
1692 	}
1693 }
1694 
1695 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1696 {
1697 	struct tee_pager_pmem *pmem = NULL;
1698 	struct tee_pager_area *area = NULL;
1699 	struct tee_pager_area_head *areas = NULL;
1700 	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1701 
1702 	if (!pgt->num_used_entries)
1703 		goto out;
1704 
1705 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1706 		if (pmem->fobj)
1707 			pmem_unmap(pmem, pgt);
1708 	}
1709 	assert(!pgt->num_used_entries);
1710 
1711 out:
1712 	areas = to_user_mode_ctx(pgt->ctx)->areas;
1713 	if (areas) {
1714 		TAILQ_FOREACH(area, areas, link) {
1715 			if (area->pgt == pgt)
1716 				area->pgt = NULL;
1717 		}
1718 	}
1719 
1720 	pager_unlock(exceptions);
1721 }
1722 DECLARE_KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1723 #endif /*CFG_PAGED_USER_TA*/
1724 
1725 void tee_pager_release_phys(void *addr, size_t size)
1726 {
1727 	bool unmaped = false;
1728 	vaddr_t va = (vaddr_t)addr;
1729 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1730 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1731 	struct tee_pager_area *area;
1732 	uint32_t exceptions;
1733 
1734 	if (end <= begin)
1735 		return;
1736 
1737 	exceptions = pager_lock_check_stack(128);
1738 
1739 	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
1740 		area = find_area(&tee_pager_area_head, va);
1741 		if (!area)
1742 			panic();
1743 		unmaped |= tee_pager_release_one_phys(area, va);
1744 	}
1745 
1746 	if (unmaped)
1747 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1748 
1749 	pager_unlock(exceptions);
1750 }
1751 DECLARE_KEEP_PAGER(tee_pager_release_phys);
1752 
1753 void *tee_pager_alloc(size_t size)
1754 {
1755 	tee_mm_entry_t *mm = NULL;
1756 	uint8_t *smem = NULL;
1757 	size_t num_pages = 0;
1758 	struct fobj *fobj = NULL;
1759 
1760 	if (!size)
1761 		return NULL;
1762 
1763 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1764 	if (!mm)
1765 		return NULL;
1766 
1767 	smem = (uint8_t *)tee_mm_get_smem(mm);
1768 	num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
1769 	fobj = fobj_locked_paged_alloc(num_pages);
1770 	if (!fobj) {
1771 		tee_mm_free(mm);
1772 		return NULL;
1773 	}
1774 
1775 	tee_pager_add_core_area((vaddr_t)smem, PAGER_AREA_TYPE_LOCK, fobj);
1776 	fobj_put(fobj);
1777 
1778 	asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE);
1779 
1780 	return smem;
1781 }
1782