xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 7c732ee481c684dbc9cfb923e417b61148729372)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <io.h>
10 #include <keep.h>
11 #include <kernel/abort.h>
12 #include <kernel/asan.h>
13 #include <kernel/cache_helpers.h>
14 #include <kernel/panic.h>
15 #include <kernel/spinlock.h>
16 #include <kernel/tee_misc.h>
17 #include <kernel/tee_ta_manager.h>
18 #include <kernel/thread.h>
19 #include <kernel/tlb_helpers.h>
20 #include <kernel/user_mode_ctx.h>
21 #include <mm/core_memprot.h>
22 #include <mm/fobj.h>
23 #include <mm/tee_mm.h>
24 #include <mm/tee_pager.h>
25 #include <stdlib.h>
26 #include <sys/queue.h>
27 #include <tee_api_defines.h>
28 #include <trace.h>
29 #include <types_ext.h>
30 #include <utee_defines.h>
31 #include <util.h>
32 
34 static struct tee_pager_area_head tee_pager_area_head =
35 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
36 
37 #define INVALID_PGIDX		UINT_MAX
38 #define PMEM_FLAG_DIRTY		BIT(0)
39 #define PMEM_FLAG_HIDDEN	BIT(1)
40 
41 /*
42  * struct tee_pager_pmem - Represents a physical page used for paging.
43  *
44  * @flags	flags defined by PMEM_FLAG_* above
45  * @fobj_pgidx	index of the page in the @fobj
46  * @fobj	File object from which a page is made visible.
47  * @va_alias	Virtual address where the physical page is always aliased.
48  *		Used during remapping of the page when the content needs to
49  *		be updated before it's available at the new location.
50  */
51 struct tee_pager_pmem {
52 	unsigned int flags;
53 	unsigned int fobj_pgidx;
54 	struct fobj *fobj;
55 	void *va_alias;
56 	TAILQ_ENTRY(tee_pager_pmem) link;
57 };
58 
59 /* The list of physical pages. The first page in the list is the oldest */
60 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
61 
62 static struct tee_pager_pmem_head tee_pager_pmem_head =
63 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
64 
65 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
66 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
67 
68 /* Maximum number of pages to hide at a time */
69 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
70 
71 /* Number of registered physical pages, used when hiding pages. */
72 static size_t tee_pager_npages;
73 
74 #ifdef CFG_WITH_STATS
75 static struct tee_pager_stats pager_stats;
76 
77 static inline void incr_ro_hits(void)
78 {
79 	pager_stats.ro_hits++;
80 }
81 
82 static inline void incr_rw_hits(void)
83 {
84 	pager_stats.rw_hits++;
85 }
86 
87 static inline void incr_hidden_hits(void)
88 {
89 	pager_stats.hidden_hits++;
90 }
91 
92 static inline void incr_zi_released(void)
93 {
94 	pager_stats.zi_released++;
95 }
96 
97 static inline void incr_npages_all(void)
98 {
99 	pager_stats.npages_all++;
100 }
101 
102 static inline void set_npages(void)
103 {
104 	pager_stats.npages = tee_pager_npages;
105 }
106 
107 void tee_pager_get_stats(struct tee_pager_stats *stats)
108 {
109 	*stats = pager_stats;
110 
111 	pager_stats.hidden_hits = 0;
112 	pager_stats.ro_hits = 0;
113 	pager_stats.rw_hits = 0;
114 	pager_stats.zi_released = 0;
115 }
116 
117 #else /* CFG_WITH_STATS */
118 static inline void incr_ro_hits(void) { }
119 static inline void incr_rw_hits(void) { }
120 static inline void incr_hidden_hits(void) { }
121 static inline void incr_zi_released(void) { }
122 static inline void incr_npages_all(void) { }
123 static inline void set_npages(void) { }
124 
125 void tee_pager_get_stats(struct tee_pager_stats *stats)
126 {
127 	memset(stats, 0, sizeof(struct tee_pager_stats));
128 }
129 #endif /* CFG_WITH_STATS */
130 
131 #define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
132 #define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
133 #define TBL_SHIFT	SMALL_PAGE_SHIFT
134 
135 #define EFFECTIVE_VA_SIZE \
136 	(ROUNDUP(TEE_RAM_VA_START + TEE_RAM_VA_SIZE, \
137 		 CORE_MMU_PGDIR_SIZE) - \
138 	 ROUNDDOWN(TEE_RAM_VA_START, CORE_MMU_PGDIR_SIZE))
139 
140 static struct pager_table {
141 	struct pgt pgt;
142 	struct core_mmu_table_info tbl_info;
143 } pager_tables[EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE];
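
/*
 * Worked example with illustrative values: if TEE_RAM_VA_START =
 * 0x0e100000, TEE_RAM_VA_SIZE = 0x00300000 and CORE_MMU_PGDIR_SIZE =
 * 2 MiB, then EFFECTIVE_VA_SIZE = ROUNDUP(0x0e400000, 2 MiB) -
 * ROUNDDOWN(0x0e100000, 2 MiB) = 0x0e400000 - 0x0e000000 = 4 MiB and
 * pager_tables[] gets two entries, one per translation table covered.
 */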
144 
145 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
146 
147 /* Defines the range of the alias area */
148 static tee_mm_entry_t *pager_alias_area;
149 /*
150  * Physical pages are added in a stack-like fashion to the alias
151  * area. @pager_alias_next_free gives the address of the next free
152  * entry, if @pager_alias_next_free is != 0.
153  */
154 static uintptr_t pager_alias_next_free;
155 
156 #ifdef CFG_TEE_CORE_DEBUG
157 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
158 
159 static uint32_t pager_lock_dldetect(const char *func, const int line,
160 				    struct abort_info *ai)
161 {
162 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
163 	unsigned int retries = 0;
164 	unsigned int reminder = 0;
165 
166 	while (!cpu_spin_trylock(&pager_spinlock)) {
167 		retries++;
168 		if (!retries) {
169 			/* wrapped, time to report */
170 			trace_printf(func, line, TRACE_ERROR, true,
171 				     "possible spinlock deadlock reminder %u",
172 				     reminder);
173 			if (reminder < UINT_MAX)
174 				reminder++;
175 			if (ai)
176 				abort_print(ai);
177 		}
178 	}
179 
180 	return exceptions;
181 }
182 #else
183 static uint32_t pager_lock(struct abort_info __unused *ai)
184 {
185 	return cpu_spin_lock_xsave(&pager_spinlock);
186 }
187 #endif
188 
189 static uint32_t pager_lock_check_stack(size_t stack_size)
190 {
191 	if (stack_size) {
192 		int8_t buf[stack_size];
193 		size_t n;
194 
195 		/*
196 		 * Make sure to touch all pages of the stack that we expect
197 		 * to use with this lock held. We need to take any
198 		 * page faults before the lock is taken or we'll deadlock
199 		 * the pager. The pages that are populated in this way will
200 		 * eventually be released at certain save transitions of
201 		 * the thread.
202 		 */
203 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
204 			io_write8((vaddr_t)buf + n, 1);
205 		io_write8((vaddr_t)buf + stack_size - 1, 1);
206 	}
207 
208 	return pager_lock(NULL);
209 }
210 
211 static void pager_unlock(uint32_t exceptions)
212 {
213 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
214 }
215 
216 void *tee_pager_phys_to_virt(paddr_t pa)
217 {
218 	struct core_mmu_table_info ti;
219 	unsigned idx;
220 	uint32_t a;
221 	paddr_t p;
222 	vaddr_t v;
223 	size_t n;
224 
225 	/*
226 	 * Most addresses are mapped linearly, try that first if possible.
227 	 */
228 	if (!tee_pager_get_table_info(pa, &ti))
229 		return NULL; /* impossible pa */
230 	idx = core_mmu_va2idx(&ti, pa);
231 	core_mmu_get_entry(&ti, idx, &p, &a);
232 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
233 		return (void *)core_mmu_idx2va(&ti, idx);
234 
235 	n = 0;
236 	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
237 	while (true) {
238 		while (idx < TBL_NUM_ENTRIES) {
239 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
240 			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
241 				return NULL;
242 
243 			core_mmu_get_entry(&pager_tables[n].tbl_info,
244 					   idx, &p, &a);
245 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
246 				return (void *)v;
247 			idx++;
248 		}
249 
250 		n++;
251 		if (n >= ARRAY_SIZE(pager_tables))
252 			return NULL;
253 		idx = 0;
254 	}
255 
256 	return NULL;
257 }
258 
259 static bool pmem_is_hidden(struct tee_pager_pmem *pmem)
260 {
261 	return pmem->flags & PMEM_FLAG_HIDDEN;
262 }
263 
264 static bool pmem_is_dirty(struct tee_pager_pmem *pmem)
265 {
266 	return pmem->flags & PMEM_FLAG_DIRTY;
267 }
268 
269 static bool pmem_is_covered_by_area(struct tee_pager_pmem *pmem,
270 				    struct tee_pager_area *area)
271 {
272 	if (pmem->fobj != area->fobj)
273 		return false;
274 	if (pmem->fobj_pgidx < area->fobj_pgoffs)
275 		return false;
276 	if ((pmem->fobj_pgidx - area->fobj_pgoffs) >=
277 	    (area->size >> SMALL_PAGE_SHIFT))
278 		return false;
279 
280 	return true;
281 }
282 
283 static size_t pmem_get_area_tblidx(struct tee_pager_pmem *pmem,
284 				   struct tee_pager_area *area)
285 {
286 	size_t tbloffs = (area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT;
287 
288 	return pmem->fobj_pgidx - area->fobj_pgoffs + tbloffs;
289 }
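
/*
 * Worked example with illustrative numbers, assuming 4 KiB pages and
 * 2 MiB translation tables: an area with base = 0x00302000 and
 * fobj_pgoffs = 2 gives tbloffs = (0x00302000 & 0x001fffff) >> 12 =
 * 0x102. A pmem with fobj_pgidx = 5 is page 5 - 2 = 3 of the area and
 * gets tblidx = 3 + 0x102 = 0x105, the table entry for va
 * (0x105 << 12) + 0x00200000 = 0x00305000 (see area_idx2va() below),
 * that is, the area base plus three pages as expected.
 */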
290 
291 static struct pager_table *find_pager_table_may_fail(vaddr_t va)
292 {
293 	size_t n;
294 	const vaddr_t mask = CORE_MMU_PGDIR_MASK;
295 
296 	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
297 	    CORE_MMU_PGDIR_SHIFT;
298 	if (n >= ARRAY_SIZE(pager_tables))
299 		return NULL;
300 
301 	assert(va >= pager_tables[n].tbl_info.va_base &&
302 	       va <= (pager_tables[n].tbl_info.va_base | mask));
303 
304 	return pager_tables + n;
305 }
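
/*
 * Continuing the illustrative values above (va_base of pager_tables[0]
 * = 0x0e000000, 2 MiB translation tables so CORE_MMU_PGDIR_SHIFT =
 * 21): va = 0x0e350000 gives n = (0x0e200000 - 0x0e000000) >> 21 = 1,
 * i.e. pager_tables[1], whose range [0x0e200000, 0x0e3fffff] indeed
 * covers va.
 */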
306 
307 static struct pager_table *find_pager_table(vaddr_t va)
308 {
309 	struct pager_table *pt = find_pager_table_may_fail(va);
310 
311 	assert(pt);
312 	return pt;
313 }
314 
315 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
316 {
317 	struct pager_table *pt = find_pager_table_may_fail(va);
318 
319 	if (!pt)
320 		return false;
321 
322 	*ti = pt->tbl_info;
323 	return true;
324 }
325 
326 static struct core_mmu_table_info *find_table_info(vaddr_t va)
327 {
328 	return &find_pager_table(va)->tbl_info;
329 }
330 
331 static struct pgt *find_core_pgt(vaddr_t va)
332 {
333 	return &find_pager_table(va)->pgt;
334 }
335 
336 void tee_pager_set_alias_area(tee_mm_entry_t *mm)
337 {
338 	struct pager_table *pt;
339 	unsigned idx;
340 	vaddr_t smem = tee_mm_get_smem(mm);
341 	size_t nbytes = tee_mm_get_bytes(mm);
342 	vaddr_t v;
343 	uint32_t a = 0;
344 
345 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
346 
347 	assert(!pager_alias_area);
348 	pager_alias_area = mm;
349 	pager_alias_next_free = smem;
350 
351 	/* Clear all mappings in the alias area */
352 	pt = find_pager_table(smem);
353 	idx = core_mmu_va2idx(&pt->tbl_info, smem);
354 	while (pt <= (pager_tables + ARRAY_SIZE(pager_tables) - 1)) {
355 		while (idx < TBL_NUM_ENTRIES) {
356 			v = core_mmu_idx2va(&pt->tbl_info, idx);
357 			if (v >= (smem + nbytes))
358 				goto out;
359 
360 			core_mmu_get_entry(&pt->tbl_info, idx, NULL, &a);
361 			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
362 			if (a & TEE_MATTR_VALID_BLOCK)
363 				pgt_dec_used_entries(&pt->pgt);
364 			idx++;
365 		}
366 
367 		pt++;
368 		idx = 0;
369 	}
370 
371 out:
372 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
373 }
374 
375 static size_t tbl_usage_count(struct core_mmu_table_info *ti)
376 {
377 	size_t n;
378 	uint32_t a = 0;
379 	size_t usage = 0;
380 
381 	for (n = 0; n < ti->num_entries; n++) {
382 		core_mmu_get_entry(ti, n, NULL, &a);
383 		if (a & TEE_MATTR_VALID_BLOCK)
384 			usage++;
385 	}
386 	return usage;
387 }
388 
389 static void area_get_entry(struct tee_pager_area *area, size_t idx,
390 			   paddr_t *pa, uint32_t *attr)
391 {
392 	assert(area->pgt);
393 	assert(idx < TBL_NUM_ENTRIES);
394 	core_mmu_get_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
395 }
396 
397 static void area_set_entry(struct tee_pager_area *area, size_t idx,
398 			   paddr_t pa, uint32_t attr)
399 {
400 	assert(area->pgt);
401 	assert(idx < TBL_NUM_ENTRIES);
402 	core_mmu_set_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
403 }
404 
405 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
406 {
407 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
408 }
409 
410 static vaddr_t area_idx2va(struct tee_pager_area *area, size_t idx)
411 {
412 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
413 }
414 
415 static void area_tlbi_entry(struct tee_pager_area *area, size_t idx)
416 {
417 	vaddr_t va = area_idx2va(area, idx);
418 
419 #if defined(CFG_PAGED_USER_TA)
420 	assert(area->pgt);
421 	if (area->pgt->ctx) {
422 		uint32_t asid = to_user_mode_ctx(area->pgt->ctx)->vm_info.asid;
423 
424 		tlbi_mva_asid(va, asid);
425 		return;
426 	}
427 #endif
428 	tlbi_mva_allasid(va);
429 }
430 
431 static void pmem_unmap(struct tee_pager_pmem *pmem, struct pgt *only_this_pgt)
432 {
433 	struct tee_pager_area *area = NULL;
434 	size_t tblidx = 0;
435 	uint32_t a = 0;
436 
437 	TAILQ_FOREACH(area, &pmem->fobj->areas, fobj_link) {
438 		/*
439 		 * If only_this_pgt points to a pgt then the pgt of this
440 		 * area has to match or we'll skip over it.
441 		 */
442 		if (only_this_pgt && area->pgt != only_this_pgt)
443 			continue;
444 		if (!area->pgt || !pmem_is_covered_by_area(pmem, area))
445 			continue;
446 		tblidx = pmem_get_area_tblidx(pmem, area);
447 		area_get_entry(area, tblidx, NULL, &a);
448 		if (a & TEE_MATTR_VALID_BLOCK) {
449 			area_set_entry(area, tblidx, 0, 0);
450 			pgt_dec_used_entries(area->pgt);
451 			area_tlbi_entry(area, tblidx);
452 		}
453 	}
454 }
455 
456 void tee_pager_early_init(void)
457 {
458 	size_t n;
459 
460 	/*
461 	 * Note that this depends on add_pager_vaspace() adding vaspace
462 	 * after the end of memory.
463 	 */
464 	for (n = 0; n < ARRAY_SIZE(pager_tables); n++) {
465 		if (!core_mmu_find_table(NULL, TEE_RAM_VA_START +
466 					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
467 					 &pager_tables[n].tbl_info))
468 			panic("can't find mmu tables");
469 
470 		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
471 			panic("Unsupported page size in translation table");
472 		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
473 		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);
474 
475 		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
476 		pgt_set_used_entries(&pager_tables[n].pgt,
477 				tbl_usage_count(&pager_tables[n].tbl_info));
478 	}
479 }
480 
481 static void *pager_add_alias_page(paddr_t pa)
482 {
483 	unsigned idx;
484 	struct core_mmu_table_info *ti;
485 	/* Alias pages start read-only; write access is enabled as needed */
486 	uint32_t attr = TEE_MATTR_VALID_BLOCK |
487 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
488 			TEE_MATTR_SECURE | TEE_MATTR_PR;
489 
490 	DMSG("0x%" PRIxPA, pa);
491 
492 	ti = find_table_info(pager_alias_next_free);
493 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
494 	core_mmu_set_entry(ti, idx, pa, attr);
495 	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
496 	pager_alias_next_free += SMALL_PAGE_SIZE;
497 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
498 				      tee_mm_get_bytes(pager_alias_area)))
499 		pager_alias_next_free = 0;
500 	return (void *)core_mmu_idx2va(ti, idx);
501 }
502 
503 static void area_insert_tail(struct tee_pager_area *area)
504 {
505 	uint32_t exceptions = pager_lock_check_stack(8);
506 
507 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
508 	TAILQ_INSERT_TAIL(&area->fobj->areas, area, fobj_link);
509 
510 	pager_unlock(exceptions);
511 }
512 KEEP_PAGER(area_insert_tail);
513 
514 void tee_pager_add_core_area(vaddr_t base, enum tee_pager_area_type type,
515 			     struct fobj *fobj)
516 {
517 	struct tee_pager_area *area = NULL;
518 	uint32_t flags = 0;
519 	size_t fobj_pgoffs = 0;
520 	vaddr_t b = base;
521 	size_t s = fobj ? fobj->num_pages * SMALL_PAGE_SIZE : 0;
522 	size_t s2 = 0;
523 
524 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : type %d", base, base + s, type);
525 
526 	if (base & SMALL_PAGE_MASK || !s) {
527 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, s);
528 		panic();
529 	}
530 
531 	switch (type) {
532 	case PAGER_AREA_TYPE_RO:
533 		flags = TEE_MATTR_PRX;
534 		break;
535 	case PAGER_AREA_TYPE_RW:
536 	case PAGER_AREA_TYPE_LOCK:
537 		flags = TEE_MATTR_PRW;
538 		break;
539 	default:
540 		panic();
541 	}
542 
543 	if (!fobj)
544 		panic();
545 
546 	while (s) {
547 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
548 		area = calloc(1, sizeof(*area));
549 		if (!area)
550 			panic("alloc_area");
551 
552 		area->fobj = fobj_get(fobj);
553 		area->fobj_pgoffs = fobj_pgoffs;
554 		area->type = type;
555 		area->pgt = find_core_pgt(b);
556 		area->base = b;
557 		area->size = s2;
558 		area->flags = flags;
559 		area_insert_tail(area);
560 
561 		b += s2;
562 		s -= s2;
563 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
564 	}
565 }
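
/*
 * Illustration of the split above, with hypothetical numbers: a 4-page
 * fobj at base 0x001fe000 and 2 MiB translation tables is split into
 * two areas, [0x001fe000, 0x00200000) with fobj_pgoffs = 0 and
 * [0x00200000, 0x00202000) with fobj_pgoffs = 2, since an area may not
 * span more than one translation table.
 */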
566 
567 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
568 					vaddr_t va)
569 {
570 	struct tee_pager_area *area;
571 
572 	if (!areas)
573 		return NULL;
574 
575 	TAILQ_FOREACH(area, areas, link) {
576 		if (core_is_buffer_inside(va, 1, area->base, area->size))
577 			return area;
578 	}
579 	return NULL;
580 }
581 
582 #ifdef CFG_PAGED_USER_TA
583 static struct tee_pager_area *find_uta_area(vaddr_t va)
584 {
585 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
586 
587 	if (!is_user_mode_ctx(ctx))
588 		return NULL;
589 	return find_area(to_user_mode_ctx(ctx)->areas, va);
590 }
591 #else
592 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
593 {
594 	return NULL;
595 }
596 #endif /*CFG_PAGED_USER_TA*/
597 
599 static uint32_t get_area_mattr(uint32_t area_flags)
600 {
601 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
602 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
603 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
604 
605 	return attr;
606 }
607 
608 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
609 {
610 	struct core_mmu_table_info *ti;
611 	paddr_t pa;
612 	unsigned idx;
613 
614 	ti = find_table_info((vaddr_t)pmem->va_alias);
615 	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
616 	core_mmu_get_entry(ti, idx, &pa, NULL);
617 	return pa;
618 }
619 
620 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
621 			void *va_alias)
622 {
623 	size_t fobj_pgoffs = ((page_va - area->base) >> SMALL_PAGE_SHIFT) +
624 			     area->fobj_pgoffs;
625 	struct core_mmu_table_info *ti;
626 	uint32_t attr_alias;
627 	paddr_t pa_alias;
628 	unsigned int idx_alias;
629 
630 	/* Ensure we are allowed to write to the aliased virtual page */
631 	ti = find_table_info((vaddr_t)va_alias);
632 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
633 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
634 	if (!(attr_alias & TEE_MATTR_PW)) {
635 		attr_alias |= TEE_MATTR_PW;
636 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
637 		tlbi_mva_allasid((vaddr_t)va_alias);
638 	}
639 
640 	asan_tag_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
641 	if (fobj_load_page(area->fobj, fobj_pgoffs, va_alias)) {
642 		EMSG("PH 0x%" PRIxVA " failed", page_va);
643 		panic();
644 	}
645 	switch (area->type) {
646 	case PAGER_AREA_TYPE_RO:
647 		incr_ro_hits();
648 		/* Forbid write to aliases for read-only (maybe exec) pages */
649 		attr_alias &= ~TEE_MATTR_PW;
650 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
651 		tlbi_mva_allasid((vaddr_t)va_alias);
652 		break;
653 	case PAGER_AREA_TYPE_RW:
654 		incr_rw_hits();
655 		break;
656 	case PAGER_AREA_TYPE_LOCK:
657 		break;
658 	default:
659 		panic();
660 	}
661 	asan_tag_no_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
662 }
663 
664 static void tee_pager_save_page(struct tee_pager_pmem *pmem)
665 {
666 	if (pmem_is_dirty(pmem)) {
667 		asan_tag_access(pmem->va_alias,
668 				(uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
669 		if (fobj_save_page(pmem->fobj, pmem->fobj_pgidx,
670 				   pmem->va_alias))
671 			panic("fobj_save_page");
672 		asan_tag_no_access(pmem->va_alias,
673 				   (uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
674 	}
675 }
676 
677 #ifdef CFG_PAGED_USER_TA
678 static void unlink_area(struct tee_pager_area_head *area_head,
679 			struct tee_pager_area *area)
680 {
681 	uint32_t exceptions = pager_lock_check_stack(64);
682 
683 	TAILQ_REMOVE(area_head, area, link);
684 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
685 
686 	pager_unlock(exceptions);
687 }
688 KEEP_PAGER(unlink_area);
689 
690 static void free_area(struct tee_pager_area *area)
691 {
692 	fobj_put(area->fobj);
693 	free(area);
694 }
695 
696 static TEE_Result pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base,
697 				    struct fobj *fobj, uint32_t prot)
698 {
699 	struct tee_pager_area *area;
700 	vaddr_t b = base;
701 	size_t fobj_pgoffs = 0;
702 	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
703 
704 	if (!uctx->areas) {
705 		uctx->areas = malloc(sizeof(*uctx->areas));
706 		if (!uctx->areas)
707 			return TEE_ERROR_OUT_OF_MEMORY;
708 		TAILQ_INIT(uctx->areas);
709 	}
710 
711 	while (s) {
712 		size_t s2;
713 
714 		if (find_area(uctx->areas, b))
715 			return TEE_ERROR_BAD_PARAMETERS;
716 
717 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
718 		area = calloc(1, sizeof(*area));
719 		if (!area)
720 			return TEE_ERROR_OUT_OF_MEMORY;
721 
722 		/* Table info will be set when the context is activated. */
723 		area->fobj = fobj_get(fobj);
724 		area->fobj_pgoffs = fobj_pgoffs;
725 		area->type = PAGER_AREA_TYPE_RW;
726 		area->base = b;
727 		area->size = s2;
728 		area->flags = prot;
729 
730 		TAILQ_INSERT_TAIL(uctx->areas, area, link);
731 		TAILQ_INSERT_TAIL(&fobj->areas, area, fobj_link);
732 		b += s2;
733 		s -= s2;
734 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
735 	}
736 
737 	return TEE_SUCCESS;
738 }
739 
740 TEE_Result tee_pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base,
741 				 struct fobj *fobj, uint32_t prot)
742 {
743 	TEE_Result res = TEE_SUCCESS;
744 	struct thread_specific_data *tsd = thread_get_tsd();
745 	struct tee_pager_area *area = NULL;
746 	struct core_mmu_table_info dir_info = { NULL };
747 
748 	if (&uctx->ctx != tsd->ctx) {
749 		/*
750 		 * Changes are to a utc (user TA context) that isn't active.
751 		 * Just add the areas; page tables will be dealt with later.
752 		 */
753 		return pager_add_um_area(uctx, base, fobj, prot);
754 	}
755 
756 	/*
757 	 * Assign page tables before adding areas to be able to tell which
758 	 * are newly added and should be removed in case of failure.
759 	 */
760 	tee_pager_assign_um_tables(uctx);
761 	res = pager_add_um_area(uctx, base, fobj, prot);
762 	if (res) {
763 		struct tee_pager_area *next_a;
764 
765 		/* Remove all added areas */
766 		TAILQ_FOREACH_SAFE(area, uctx->areas, link, next_a) {
767 			if (!area->pgt) {
768 				unlink_area(uctx->areas, area);
769 				free_area(area);
770 			}
771 		}
772 		return res;
773 	}
774 
775 	/*
776 	 * Assign page tables to the new areas and make sure that the page
777 	 * tables are registered in the upper table.
778 	 */
779 	tee_pager_assign_um_tables(uctx);
780 	core_mmu_get_user_pgdir(&dir_info);
781 	TAILQ_FOREACH(area, uctx->areas, link) {
782 		paddr_t pa;
783 		size_t idx;
784 		uint32_t attr;
785 
786 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
787 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
788 
789 		/*
790 		 * Check if the page table is already used; if it is, it's
791 		 * already registered.
792 		 */
793 		if (area->pgt->num_used_entries) {
794 			assert(attr & TEE_MATTR_TABLE);
795 			assert(pa == virt_to_phys(area->pgt->tbl));
796 			continue;
797 		}
798 
799 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
800 		pa = virt_to_phys(area->pgt->tbl);
801 		assert(pa);
802 		/*
803 		 * Note that the update of the table entry is guaranteed to
804 		 * be atomic.
805 		 */
806 		core_mmu_set_entry(&dir_info, idx, pa, attr);
807 	}
808 
809 	return TEE_SUCCESS;
810 }
811 
812 static void rem_area(struct tee_pager_area_head *area_head,
813 		     struct tee_pager_area *area)
814 {
815 	struct tee_pager_pmem *pmem;
816 	size_t last_pgoffs = area->fobj_pgoffs +
817 			     (area->size >> SMALL_PAGE_SHIFT) - 1;
818 	uint32_t exceptions;
819 	size_t idx = 0;
820 	uint32_t a = 0;
821 
822 	exceptions = pager_lock_check_stack(64);
823 
824 	TAILQ_REMOVE(area_head, area, link);
825 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
826 
827 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
828 		if (pmem->fobj != area->fobj ||
829 		    pmem->fobj_pgidx < area->fobj_pgoffs ||
830 		    pmem->fobj_pgidx > last_pgoffs)
831 			continue;
832 
833 		idx = pmem_get_area_tblidx(pmem, area);
834 		area_get_entry(area, idx, NULL, &a);
835 		if (!(a & TEE_MATTR_VALID_BLOCK))
836 			continue;
837 
838 		area_set_entry(area, idx, 0, 0);
839 		area_tlbi_entry(area, idx);
840 		pgt_dec_used_entries(area->pgt);
841 	}
842 
843 	pager_unlock(exceptions);
844 
845 	free_area(area);
846 }
847 KEEP_PAGER(rem_area);
848 
849 void tee_pager_rem_um_region(struct user_mode_ctx *uctx, vaddr_t base,
850 			     size_t size)
851 {
852 	struct tee_pager_area *area;
853 	struct tee_pager_area *next_a;
854 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
855 
856 	TAILQ_FOREACH_SAFE(area, uctx->areas, link, next_a) {
857 		if (core_is_buffer_inside(area->base, area->size, base, s))
858 			rem_area(uctx->areas, area);
859 	}
860 	tlbi_asid(uctx->vm_info.asid);
861 }
862 
863 void tee_pager_rem_um_areas(struct user_mode_ctx *uctx)
864 {
865 	struct tee_pager_area *area = NULL;
866 
867 	if (!uctx->areas)
868 		return;
869 
870 	while (true) {
871 		area = TAILQ_FIRST(uctx->areas);
872 		if (!area)
873 			break;
874 		unlink_area(uctx->areas, area);
875 		free_area(area);
876 	}
877 
878 	free(uctx->areas);
879 }
880 
881 static bool __maybe_unused same_context(struct tee_pager_pmem *pmem)
882 {
883 	struct tee_pager_area *a = TAILQ_FIRST(&pmem->fobj->areas);
884 	void *ctx = a->pgt->ctx;
885 
886 	do {
887 		a = TAILQ_NEXT(a, fobj_link);
888 		if (!a)
889 			return true;
890 	} while (a->pgt->ctx == ctx);
891 
892 	return false;
893 }
894 
895 bool tee_pager_set_um_area_attr(struct user_mode_ctx *uctx, vaddr_t base,
896 				size_t size, uint32_t flags)
897 {
898 	bool ret = false;
899 	vaddr_t b = base;
900 	size_t s = size;
901 	size_t s2 = 0;
902 	struct tee_pager_area *area = find_area(uctx->areas, b);
903 	uint32_t exceptions = 0;
904 	struct tee_pager_pmem *pmem = NULL;
905 	uint32_t a = 0;
906 	uint32_t f = 0;
907 	uint32_t mattr = 0;
908 	uint32_t f2 = 0;
909 	size_t tblidx = 0;
910 
911 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
912 	if (f & TEE_MATTR_UW)
913 		f |= TEE_MATTR_PW;
914 	mattr = get_area_mattr(f);
915 
916 	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
917 
918 	while (s) {
919 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
920 		if (!area || area->base != b || area->size != s2) {
921 			ret = false;
922 			goto out;
923 		}
924 		b += s2;
925 		s -= s2;
926 
927 		if (area->flags == f)
928 			goto next_area;
929 
930 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
931 			if (!pmem_is_covered_by_area(pmem, area))
932 				continue;
933 
934 			tblidx = pmem_get_area_tblidx(pmem, area);
935 			area_get_entry(area, tblidx, NULL, &a);
936 			if (a == f)
937 				continue;
938 			area_set_entry(area, tblidx, 0, 0);
939 			area_tlbi_entry(area, tblidx);
940 
941 			pmem->flags &= ~PMEM_FLAG_HIDDEN;
942 			if (pmem_is_dirty(pmem))
943 				f2 = mattr;
944 			else
945 				f2 = mattr & ~(TEE_MATTR_UW | TEE_MATTR_PW);
946 			area_set_entry(area, tblidx, get_pmem_pa(pmem), f2);
947 			if (!(a & TEE_MATTR_VALID_BLOCK))
948 				pgt_inc_used_entries(area->pgt);
949 			/*
950 			 * Make sure the table update is visible before
951 			 * continuing.
952 			 */
953 			dsb_ishst();
954 
955 			/*
956 			 * There would be a problem if this page was already
957 			 * shared: we'd need to do i-cache invalidation for
958 			 * each context in which it is shared. In practice
959 			 * this never happens.
960 			 */
961 			if (flags & TEE_MATTR_UX) {
962 				void *va = (void *)area_idx2va(area, tblidx);
963 
964 				/* Assert that the pmem isn't shared. */
965 				assert(same_context(pmem));
966 
967 				dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
968 				icache_inv_user_range(va, SMALL_PAGE_SIZE);
969 			}
970 		}
971 
972 		area->flags = f;
973 next_area:
974 		area = TAILQ_NEXT(area, link);
975 	}
976 
977 	ret = true;
978 out:
979 	pager_unlock(exceptions);
980 	return ret;
981 }
982 
983 KEEP_PAGER(tee_pager_set_um_area_attr);
984 #endif /*CFG_PAGED_USER_TA*/
985 
986 void tee_pager_invalidate_fobj(struct fobj *fobj)
987 {
988 	struct tee_pager_pmem *pmem;
989 	uint32_t exceptions;
990 
991 	exceptions = pager_lock_check_stack(64);
992 
993 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
994 		if (pmem->fobj == fobj) {
995 			pmem->fobj = NULL;
996 			pmem->fobj_pgidx = INVALID_PGIDX;
997 		}
998 	}
999 
1000 	pager_unlock(exceptions);
1001 }
1002 KEEP_PAGER(tee_pager_invalidate_fobj);
1003 
1004 static struct tee_pager_pmem *pmem_find(struct tee_pager_area *area,
1005 					unsigned int tblidx)
1006 {
1007 	struct tee_pager_pmem *pmem = NULL;
1008 
1009 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
1010 		if (pmem->fobj == area->fobj &&
1011 		    pmem_get_area_tblidx(pmem, area) == tblidx)
1012 			return pmem;
1013 
1014 	return NULL;
1015 }
1016 
1017 static bool tee_pager_unhide_page(struct tee_pager_area *area,
1018 				  unsigned int tblidx)
1019 {
1020 	struct tee_pager_pmem *pmem = pmem_find(area, tblidx);
1021 	uint32_t a = get_area_mattr(area->flags);
1022 	uint32_t attr = 0;
1023 	paddr_t pa = 0;
1024 
1025 	if (!pmem)
1026 		return false;
1027 
1028 	area_get_entry(area, tblidx, NULL, &attr);
1029 	if (attr & TEE_MATTR_VALID_BLOCK)
1030 		return false;
1031 
1032 	/*
1033 	 * The page is hidden, or not mapped yet. Unhide the page and
1034 	 * move it to the tail.
1035 	 *
1036 	 * Since the page isn't mapped there doesn't exist a valid TLB entry
1037 	 * for this address, so no TLB invalidation is required after setting
1038 	 * the new entry. A DSB is needed though, to make the write visible.
1039 	 *
1040 	 * For user executable pages it's more complicated. Those pages can
1041 	 * be shared between multiple TA mappings and thus populated by
1042 	 * another TA. The reference manual states that:
1043 	 *
1044 	 * "instruction cache maintenance is required only after writing
1045 	 * new data to a physical address that holds an instruction."
1046 	 *
1047 	 * So for hidden pages we would not need to invalidate i-cache, but
1048 	 * for newly populated pages we do. Since we don't know which we
1049 	 * have to assume the worst and always invalidate the i-cache. We
1050 	 * don't need to clean the d-cache though, since that has already
1051 	 * been done earlier.
1052 	 *
1053 	 * Additional bookkeeping to tell if the i-cache invalidation is
1054 	 * needed or not is left as a future optimization.
1055 	 */
1056 
1057 	/* If it's not a dirty block, then it should be read only. */
1058 	if (!pmem_is_dirty(pmem))
1059 		a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1060 
1061 	pa = get_pmem_pa(pmem);
1062 	pmem->flags &= ~PMEM_FLAG_HIDDEN;
1063 	if (area->flags & TEE_MATTR_UX) {
1064 		void *va = (void *)area_idx2va(area, tblidx);
1065 
1066 		/* Set a temporary read-only mapping */
1067 		assert(!(a & (TEE_MATTR_UW | TEE_MATTR_PW)));
1068 		area_set_entry(area, tblidx, pa, a & ~TEE_MATTR_UX);
1069 		dsb_ishst();
1070 
1071 		icache_inv_user_range(va, SMALL_PAGE_SIZE);
1072 
1073 		/* Set the final mapping */
1074 		area_set_entry(area, tblidx, pa, a);
1075 		area_tlbi_entry(area, tblidx);
1076 	} else {
1077 		area_set_entry(area, tblidx, pa, a);
1078 		dsb_ishst();
1079 	}
1080 	pgt_inc_used_entries(area->pgt);
1081 
1082 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1083 	TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1084 	incr_hidden_hits();
1085 	return true;
1086 }
1087 
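/*
 * Hiding pages approximates an LRU policy: the oldest third of the
 * pmem list is unmapped so that a hidden page which is still in use
 * faults and is moved to the tail by tee_pager_unhide_page(), while
 * pages that stay hidden remain at the head where tee_pager_get_page()
 * picks its victims.
 */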
1088 static void tee_pager_hide_pages(void)
1089 {
1090 	struct tee_pager_pmem *pmem = NULL;
1091 	size_t n = 0;
1092 
1093 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1094 		if (n >= TEE_PAGER_NHIDE)
1095 			break;
1096 		n++;
1097 
1098 		/* We cannot hide pages when pmem->fobj is NULL */
1099 		if (!pmem->fobj)
1100 			continue;
1101 
1102 		if (pmem_is_hidden(pmem))
1103 			continue;
1104 
1105 		pmem->flags |= PMEM_FLAG_HIDDEN;
1106 		pmem_unmap(pmem, NULL);
1107 	}
1108 }
1109 
1110 /*
1111  * Find the mapped pmem, unmap it and move it to the pageable pmem list.
1112  * Return false if page was not mapped, and true if page was mapped.
1113  */
1114 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1115 				       vaddr_t page_va)
1116 {
1117 	struct tee_pager_pmem *pmem;
1118 	size_t tblidx = 0;
1119 	size_t pgidx = area_va2idx(area, page_va) + area->fobj_pgoffs -
1120 		       ((area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT);
1121 
1122 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1123 		if (pmem->fobj != area->fobj || pmem->fobj_pgidx != pgidx)
1124 			continue;
1125 
1126 		/*
1127 		 * Locked pages may not be shared. These two asserts check
1128 		 * that there's only a single area recorded with this pmem.
1129 		 */
1130 		assert(TAILQ_FIRST(&pmem->fobj->areas) == area);
1131 		assert(TAILQ_LAST(&pmem->fobj->areas,
1132 				  tee_pager_area_head) == area);
1133 
1134 		tblidx = pmem_get_area_tblidx(pmem, area);
1135 		area_set_entry(area, tblidx, 0, 0);
1136 		pgt_dec_used_entries(area->pgt);
1137 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1138 		pmem->fobj = NULL;
1139 		pmem->fobj_pgidx = INVALID_PGIDX;
1140 		tee_pager_npages++;
1141 		set_npages();
1142 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1143 		incr_zi_released();
1144 		return true;
1145 	}
1146 
1147 	return false;
1148 }
1149 
1150 /* Finds the oldest page and unmaps it from all tables */
1151 static struct tee_pager_pmem *tee_pager_get_page(enum tee_pager_area_type at)
1152 {
1153 	struct tee_pager_pmem *pmem;
1154 
1155 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1156 	if (!pmem) {
1157 		EMSG("No pmem entries");
1158 		return NULL;
1159 	}
1160 
1161 	if (pmem->fobj) {
1162 		pmem_unmap(pmem, NULL);
1163 		tee_pager_save_page(pmem);
1164 	}
1165 
1166 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1167 	pmem->fobj = NULL;
1168 	pmem->fobj_pgidx = INVALID_PGIDX;
1169 	pmem->flags = 0;
1170 	if (at == PAGER_AREA_TYPE_LOCK) {
1171 		/* Move page to lock list */
1172 		if (!tee_pager_npages)
1173 			panic("running out of pages");
1174 		tee_pager_npages--;
1175 		set_npages();
1176 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1177 	} else {
1178 		/* move page to back */
1179 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1180 	}
1181 
1182 	return pmem;
1183 }
1184 
1185 static bool pager_update_permissions(struct tee_pager_area *area,
1186 			struct abort_info *ai, bool *handled)
1187 {
1188 	unsigned int pgidx = area_va2idx(area, ai->va);
1189 	struct tee_pager_pmem *pmem = NULL;
1190 	uint32_t attr = 0;
1191 	paddr_t pa = 0;
1192 
1193 	*handled = false;
1194 
1195 	area_get_entry(area, pgidx, &pa, &attr);
1196 
1197 	/* Not mapped */
1198 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1199 		return false;
1200 
1201 	/* Not readable, should not happen */
1202 	if (abort_is_user_exception(ai)) {
1203 		if (!(attr & TEE_MATTR_UR))
1204 			return true;
1205 	} else {
1206 		if (!(attr & TEE_MATTR_PR)) {
1207 			abort_print_error(ai);
1208 			panic();
1209 		}
1210 	}
1211 
1212 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1213 	case CORE_MMU_FAULT_TRANSLATION:
1214 	case CORE_MMU_FAULT_READ_PERMISSION:
1215 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1216 			/* Check attempting to execute from an NOX page */
1217 			if (abort_is_user_exception(ai)) {
1218 				if (!(attr & TEE_MATTR_UX))
1219 					return true;
1220 			} else {
1221 				if (!(attr & TEE_MATTR_PX)) {
1222 					abort_print_error(ai);
1223 					panic();
1224 				}
1225 			}
1226 		}
1227 		/* Since the page is mapped now it's OK */
1228 		break;
1229 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1230 		/* Check attempting to write to an RO page */
1231 		pmem = pmem_find(area, pgidx);
1232 		if (!pmem)
1233 			panic();
1234 		if (abort_is_user_exception(ai)) {
1235 			if (!(area->flags & TEE_MATTR_UW))
1236 				return true;
1237 			if (!(attr & TEE_MATTR_UW)) {
1238 				FMSG("Dirty %p",
1239 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1240 				pmem->flags |= PMEM_FLAG_DIRTY;
1241 				area_set_entry(area, pgidx, pa,
1242 					       get_area_mattr(area->flags));
1243 				area_tlbi_entry(area, pgidx);
1244 			}
1245 
1246 		} else {
1247 			if (!(area->flags & TEE_MATTR_PW)) {
1248 				abort_print_error(ai);
1249 				panic();
1250 			}
1251 			if (!(attr & TEE_MATTR_PW)) {
1252 				FMSG("Dirty %p",
1253 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1254 				pmem->flags |= PMEM_FLAG_DIRTY;
1255 				area_set_entry(area, pgidx, pa,
1256 					       get_area_mattr(area->flags));
1257 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1258 			}
1259 		}
1260 		/* Since permissions have been updated it's OK now */
1261 		break;
1262 	default:
1263 		/* Some fault we can't deal with */
1264 		if (abort_is_user_exception(ai))
1265 			return true;
1266 		abort_print_error(ai);
1267 		panic();
1268 	}
1269 	*handled = true;
1270 	return true;
1271 }
1272 
1273 #ifdef CFG_TEE_CORE_DEBUG
1274 static void stat_handle_fault(void)
1275 {
1276 	static size_t num_faults;
1277 	static size_t min_npages = SIZE_MAX;
1278 	static size_t total_min_npages = SIZE_MAX;
1279 
1280 	num_faults++;
1281 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1282 		DMSG("nfaults %zu npages %zu (min %zu)",
1283 		     num_faults, tee_pager_npages, min_npages);
1284 		min_npages = tee_pager_npages; /* reset */
1285 	}
1286 	if (tee_pager_npages < min_npages)
1287 		min_npages = tee_pager_npages;
1288 	if (tee_pager_npages < total_min_npages)
1289 		total_min_npages = tee_pager_npages;
1290 }
1291 #else
1292 static void stat_handle_fault(void)
1293 {
1294 }
1295 #endif
1296 
1297 bool tee_pager_handle_fault(struct abort_info *ai)
1298 {
1299 	struct tee_pager_area *area;
1300 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1301 	uint32_t exceptions;
1302 	bool ret;
1303 	bool clean_user_cache = false;
1304 
1305 #ifdef TEE_PAGER_DEBUG_PRINT
1306 	if (!abort_is_user_exception(ai))
1307 		abort_print(ai);
1308 #endif
1309 
1310 	/*
1311 	 * We're updating pages that can affect several active CPUs at a
1312 	 * time below. We end up here because a thread tries to access some
1313 	 * memory that isn't available. We have to be careful when making
1314 	 * that memory available as other threads may succeed in accessing
1315 	 * that address the moment after we've made it available.
1316 	 *
1317 	 * That means that we can't just map the memory and populate the
1318 	 * page, instead we use the aliased mapping to populate the page
1319 	 * and once everything is ready we map it.
1320 	 */
1321 	exceptions = pager_lock(ai);
1322 
1323 	stat_handle_fault();
1324 
1325 	/* check if the access is valid */
1326 	if (abort_is_user_exception(ai)) {
1327 		area = find_uta_area(ai->va);
1328 		clean_user_cache = true;
1329 	} else {
1330 		area = find_area(&tee_pager_area_head, ai->va);
1331 		if (!area) {
1332 			area = find_uta_area(ai->va);
1333 			clean_user_cache = true;
1334 		}
1335 	}
1336 	if (!area || !area->pgt) {
1337 		ret = false;
1338 		goto out;
1339 	}
1340 
1341 	if (!tee_pager_unhide_page(area, area_va2idx(area, page_va))) {
1342 		struct tee_pager_pmem *pmem = NULL;
1343 		uint32_t attr = 0;
1344 		paddr_t pa = 0;
1345 		size_t tblidx = 0;
1346 
1347 		/*
1348 		 * The page wasn't hidden, but some other core may have
1349 		 * updated the table entry before we got here, or we need
1350 		 * to make a read-only page read-write (dirty).
1351 		 */
1352 		if (pager_update_permissions(area, ai, &ret)) {
1353 			/*
1354 			 * Nothing more to do with the abort. The problem
1355 			 * could already have been dealt with from another
1356 			 * core, or if ret is false the TA will be panicked.
1357 			 */
1358 			goto out;
1359 		}
1360 
1361 		pmem = tee_pager_get_page(area->type);
1362 		if (!pmem) {
1363 			abort_print(ai);
1364 			panic();
1365 		}
1366 
1367 		/* load page code & data */
1368 		tee_pager_load_page(area, page_va, pmem->va_alias);
1369 
1371 		pmem->fobj = area->fobj;
1372 		pmem->fobj_pgidx = area_va2idx(area, page_va) +
1373 				   area->fobj_pgoffs -
1374 				   ((area->base & CORE_MMU_PGDIR_MASK) >>
1375 					SMALL_PAGE_SHIFT);
1376 		tblidx = pmem_get_area_tblidx(pmem, area);
1377 		attr = get_area_mattr(area->flags);
1378 		/*
1379 		 * Pages from PAGER_AREA_TYPE_RW start read-only to be
1380 		 * able to tell when they are updated and should be tagged
1381 		 * as dirty.
1382 		 */
1383 		if (area->type == PAGER_AREA_TYPE_RW)
1384 			attr &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1385 		pa = get_pmem_pa(pmem);
1386 
1387 		/*
1388 		 * We've updated the page using the aliased mapping and
1389 		 * some cache maintenance is now needed if it's an
1390 		 * executable page.
1391 		 *
1392 		 * Since the d-cache is a Physically-indexed,
1393 		 * physically-tagged (PIPT) cache we can clean either the
1394 		 * aliased address or the real virtual address. In this
1395 		 * case we choose the real virtual address.
1396 		 *
1397 		 * The i-cache can also be PIPT, but may be something else
1398 		 * too like VIPT. The current code requires the caches to
1399 		 * implement the IVIPT extension, that is:
1400 		 * "instruction cache maintenance is required only after
1401 		 * writing new data to a physical address that holds an
1402 		 * instruction."
1403 		 *
1404 		 * To portably invalidate the icache the page has to
1405 		 * be mapped at the final virtual address but not
1406 		 * executable.
1407 		 */
1408 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1409 			uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1410 					TEE_MATTR_PW | TEE_MATTR_UW;
1411 			void *va = (void *)page_va;
1412 
1413 			/* Set a temporary read-only mapping */
1414 			area_set_entry(area, tblidx, pa, attr & ~mask);
1415 			area_tlbi_entry(area, tblidx);
1416 
1417 			dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1418 			if (clean_user_cache)
1419 				icache_inv_user_range(va, SMALL_PAGE_SIZE);
1420 			else
1421 				icache_inv_range(va, SMALL_PAGE_SIZE);
1422 
1423 			/* Set the final mapping */
1424 			area_set_entry(area, tblidx, pa, attr);
1425 			area_tlbi_entry(area, tblidx);
1426 		} else {
1427 			area_set_entry(area, tblidx, pa, attr);
1428 			/*
1429 			 * No need to flush TLB for this entry, it was
1430 			 * invalid. We should use a barrier though, to make
1431 			 * sure that the change is visible.
1432 			 */
1433 			dsb_ishst();
1434 		}
1435 		pgt_inc_used_entries(area->pgt);
1436 
1437 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1438 
1439 	}
1440 
1441 	tee_pager_hide_pages();
1442 	ret = true;
1443 out:
1444 	pager_unlock(exceptions);
1445 	return ret;
1446 }
1447 
1448 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1449 {
1450 	size_t n;
1451 
1452 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1453 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1454 
1455 	/* setup memory */
1456 	for (n = 0; n < npages; n++) {
1457 		struct core_mmu_table_info *ti;
1458 		struct tee_pager_pmem *pmem;
1459 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1460 		unsigned int pgidx;
1461 		paddr_t pa;
1462 		uint32_t attr;
1463 
1464 		ti = find_table_info(va);
1465 		pgidx = core_mmu_va2idx(ti, va);
1466 		/*
1467 		 * Note that we can only support adding pages in the
1468 		 * valid range of this table info, currently not a problem.
1469 		 */
1470 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1471 
1472 		/* Ignore unmapped pages/blocks */
1473 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1474 			continue;
1475 
1476 		pmem = calloc(1, sizeof(struct tee_pager_pmem));
1477 		if (!pmem)
1478 			panic("out of mem");
1479 
1480 		pmem->va_alias = pager_add_alias_page(pa);
1481 
1482 		if (unmap) {
1483 			pmem->fobj = NULL;
1484 			pmem->fobj_pgidx = INVALID_PGIDX;
1485 			core_mmu_set_entry(ti, pgidx, 0, 0);
1486 			pgt_dec_used_entries(find_core_pgt(va));
1487 		} else {
1488 			struct tee_pager_area *area = NULL;
1489 
1490 			/*
1491 			 * The page is still mapped, let's assign the area
1492 			 * and update the protection bits accordingly.
1493 			 */
1494 			area = find_area(&tee_pager_area_head, va);
1495 			assert(area && area->pgt == find_core_pgt(va));
1496 			pmem->fobj = area->fobj;
1497 			pmem->fobj_pgidx = pgidx + area->fobj_pgoffs -
1498 					   ((area->base &
1499 							CORE_MMU_PGDIR_MASK) >>
1500 						SMALL_PAGE_SHIFT);
1501 			assert(pgidx == pmem_get_area_tblidx(pmem, area));
1502 			assert(pa == get_pmem_pa(pmem));
1503 			area_set_entry(area, pgidx, pa,
1504 				       get_area_mattr(area->flags));
1505 		}
1506 
1507 		tee_pager_npages++;
1508 		incr_npages_all();
1509 		set_npages();
1510 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1511 	}
1512 
1513 	/*
1514 	 * As this is done at init, invalidate all TLBs once instead of
1515 	 * targeting only the modified entries.
1516 	 */
1517 	tlbi_all();
1518 }
1519 
1520 #ifdef CFG_PAGED_USER_TA
1521 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1522 {
1523 	struct pgt *p = pgt;
1524 
1525 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1526 		p = SLIST_NEXT(p, link);
1527 	return p;
1528 }
1529 
1530 void tee_pager_assign_um_tables(struct user_mode_ctx *uctx)
1531 {
1532 	struct tee_pager_area *area = NULL;
1533 	struct pgt *pgt = NULL;
1534 
1535 	if (!uctx->areas)
1536 		return;
1537 
1538 	pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1539 	TAILQ_FOREACH(area, uctx->areas, link) {
1540 		if (!area->pgt)
1541 			area->pgt = find_pgt(pgt, area->base);
1542 		else
1543 			assert(area->pgt == find_pgt(pgt, area->base));
1544 		if (!area->pgt)
1545 			panic();
1546 	}
1547 }
1548 
1549 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1550 {
1551 	struct tee_pager_pmem *pmem = NULL;
1552 	struct tee_pager_area *area = NULL;
1553 	struct tee_pager_area_head *areas = NULL;
1554 	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1555 
1556 	if (!pgt->num_used_entries)
1557 		goto out;
1558 
1559 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1560 		if (pmem->fobj)
1561 			pmem_unmap(pmem, pgt);
1562 	}
1563 	assert(!pgt->num_used_entries);
1564 
1565 out:
1566 	areas = to_user_ta_ctx(pgt->ctx)->uctx.areas;
1567 	if (areas) {
1568 		TAILQ_FOREACH(area, areas, link) {
1569 			if (area->pgt == pgt)
1570 				area->pgt = NULL;
1571 		}
1572 	}
1573 
1574 	pager_unlock(exceptions);
1575 }
1576 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1577 #endif /*CFG_PAGED_USER_TA*/
1578 
1579 void tee_pager_release_phys(void *addr, size_t size)
1580 {
1581 	bool unmapped = false;
1582 	vaddr_t va = (vaddr_t)addr;
1583 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1584 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1585 	struct tee_pager_area *area;
1586 	uint32_t exceptions;
1587 
1588 	if (end <= begin)
1589 		return;
1590 
1591 	exceptions = pager_lock_check_stack(128);
1592 
1593 	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
1594 		area = find_area(&tee_pager_area_head, va);
1595 		if (!area)
1596 			panic();
1597 		unmapped |= tee_pager_release_one_phys(area, va);
1598 	}
1599 
1600 	if (unmapped)
1601 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1602 
1603 	pager_unlock(exceptions);
1604 }
1605 KEEP_PAGER(tee_pager_release_phys);
1606 
1607 void *tee_pager_alloc(size_t size)
1608 {
1609 	tee_mm_entry_t *mm = NULL;
1610 	uint8_t *smem = NULL;
1611 	size_t num_pages = 0;
1612 	struct fobj *fobj = NULL;
1613 
1614 	if (!size)
1615 		return NULL;
1616 
1617 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1618 	if (!mm)
1619 		return NULL;
1620 
1621 	smem = (uint8_t *)tee_mm_get_smem(mm);
1622 	num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
1623 	fobj = fobj_locked_paged_alloc(num_pages);
1624 	if (!fobj) {
1625 		tee_mm_free(mm);
1626 		return NULL;
1627 	}
1628 
1629 	tee_pager_add_core_area((vaddr_t)smem, PAGER_AREA_TYPE_LOCK, fobj);
1630 	fobj_put(fobj);
1631 
1632 	asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE);
1633 
1634 	return smem;
1635 }
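
/*
 * Usage sketch with a hypothetical caller (illustrative only): allocate
 * an 8 KiB lock-type buffer backed by the pager and later let the pager
 * reclaim the physical pages backing it.
 */
static void __maybe_unused tee_pager_alloc_example(void)
{
	uint8_t *buf = tee_pager_alloc(2 * SMALL_PAGE_SIZE);
	size_t n = 0;

	if (!buf)
		panic();

	/* Touching the pages faults them in, they stay resident until released */
	for (n = 0; n < 2 * SMALL_PAGE_SIZE; n += SMALL_PAGE_SIZE)
		io_write8((vaddr_t)buf + n, 1);

	/* Allow the pager to reclaim the backing physical pages */
	tee_pager_release_phys(buf, 2 * SMALL_PAGE_SIZE);
}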
1636