xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision f7492391a90d5fa10df014c1cf54a4308a6e9a2a)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <io.h>
10 #include <keep.h>
11 #include <kernel/abort.h>
12 #include <kernel/asan.h>
13 #include <kernel/cache_helpers.h>
14 #include <kernel/panic.h>
15 #include <kernel/spinlock.h>
16 #include <kernel/tee_misc.h>
17 #include <kernel/tee_ta_manager.h>
18 #include <kernel/thread.h>
19 #include <kernel/tlb_helpers.h>
20 #include <mm/core_memprot.h>
21 #include <mm/fobj.h>
22 #include <mm/tee_mm.h>
23 #include <mm/tee_pager.h>
24 #include <stdlib.h>
25 #include <sys/queue.h>
26 #include <tee_api_defines.h>
27 #include <trace.h>
28 #include <types_ext.h>
29 #include <utee_defines.h>
30 #include <util.h>
31 
32 
33 static struct tee_pager_area_head tee_pager_area_head =
34 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
35 
36 #define INVALID_PGIDX		UINT_MAX
37 #define PMEM_FLAG_DIRTY		BIT(0)
38 #define PMEM_FLAG_HIDDEN	BIT(1)
39 
40 /*
41  * struct tee_pager_pmem - Represents a physical page used for paging.
42  *
43  * @flags	flags defined by PMEM_FLAG_* above
44  * @fobj_pgidx	index of the page in the @fobj
45  * @fobj	File object of which a page is made visible.
46  * @va_alias	Virtual address where the physical page is always aliased.
47  *		Used during remapping of the page when the content needs to
48  *		be updated before it's available at the new location.
49  */
50 struct tee_pager_pmem {
51 	unsigned int flags;
52 	unsigned int fobj_pgidx;
53 	struct fobj *fobj;
54 	void *va_alias;
55 	TAILQ_ENTRY(tee_pager_pmem) link;
56 };
57 
58 /* The list of physical pages. The first page in the list is the oldest */
59 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
60 
61 static struct tee_pager_pmem_head tee_pager_pmem_head =
62 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
63 
64 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
65 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
66 
67 /* Maximum number of pages to hide in one pass (a third of the pool) */
68 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
69 
70 /* Number of registered physical pages, used when hiding pages. */
71 static size_t tee_pager_npages;
72 
73 #ifdef CFG_WITH_STATS
74 static struct tee_pager_stats pager_stats;
75 
76 static inline void incr_ro_hits(void)
77 {
78 	pager_stats.ro_hits++;
79 }
80 
81 static inline void incr_rw_hits(void)
82 {
83 	pager_stats.rw_hits++;
84 }
85 
86 static inline void incr_hidden_hits(void)
87 {
88 	pager_stats.hidden_hits++;
89 }
90 
91 static inline void incr_zi_released(void)
92 {
93 	pager_stats.zi_released++;
94 }
95 
96 static inline void incr_npages_all(void)
97 {
98 	pager_stats.npages_all++;
99 }
100 
101 static inline void set_npages(void)
102 {
103 	pager_stats.npages = tee_pager_npages;
104 }
105 
106 void tee_pager_get_stats(struct tee_pager_stats *stats)
107 {
108 	*stats = pager_stats;
109 
110 	pager_stats.hidden_hits = 0;
111 	pager_stats.ro_hits = 0;
112 	pager_stats.rw_hits = 0;
113 	pager_stats.zi_released = 0;
114 }
115 
116 #else /* CFG_WITH_STATS */
117 static inline void incr_ro_hits(void) { }
118 static inline void incr_rw_hits(void) { }
119 static inline void incr_hidden_hits(void) { }
120 static inline void incr_zi_released(void) { }
121 static inline void incr_npages_all(void) { }
122 static inline void set_npages(void) { }
123 
124 void tee_pager_get_stats(struct tee_pager_stats *stats)
125 {
126 	memset(stats, 0, sizeof(struct tee_pager_stats));
127 }
128 #endif /* CFG_WITH_STATS */
129 
130 #define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
131 #define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
132 #define TBL_SHIFT	SMALL_PAGE_SHIFT
133 
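/*
 * EFFECTIVE_VA_SIZE is the pgdir-aligned span that covers the whole
 * TEE_RAM virtual range. pager_tables[] below holds one last-level
 * translation table, with its pgt bookkeeping, for each pgdir in that
 * span.
 */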
134 #define EFFECTIVE_VA_SIZE \
135 	(ROUNDUP(TEE_RAM_VA_START + TEE_RAM_VA_SIZE, \
136 		 CORE_MMU_PGDIR_SIZE) - \
137 	 ROUNDDOWN(TEE_RAM_VA_START, CORE_MMU_PGDIR_SIZE))
138 
139 static struct pager_table {
140 	struct pgt pgt;
141 	struct core_mmu_table_info tbl_info;
142 } pager_tables[EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE];
143 
144 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
145 
146 /* Defines the range of the alias area */
147 static tee_mm_entry_t *pager_alias_area;
148 /*
149  * Physical pages are added in a stack-like fashion to the alias area.
150  * @pager_alias_next_free gives the address of the next free entry if
151  * @pager_alias_next_free is != 0.
152  */
153 static uintptr_t pager_alias_next_free;
154 
155 #ifdef CFG_TEE_CORE_DEBUG
156 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
157 
158 static uint32_t pager_lock_dldetect(const char *func, const int line,
159 				    struct abort_info *ai)
160 {
161 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
162 	unsigned int retries = 0;
163 	unsigned int reminder = 0;
164 
165 	while (!cpu_spin_trylock(&pager_spinlock)) {
166 		retries++;
167 		if (!retries) {
168 			/* wrapped, time to report */
169 			trace_printf(func, line, TRACE_ERROR, true,
170 				     "possible spinlock deadlock reminder %u",
171 				     reminder);
172 			if (reminder < UINT_MAX)
173 				reminder++;
174 			if (ai)
175 				abort_print(ai);
176 		}
177 	}
178 
179 	return exceptions;
180 }
181 #else
182 static uint32_t pager_lock(struct abort_info __unused *ai)
183 {
184 	return cpu_spin_lock_xsave(&pager_spinlock);
185 }
186 #endif
187 
188 static uint32_t pager_lock_check_stack(size_t stack_size)
189 {
190 	if (stack_size) {
191 		int8_t buf[stack_size];
192 		size_t n;
193 
194 		/*
195 		 * Make sure to touch all pages of the stack that we expect
196 		 * to use with this lock held. We need to take any
197 		 * page faults before the lock is taken or we'll deadlock
198 		 * the pager. The pages that are populated in this way will
199 		 * eventually be released at certain save transitions of
200 		 * the thread.
201 		 */
202 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
203 			io_write8((vaddr_t)buf + n, 1);
204 		io_write8((vaddr_t)buf + stack_size - 1, 1);
205 	}
206 
207 	return pager_lock(NULL);
208 }
209 
210 static void pager_unlock(uint32_t exceptions)
211 {
212 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
213 }
214 
215 void *tee_pager_phys_to_virt(paddr_t pa)
216 {
217 	struct core_mmu_table_info ti;
218 	unsigned idx;
219 	uint32_t a;
220 	paddr_t p;
221 	vaddr_t v;
222 	size_t n;
223 
224 	/*
225 	 * Most addresses are mapped linearly, try that first if possible.
226 	 */
227 	if (!tee_pager_get_table_info(pa, &ti))
228 		return NULL; /* impossible pa */
229 	idx = core_mmu_va2idx(&ti, pa);
230 	core_mmu_get_entry(&ti, idx, &p, &a);
231 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
232 		return (void *)core_mmu_idx2va(&ti, idx);
233 
234 	n = 0;
235 	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
236 	while (true) {
237 		while (idx < TBL_NUM_ENTRIES) {
238 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
239 			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
240 				return NULL;
241 
242 			core_mmu_get_entry(&pager_tables[n].tbl_info,
243 					   idx, &p, &a);
244 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
245 				return (void *)v;
246 			idx++;
247 		}
248 
249 		n++;
250 		if (n >= ARRAY_SIZE(pager_tables))
251 			return NULL;
252 		idx = 0;
253 	}
254 
255 	return NULL;
256 }
257 
258 static bool pmem_is_hidden(struct tee_pager_pmem *pmem)
259 {
260 	return pmem->flags & PMEM_FLAG_HIDDEN;
261 }
262 
263 static bool pmem_is_dirty(struct tee_pager_pmem *pmem)
264 {
265 	return pmem->flags & PMEM_FLAG_DIRTY;
266 }
267 
268 static bool pmem_is_covered_by_area(struct tee_pager_pmem *pmem,
269 				    struct tee_pager_area *area)
270 {
271 	if (pmem->fobj != area->fobj)
272 		return false;
273 	if (pmem->fobj_pgidx < area->fobj_pgoffs)
274 		return false;
275 	if ((pmem->fobj_pgidx - area->fobj_pgoffs) >=
276 	    (area->size >> SMALL_PAGE_SHIFT))
277 		return false;
278 
279 	return true;
280 }
281 
282 static size_t pmem_get_area_tblidx(struct tee_pager_pmem *pmem,
283 				   struct tee_pager_area *area)
284 {
285 	size_t tbloffs = (area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT;
286 
287 	return pmem->fobj_pgidx - area->fobj_pgoffs + tbloffs;
288 }
289 
290 static struct pager_table *find_pager_table_may_fail(vaddr_t va)
291 {
292 	size_t n;
293 	const vaddr_t mask = CORE_MMU_PGDIR_MASK;
294 
295 	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
296 	    CORE_MMU_PGDIR_SHIFT;
297 	if (n >= ARRAY_SIZE(pager_tables))
298 		return NULL;
299 
300 	assert(va >= pager_tables[n].tbl_info.va_base &&
301 	       va <= (pager_tables[n].tbl_info.va_base | mask));
302 
303 	return pager_tables + n;
304 }
305 
306 static struct pager_table *find_pager_table(vaddr_t va)
307 {
308 	struct pager_table *pt = find_pager_table_may_fail(va);
309 
310 	assert(pt);
311 	return pt;
312 }
313 
314 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
315 {
316 	struct pager_table *pt = find_pager_table_may_fail(va);
317 
318 	if (!pt)
319 		return false;
320 
321 	*ti = pt->tbl_info;
322 	return true;
323 }
324 
325 static struct core_mmu_table_info *find_table_info(vaddr_t va)
326 {
327 	return &find_pager_table(va)->tbl_info;
328 }
329 
330 static struct pgt *find_core_pgt(vaddr_t va)
331 {
332 	return &find_pager_table(va)->pgt;
333 }
334 
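/*
 * Record the virtual range to use for alias mappings of the pageable
 * physical pages and clear any mappings currently present in that range.
 */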
335 void tee_pager_set_alias_area(tee_mm_entry_t *mm)
336 {
337 	struct pager_table *pt;
338 	unsigned idx;
339 	vaddr_t smem = tee_mm_get_smem(mm);
340 	size_t nbytes = tee_mm_get_bytes(mm);
341 	vaddr_t v;
342 	uint32_t a = 0;
343 
344 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
345 
346 	assert(!pager_alias_area);
347 	pager_alias_area = mm;
348 	pager_alias_next_free = smem;
349 
350 	/* Clear all mappings in the alias area */
351 	pt = find_pager_table(smem);
352 	idx = core_mmu_va2idx(&pt->tbl_info, smem);
353 	while (pt <= (pager_tables + ARRAY_SIZE(pager_tables) - 1)) {
354 		while (idx < TBL_NUM_ENTRIES) {
355 			v = core_mmu_idx2va(&pt->tbl_info, idx);
356 			if (v >= (smem + nbytes))
357 				goto out;
358 
359 			core_mmu_get_entry(&pt->tbl_info, idx, NULL, &a);
360 			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
361 			if (a & TEE_MATTR_VALID_BLOCK)
362 				pgt_dec_used_entries(&pt->pgt);
363 			idx++;
364 		}
365 
366 		pt++;
367 		idx = 0;
368 	}
369 
370 out:
371 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
372 }
373 
374 static size_t tbl_usage_count(struct core_mmu_table_info *ti)
375 {
376 	size_t n;
377 	uint32_t a = 0;
378 	size_t usage = 0;
379 
380 	for (n = 0; n < ti->num_entries; n++) {
381 		core_mmu_get_entry(ti, n, NULL, &a);
382 		if (a & TEE_MATTR_VALID_BLOCK)
383 			usage++;
384 	}
385 	return usage;
386 }
387 
388 static void area_get_entry(struct tee_pager_area *area, size_t idx,
389 			   paddr_t *pa, uint32_t *attr)
390 {
391 	assert(area->pgt);
392 	assert(idx < TBL_NUM_ENTRIES);
393 	core_mmu_get_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
394 }
395 
396 static void area_set_entry(struct tee_pager_area *area, size_t idx,
397 			   paddr_t pa, uint32_t attr)
398 {
399 	assert(area->pgt);
400 	assert(idx < TBL_NUM_ENTRIES);
401 	core_mmu_set_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
402 }
403 
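/*
 * area_va2idx()/area_idx2va() convert between a virtual address and the
 * corresponding index in the translation table covering the pgdir that
 * @area belongs to. Note that the index is relative to the start of the
 * pgdir, not to the start of the area.
 */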
404 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
405 {
406 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
407 }
408 
409 static vaddr_t area_idx2va(struct tee_pager_area *area, size_t idx)
410 {
411 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
412 }
413 
414 static void area_tlbi_entry(struct tee_pager_area *area, size_t idx)
415 {
416 	vaddr_t va = area_idx2va(area, idx);
417 
418 #if defined(CFG_PAGED_USER_TA)
419 	assert(area->pgt);
420 	if (area->pgt->ctx) {
421 		uint32_t asid = to_user_ta_ctx(area->pgt->ctx)->vm_info->asid;
422 
423 		tlbi_mva_asid(va, asid);
424 		return;
425 	}
426 #endif
427 	tlbi_mva_allasid(va);
428 }
429 
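/*
 * Remove every valid mapping of this physical page. The page may be mapped
 * by several areas sharing the same fobj; each matching entry is cleared,
 * the pgt usage count is decremented and the TLB entry is invalidated. If
 * @only_this_pgt is non-NULL only mappings in that particular page table
 * are touched.
 */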
430 static void pmem_unmap(struct tee_pager_pmem *pmem, struct pgt *only_this_pgt)
431 {
432 	struct tee_pager_area *area = NULL;
433 	size_t tblidx = 0;
434 	uint32_t a = 0;
435 
436 	TAILQ_FOREACH(area, &pmem->fobj->areas, fobj_link) {
437 		/*
438 		 * If only_this_pgt points to a pgt then the pgt of this
439 		 * area has to match or we'll skip over it.
440 		 */
441 		if (only_this_pgt && area->pgt != only_this_pgt)
442 			continue;
443 		if (!area->pgt || !pmem_is_covered_by_area(pmem, area))
444 			continue;
445 		tblidx = pmem_get_area_tblidx(pmem, area);
446 		area_get_entry(area, tblidx, NULL, &a);
447 		if (a & TEE_MATTR_VALID_BLOCK) {
448 			area_set_entry(area, tblidx, 0, 0);
449 			pgt_dec_used_entries(area->pgt);
450 			area_tlbi_entry(area, tblidx);
451 		}
452 	}
453 }
454 
455 void tee_pager_early_init(void)
456 {
457 	size_t n;
458 
459 	/*
460 	 * Note that this depends on add_pager_vaspace() adding the vaspace
461 	 * after the end of memory.
462 	 */
463 	for (n = 0; n < ARRAY_SIZE(pager_tables); n++) {
464 		if (!core_mmu_find_table(NULL, TEE_RAM_VA_START +
465 					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
466 					 &pager_tables[n].tbl_info))
467 			panic("can't find mmu tables");
468 
469 		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
470 			panic("Unsupported page size in translation table");
471 		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
472 		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);
473 
474 		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
475 		pgt_set_used_entries(&pager_tables[n].pgt,
476 				tbl_usage_count(&pager_tables[n].tbl_info));
477 	}
478 }
479 
480 static void *pager_add_alias_page(paddr_t pa)
481 {
482 	unsigned idx;
483 	struct core_mmu_table_info *ti;
484 	/* Alias pages are mapped read-only; write permission is enabled at runtime when needed */
485 	uint32_t attr = TEE_MATTR_VALID_BLOCK |
486 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
487 			TEE_MATTR_SECURE | TEE_MATTR_PR;
488 
489 	DMSG("0x%" PRIxPA, pa);
490 
491 	ti = find_table_info(pager_alias_next_free);
492 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
493 	core_mmu_set_entry(ti, idx, pa, attr);
494 	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
495 	pager_alias_next_free += SMALL_PAGE_SIZE;
496 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
497 				      tee_mm_get_bytes(pager_alias_area)))
498 		pager_alias_next_free = 0;
499 	return (void *)core_mmu_idx2va(ti, idx);
500 }
501 
502 static void area_insert_tail(struct tee_pager_area *area)
503 {
504 	uint32_t exceptions = pager_lock_check_stack(8);
505 
506 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
507 	TAILQ_INSERT_TAIL(&area->fobj->areas, area, fobj_link);
508 
509 	pager_unlock(exceptions);
510 }
511 KEEP_PAGER(area_insert_tail);
512 
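/*
 * Register a pager area for TEE core memory backed by @fobj. The range is
 * split at pgdir boundaries so that each resulting area is covered by
 * exactly one of the statically allocated pager_tables[].
 */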
513 void tee_pager_add_core_area(vaddr_t base, enum tee_pager_area_type type,
514 			     struct fobj *fobj)
515 {
516 	struct tee_pager_area *area = NULL;
517 	uint32_t flags = 0;
518 	size_t fobj_pgoffs = 0;
519 	vaddr_t b = base;
520 	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
521 	size_t s2 = 0;
522 
523 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : type %d", base, base + s, type);
524 
525 	if (base & SMALL_PAGE_MASK || !s) {
526 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, s);
527 		panic();
528 	}
529 
530 	switch (type) {
531 	case PAGER_AREA_TYPE_RO:
532 		flags = TEE_MATTR_PRX;
533 		break;
534 	case PAGER_AREA_TYPE_RW:
535 	case PAGER_AREA_TYPE_LOCK:
536 		flags = TEE_MATTR_PRW;
537 		break;
538 	default:
539 		panic();
540 	}
541 
542 	if (!fobj)
543 		panic();
544 
545 	while (s) {
546 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
547 		area = calloc(1, sizeof(*area));
548 		if (!area)
549 			panic("alloc_area");
550 
551 		area->fobj = fobj_get(fobj);
552 		area->fobj_pgoffs = fobj_pgoffs;
553 		area->type = type;
554 		area->pgt = find_core_pgt(b);
555 		area->base = b;
556 		area->size = s2;
557 		area->flags = flags;
558 		area_insert_tail(area);
559 
560 		b += s2;
561 		s -= s2;
562 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
563 	}
564 }
565 
566 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
567 					vaddr_t va)
568 {
569 	struct tee_pager_area *area;
570 
571 	if (!areas)
572 		return NULL;
573 
574 	TAILQ_FOREACH(area, areas, link) {
575 		if (core_is_buffer_inside(va, 1, area->base, area->size))
576 			return area;
577 	}
578 	return NULL;
579 }
580 
581 #ifdef CFG_PAGED_USER_TA
582 static struct tee_pager_area *find_uta_area(vaddr_t va)
583 {
584 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
585 
586 	if (!is_user_ta_ctx(ctx))
587 		return NULL;
588 	return find_area(to_user_ta_ctx(ctx)->areas, va);
589 }
590 #else
591 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
592 {
593 	return NULL;
594 }
595 #endif /*CFG_PAGED_USER_TA*/
596 
597 
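/*
 * Build the full MMU attributes for a present page in an area: a valid,
 * secure, cached block entry combined with the access flags of the area.
 */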
598 static uint32_t get_area_mattr(uint32_t area_flags)
599 {
600 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
601 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
602 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
603 
604 	return attr;
605 }
606 
607 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
608 {
609 	struct core_mmu_table_info *ti;
610 	paddr_t pa;
611 	unsigned idx;
612 
613 	ti = find_table_info((vaddr_t)pmem->va_alias);
614 	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
615 	core_mmu_get_entry(ti, idx, &pa, NULL);
616 	return pa;
617 }
618 
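/*
 * Populate the physical page behind @va_alias with the contents that
 * belong at @page_va. The alias mapping is temporarily made writable,
 * the page is fetched from the area's fobj, and for read-only areas the
 * write permission on the alias is removed again.
 */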
619 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
620 			void *va_alias)
621 {
622 	size_t fobj_pgoffs = ((page_va - area->base) >> SMALL_PAGE_SHIFT) +
623 			     area->fobj_pgoffs;
624 	struct core_mmu_table_info *ti;
625 	uint32_t attr_alias;
626 	paddr_t pa_alias;
627 	unsigned int idx_alias;
628 
629 	/* Ensure we are allowed to write to the aliased virtual page */
630 	ti = find_table_info((vaddr_t)va_alias);
631 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
632 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
633 	if (!(attr_alias & TEE_MATTR_PW)) {
634 		attr_alias |= TEE_MATTR_PW;
635 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
636 		tlbi_mva_allasid((vaddr_t)va_alias);
637 	}
638 
639 	asan_tag_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
640 	if (fobj_load_page(area->fobj, fobj_pgoffs, va_alias)) {
641 		EMSG("PH 0x%" PRIxVA " failed", page_va);
642 		panic();
643 	}
644 	switch (area->type) {
645 	case PAGER_AREA_TYPE_RO:
646 		incr_ro_hits();
647 		/* Forbid write to aliases for read-only (maybe exec) pages */
648 		attr_alias &= ~TEE_MATTR_PW;
649 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
650 		tlbi_mva_allasid((vaddr_t)va_alias);
651 		break;
652 	case PAGER_AREA_TYPE_RW:
653 		incr_rw_hits();
654 		break;
655 	case PAGER_AREA_TYPE_LOCK:
656 		break;
657 	default:
658 		panic();
659 	}
660 	asan_tag_no_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
661 }
662 
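/*
 * Write a dirty page back to its backing fobj before the physical page is
 * reused. Clean pages need no action since the fobj still holds an
 * up-to-date copy.
 */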
663 static void tee_pager_save_page(struct tee_pager_pmem *pmem)
664 {
665 	if (pmem_is_dirty(pmem)) {
666 		asan_tag_access(pmem->va_alias,
667 				(uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
668 		if (fobj_save_page(pmem->fobj, pmem->fobj_pgidx,
669 				   pmem->va_alias))
670 			panic("fobj_save_page");
671 		asan_tag_no_access(pmem->va_alias,
672 				   (uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
673 	}
674 }
675 
676 #ifdef CFG_PAGED_USER_TA
677 static void unlink_area(struct tee_pager_area_head *area_head,
678 			struct tee_pager_area *area)
679 {
680 	uint32_t exceptions = pager_lock_check_stack(64);
681 
682 	TAILQ_REMOVE(area_head, area, link);
683 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
684 
685 	pager_unlock(exceptions);
686 }
687 KEEP_PAGER(unlink_area);
688 
689 static void free_area(struct tee_pager_area *area)
690 {
691 	fobj_put(area->fobj);
692 	free(area);
693 }
694 
695 static TEE_Result pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
696 				     struct fobj *fobj, uint32_t prot)
697 {
698 	struct tee_pager_area *area;
699 	vaddr_t b = base;
700 	size_t fobj_pgoffs = 0;
701 	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
702 
703 	if (!utc->areas) {
704 		utc->areas = malloc(sizeof(*utc->areas));
705 		if (!utc->areas)
706 			return TEE_ERROR_OUT_OF_MEMORY;
707 		TAILQ_INIT(utc->areas);
708 	}
709 
710 	while (s) {
711 		size_t s2;
712 
713 		if (find_area(utc->areas, b))
714 			return TEE_ERROR_BAD_PARAMETERS;
715 
716 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
717 		area = calloc(1, sizeof(*area));
718 		if (!area)
719 			return TEE_ERROR_OUT_OF_MEMORY;
720 
721 		/* Table info will be set when the context is activated. */
722 		area->fobj = fobj_get(fobj);
723 		area->fobj_pgoffs = fobj_pgoffs;
724 		area->type = PAGER_AREA_TYPE_RW;
725 		area->base = b;
726 		area->size = s2;
727 		area->flags = prot;
728 
729 		TAILQ_INSERT_TAIL(utc->areas, area, link);
730 		TAILQ_INSERT_TAIL(&fobj->areas, area, fobj_link);
731 		b += s2;
732 		s -= s2;
733 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
734 	}
735 
736 	return TEE_SUCCESS;
737 }
738 
739 TEE_Result tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
740 			    struct fobj *fobj, uint32_t prot)
741 {
742 	TEE_Result res = TEE_SUCCESS;
743 	struct thread_specific_data *tsd = thread_get_tsd();
744 	struct tee_pager_area *area = NULL;
745 	struct core_mmu_table_info dir_info = { NULL };
746 
747 	if (&utc->ctx != tsd->ctx) {
748 		/*
749 		 * Changes are to a utc that isn't active. Just add the
750 		 * areas; page tables will be dealt with later.
751 		 */
752 		return pager_add_uta_area(utc, base, fobj, prot);
753 	}
754 
755 	/*
756 	 * Assign page tables before adding areas to be able to tell which
757 	 * are newly added and should be removed in case of failure.
758 	 */
759 	tee_pager_assign_uta_tables(utc);
760 	res = pager_add_uta_area(utc, base, fobj, prot);
761 	if (res) {
762 		struct tee_pager_area *next_a;
763 
764 		/* Remove all added areas */
765 		TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
766 			if (!area->pgt) {
767 				unlink_area(utc->areas, area);
768 				free_area(area);
769 			}
770 		}
771 		return res;
772 	}
773 
774 	/*
775 	 * Assign page tables to the new areas and make sure that the page
776 	 * tables are registered in the upper table.
777 	 */
778 	tee_pager_assign_uta_tables(utc);
779 	core_mmu_get_user_pgdir(&dir_info);
780 	TAILQ_FOREACH(area, utc->areas, link) {
781 		paddr_t pa;
782 		size_t idx;
783 		uint32_t attr;
784 
785 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
786 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
787 
788 		/*
789 		 * Check if the page table is already in use; if it is, it's
790 		 * already registered.
791 		 */
792 		if (area->pgt->num_used_entries) {
793 			assert(attr & TEE_MATTR_TABLE);
794 			assert(pa == virt_to_phys(area->pgt->tbl));
795 			continue;
796 		}
797 
798 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
799 		pa = virt_to_phys(area->pgt->tbl);
800 		assert(pa);
801 		/*
802 		 * Note that the update of the table entry is guaranteed to
803 		 * be atomic.
804 		 */
805 		core_mmu_set_entry(&dir_info, idx, pa, attr);
806 	}
807 
808 	return TEE_SUCCESS;
809 }
810 
811 static void rem_area(struct tee_pager_area_head *area_head,
812 		     struct tee_pager_area *area)
813 {
814 	struct tee_pager_pmem *pmem;
815 	size_t last_pgoffs = area->fobj_pgoffs +
816 			     (area->size >> SMALL_PAGE_SHIFT) - 1;
817 	uint32_t exceptions;
818 	size_t idx = 0;
819 	uint32_t a = 0;
820 
821 	exceptions = pager_lock_check_stack(64);
822 
823 	TAILQ_REMOVE(area_head, area, link);
824 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
825 
826 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
827 		if (pmem->fobj != area->fobj ||
828 		    pmem->fobj_pgidx < area->fobj_pgoffs ||
829 		    pmem->fobj_pgidx > last_pgoffs)
830 			continue;
831 
832 		idx = pmem_get_area_tblidx(pmem, area);
833 		area_get_entry(area, idx, NULL, &a);
834 		if (!(a & TEE_MATTR_VALID_BLOCK))
835 			continue;
836 
837 		area_set_entry(area, idx, 0, 0);
838 		area_tlbi_entry(area, idx);
839 		pgt_dec_used_entries(area->pgt);
840 	}
841 
842 	pager_unlock(exceptions);
843 
844 	free_area(area);
845 }
846 KEEP_PAGER(rem_area);
847 
848 void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
849 			      size_t size)
850 {
851 	struct tee_pager_area *area;
852 	struct tee_pager_area *next_a;
853 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
854 
855 	TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
856 		if (core_is_buffer_inside(area->base, area->size, base, s))
857 			rem_area(utc->areas, area);
858 	}
859 	tlbi_asid(utc->vm_info->asid);
860 }
861 
862 void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
863 {
864 	struct tee_pager_area *area;
865 
866 	if (!utc->areas)
867 		return;
868 
869 	while (true) {
870 		area = TAILQ_FIRST(utc->areas);
871 		if (!area)
872 			break;
873 		unlink_area(utc->areas, area);
874 		free_area(area);
875 	}
876 
877 	free(utc->areas);
878 }
879 
880 static bool __maybe_unused same_context(struct tee_pager_pmem *pmem)
881 {
882 	struct tee_pager_area *a = TAILQ_FIRST(&pmem->fobj->areas);
883 	void *ctx = a->pgt->ctx;
884 
885 	do {
886 		a = TAILQ_NEXT(a, fobj_link);
887 		if (!a)
888 			return true;
889 	} while (a->pgt->ctx == ctx);
890 
891 	return false;
892 }
893 
894 bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
895 				 size_t size, uint32_t flags)
896 {
897 	bool ret = false;
898 	vaddr_t b = base;
899 	size_t s = size;
900 	size_t s2 = 0;
901 	struct tee_pager_area *area = find_area(utc->areas, b);
902 	uint32_t exceptions = 0;
903 	struct tee_pager_pmem *pmem = NULL;
904 	uint32_t a = 0;
905 	uint32_t f = 0;
906 	uint32_t mattr = 0;
907 	uint32_t f2 = 0;
908 	size_t tblidx = 0;
909 
910 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
911 	if (f & TEE_MATTR_UW)
912 		f |= TEE_MATTR_PW;
913 	mattr = get_area_mattr(f);
914 
915 	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
916 
917 	while (s) {
918 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
919 		if (!area || area->base != b || area->size != s2) {
920 			ret = false;
921 			goto out;
922 		}
923 		b += s2;
924 		s -= s2;
925 
926 		if (area->flags == f)
927 			goto next_area;
928 
929 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
930 			if (!pmem_is_covered_by_area(pmem, area))
931 				continue;
932 
933 			tblidx = pmem_get_area_tblidx(pmem, area);
934 			area_get_entry(area, tblidx, NULL, &a);
935 			if (a == f)
936 				continue;
937 			area_set_entry(area, tblidx, 0, 0);
938 			area_tlbi_entry(area, tblidx);
939 
940 			pmem->flags &= ~PMEM_FLAG_HIDDEN;
941 			if (pmem_is_dirty(pmem))
942 				f2 = mattr;
943 			else
944 				f2 = mattr & ~(TEE_MATTR_UW | TEE_MATTR_PW);
945 			area_set_entry(area, tblidx, get_pmem_pa(pmem), f2);
946 			if (!(a & TEE_MATTR_VALID_BLOCK))
947 				pgt_inc_used_entries(area->pgt);
948 			/*
949 			 * Make sure the table update is visible before
950 			 * continuing.
951 			 */
952 			dsb_ishst();
953 
954 			/*
955 			 * There's a problem if this page is already shared:
956 			 * we would need to do an icache invalidation for each
957 			 * context in which it is shared. In practice this
958 			 * will never happen.
959 			 */
960 			if (flags & TEE_MATTR_UX) {
961 				void *va = (void *)area_idx2va(area, tblidx);
962 
963 				/* Assert that the pmem isn't shared. */
964 				assert(same_context(pmem));
965 
966 				dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
967 				icache_inv_user_range(va, SMALL_PAGE_SIZE);
968 			}
969 		}
970 
971 		area->flags = f;
972 next_area:
973 		area = TAILQ_NEXT(area, link);
974 	}
975 
976 	ret = true;
977 out:
978 	pager_unlock(exceptions);
979 	return ret;
980 }
981 KEEP_PAGER(tee_pager_set_uta_area_attr);
982 #endif /*CFG_PAGED_USER_TA*/
983 
984 void tee_pager_invalidate_fobj(struct fobj *fobj)
985 {
986 	struct tee_pager_pmem *pmem;
987 	uint32_t exceptions;
988 
989 	exceptions = pager_lock_check_stack(64);
990 
991 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
992 		if (pmem->fobj == fobj) {
993 			pmem->fobj = NULL;
994 			pmem->fobj_pgidx = INVALID_PGIDX;
995 		}
996 	}
997 
998 	pager_unlock(exceptions);
999 }
1000 KEEP_PAGER(tee_pager_invalidate_fobj);
1001 
1002 static struct tee_pager_pmem *pmem_find(struct tee_pager_area *area,
1003 					unsigned int tblidx)
1004 {
1005 	struct tee_pager_pmem *pmem = NULL;
1006 
1007 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
1008 		if (pmem->fobj == area->fobj &&
1009 		    pmem_get_area_tblidx(pmem, area) == tblidx)
1010 			return pmem;
1011 
1012 	return NULL;
1013 }
1014 
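/*
 * If the page at @tblidx is still resident, i.e. tracked by a pmem for
 * this fobj, but currently has no valid mapping, re-establish the mapping
 * and move the pmem to the tail of the pageable list. Returns true if the
 * fault was resolved this way, false otherwise (the page is either not
 * resident or already mapped).
 */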
1015 static bool tee_pager_unhide_page(struct tee_pager_area *area,
1016 				  unsigned int tblidx)
1017 {
1018 	struct tee_pager_pmem *pmem = pmem_find(area, tblidx);
1019 	uint32_t a = get_area_mattr(area->flags);
1020 	uint32_t attr = 0;
1021 	paddr_t pa = 0;
1022 
1023 	if (!pmem)
1024 		return false;
1025 
1026 	area_get_entry(area, tblidx, NULL, &attr);
1027 	if (attr & TEE_MATTR_VALID_BLOCK)
1028 		return false;
1029 
1030 	/*
1031 	 * The page is hidden, or not mapped yet. Unhide the page and
1032 	 * move it to the tail.
1033 	 *
1034 	 * Since the page isn't mapped there doesn't exist a valid TLB entry
1035 	 * for this address, so no TLB invalidation is required after setting
1036 	 * the new entry. A DSB is needed though, to make the write visible.
1037 	 *
1038 	 * For user executable pages it's more complicated. Those pages can
1039 	 * be shared between multiple TA mappings and thus populated by
1040 	 * another TA. The reference manual states that:
1041 	 *
1042 	 * "instruction cache maintenance is required only after writing
1043 	 * new data to a physical address that holds an instruction."
1044 	 *
1045 	 * So for hidden pages we would not need to invalidate i-cache, but
1046 	 * for newly populated pages we do. Since we don't know which we
1047 	 * have to assume the worst and always invalidate the i-cache. We
1048 	 * don't need to clean the d-cache though, since that has already
1049 	 * been done earlier.
1050 	 *
1051 	 * Additional bookkeeping to tell if the i-cache invalidation is
1052 	 * needed or not is left as a future optimization.
1053 	 */
1054 
1055 	/* If it's not a dirty block, then it should be read only. */
1056 	if (!pmem_is_dirty(pmem))
1057 		a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1058 
1059 	pa = get_pmem_pa(pmem);
1060 	pmem->flags &= ~PMEM_FLAG_HIDDEN;
1061 	if (area->flags & TEE_MATTR_UX) {
1062 		void *va = (void *)area_idx2va(area, tblidx);
1063 
1064 		/* Set a temporary read-only mapping */
1065 		assert(!(a & (TEE_MATTR_UW | TEE_MATTR_PW)));
1066 		area_set_entry(area, tblidx, pa, a & ~TEE_MATTR_UX);
1067 		dsb_ishst();
1068 
1069 		icache_inv_user_range(va, SMALL_PAGE_SIZE);
1070 
1071 		/* Set the final mapping */
1072 		area_set_entry(area, tblidx, pa, a);
1073 		area_tlbi_entry(area, tblidx);
1074 	} else {
1075 		area_set_entry(area, tblidx, pa, a);
1076 		dsb_ishst();
1077 	}
1078 	pgt_inc_used_entries(area->pgt);
1079 
1080 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1081 	TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1082 	incr_hidden_hits();
1083 	return true;
1084 }
1085 
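/*
 * Unmap ("hide") up to TEE_PAGER_NHIDE of the oldest pageable pages so
 * that the next access to them faults. The unhide path then moves a hit
 * page to the tail of the list, which roughly approximates LRU ordering.
 */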
1086 static void tee_pager_hide_pages(void)
1087 {
1088 	struct tee_pager_pmem *pmem = NULL;
1089 	size_t n = 0;
1090 
1091 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1092 		if (n >= TEE_PAGER_NHIDE)
1093 			break;
1094 		n++;
1095 
1096 		/* we cannot hide pages when pmem->fobj is not defined. */
1097 		if (!pmem->fobj)
1098 			continue;
1099 
1100 		if (pmem_is_hidden(pmem))
1101 			continue;
1102 
1103 		pmem->flags |= PMEM_FLAG_HIDDEN;
1104 		pmem_unmap(pmem, NULL);
1105 	}
1106 }
1107 
1108 /*
1109  * Find the locked pmem covering @page_va, unmap it and move it back to the
1110  * pageable pmem list. Return false if the page was not mapped, true if it was.
1111  */
1112 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1113 				       vaddr_t page_va)
1114 {
1115 	struct tee_pager_pmem *pmem;
1116 	size_t tblidx = 0;
1117 	size_t pgidx = area_va2idx(area, page_va) + area->fobj_pgoffs -
1118 		       ((area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT);
1119 
1120 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1121 		if (pmem->fobj != area->fobj || pmem->fobj_pgidx != pgidx)
1122 			continue;
1123 
1124 		/*
1125 		 * Locked pages may not be shared; these two asserts check
1126 		 * that there's only a single area recorded with this pmem.
1127 		 */
1128 		assert(TAILQ_FIRST(&pmem->fobj->areas) == area);
1129 		assert(TAILQ_LAST(&pmem->fobj->areas,
1130 				  tee_pager_area_head) == area);
1131 
1132 		tblidx = pmem_get_area_tblidx(pmem, area);
1133 		area_set_entry(area, tblidx, 0, 0);
1134 		pgt_dec_used_entries(area->pgt);
1135 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1136 		pmem->fobj = NULL;
1137 		pmem->fobj_pgidx = INVALID_PGIDX;
1138 		tee_pager_npages++;
1139 		set_npages();
1140 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1141 		incr_zi_released();
1142 		return true;
1143 	}
1144 
1145 	return false;
1146 }
1147 
1148 /* Finds the oldest page and unmaps it from all tables */
1149 static struct tee_pager_pmem *tee_pager_get_page(enum tee_pager_area_type at)
1150 {
1151 	struct tee_pager_pmem *pmem;
1152 
1153 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1154 	if (!pmem) {
1155 		EMSG("No pmem entries");
1156 		return NULL;
1157 	}
1158 
1159 	if (pmem->fobj) {
1160 		pmem_unmap(pmem, NULL);
1161 		tee_pager_save_page(pmem);
1162 	}
1163 
1164 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1165 	pmem->fobj = NULL;
1166 	pmem->fobj_pgidx = INVALID_PGIDX;
1167 	pmem->flags = 0;
1168 	if (at == PAGER_AREA_TYPE_LOCK) {
1169 		/* Move page to lock list */
1170 		if (tee_pager_npages <= 0)
1171 			panic("running out of page");
1172 		tee_pager_npages--;
1173 		set_npages();
1174 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1175 	} else {
1176 		/* move page to back */
1177 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1178 	}
1179 
1180 	return pmem;
1181 }
1182 
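/*
 * Handle a fault on a page that already has a valid mapping. Genuine
 * permission violations are reported: return true with *handled still
 * false for user aborts, panic for kernel aborts. A first write to a
 * writable area whose page is still mapped read-only marks the pmem dirty
 * and upgrades the mapping. Returns false only if the entry wasn't valid,
 * meaning the page still has to be paged in.
 */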
1183 static bool pager_update_permissions(struct tee_pager_area *area,
1184 			struct abort_info *ai, bool *handled)
1185 {
1186 	unsigned int pgidx = area_va2idx(area, ai->va);
1187 	struct tee_pager_pmem *pmem = NULL;
1188 	uint32_t attr = 0;
1189 	paddr_t pa = 0;
1190 
1191 	*handled = false;
1192 
1193 	area_get_entry(area, pgidx, &pa, &attr);
1194 
1195 	/* Not mapped */
1196 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1197 		return false;
1198 
1199 	/* Not readable, should not happen */
1200 	if (abort_is_user_exception(ai)) {
1201 		if (!(attr & TEE_MATTR_UR))
1202 			return true;
1203 	} else {
1204 		if (!(attr & TEE_MATTR_PR)) {
1205 			abort_print_error(ai);
1206 			panic();
1207 		}
1208 	}
1209 
1210 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1211 	case CORE_MMU_FAULT_TRANSLATION:
1212 	case CORE_MMU_FAULT_READ_PERMISSION:
1213 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1214 			/* Check for an attempt to execute from a non-executable page */
1215 			if (abort_is_user_exception(ai)) {
1216 				if (!(attr & TEE_MATTR_UX))
1217 					return true;
1218 			} else {
1219 				if (!(attr & TEE_MATTR_PX)) {
1220 					abort_print_error(ai);
1221 					panic();
1222 				}
1223 			}
1224 		}
1225 		/* Since the page is mapped now it's OK */
1226 		break;
1227 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1228 		/* Check for an attempt to write to a read-only page */
1229 		pmem = pmem_find(area, pgidx);
1230 		if (!pmem)
1231 			panic();
1232 		if (abort_is_user_exception(ai)) {
1233 			if (!(area->flags & TEE_MATTR_UW))
1234 				return true;
1235 			if (!(attr & TEE_MATTR_UW)) {
1236 				FMSG("Dirty %p",
1237 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1238 				pmem->flags |= PMEM_FLAG_DIRTY;
1239 				area_set_entry(area, pgidx, pa,
1240 					       get_area_mattr(area->flags));
1241 				area_tlbi_entry(area, pgidx);
1242 			}
1243 
1244 		} else {
1245 			if (!(area->flags & TEE_MATTR_PW)) {
1246 				abort_print_error(ai);
1247 				panic();
1248 			}
1249 			if (!(attr & TEE_MATTR_PW)) {
1250 				FMSG("Dirty %p",
1251 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1252 				pmem->flags |= PMEM_FLAG_DIRTY;
1253 				area_set_entry(area, pgidx, pa,
1254 					       get_area_mattr(area->flags));
1255 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1256 			}
1257 		}
1258 		/* Since permissions have been updated it's now OK */
1259 		break;
1260 	default:
1261 		/* Some fault we can't deal with */
1262 		if (abort_is_user_exception(ai))
1263 			return true;
1264 		abort_print_error(ai);
1265 		panic();
1266 	}
1267 	*handled = true;
1268 	return true;
1269 }
1270 
1271 #ifdef CFG_TEE_CORE_DEBUG
1272 static void stat_handle_fault(void)
1273 {
1274 	static size_t num_faults;
1275 	static size_t min_npages = SIZE_MAX;
1276 	static size_t total_min_npages = SIZE_MAX;
1277 
1278 	num_faults++;
1279 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1280 		DMSG("nfaults %zu npages %zu (min %zu)",
1281 		     num_faults, tee_pager_npages, min_npages);
1282 		min_npages = tee_pager_npages; /* reset */
1283 	}
1284 	if (tee_pager_npages < min_npages)
1285 		min_npages = tee_pager_npages;
1286 	if (tee_pager_npages < total_min_npages)
1287 		total_min_npages = tee_pager_npages;
1288 }
1289 #else
1290 static void stat_handle_fault(void)
1291 {
1292 }
1293 #endif
1294 
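/*
 * Main entry point from the abort handler for addresses managed by the
 * pager. The fault is resolved by unhiding an already resident page, by
 * updating permissions on an already mapped page, or by paging the page
 * in via the aliased mapping. Returns false if the faulting address isn't
 * covered by any pager area so the abort must be handled elsewhere.
 */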
1295 bool tee_pager_handle_fault(struct abort_info *ai)
1296 {
1297 	struct tee_pager_area *area;
1298 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1299 	uint32_t exceptions;
1300 	bool ret;
1301 	bool clean_user_cache = false;
1302 
1303 #ifdef TEE_PAGER_DEBUG_PRINT
1304 	if (!abort_is_user_exception(ai))
1305 		abort_print(ai);
1306 #endif
1307 
1308 	/*
1309 	 * We're updating pages that can affect several active CPUs at a
1310 	 * time below. We end up here because a thread tries to access some
1311 	 * memory that isn't available. We have to be careful when making
1312 	 * that memory available as other threads may succeed in accessing
1313 	 * that address the moment after we've made it available.
1314 	 *
1315 	 * That means that we can't just map the memory and populate the
1316 	 * page, instead we use the aliased mapping to populate the page
1317 	 * and once everything is ready we map it.
1318 	 */
1319 	exceptions = pager_lock(ai);
1320 
1321 	stat_handle_fault();
1322 
1323 	/* check if the access is valid */
1324 	if (abort_is_user_exception(ai)) {
1325 		area = find_uta_area(ai->va);
1326 		clean_user_cache = true;
1327 	} else {
1328 		area = find_area(&tee_pager_area_head, ai->va);
1329 		if (!area) {
1330 			area = find_uta_area(ai->va);
1331 			clean_user_cache = true;
1332 		}
1333 	}
1334 	if (!area || !area->pgt) {
1335 		ret = false;
1336 		goto out;
1337 	}
1338 
1339 	if (!tee_pager_unhide_page(area, area_va2idx(area, page_va))) {
1340 		struct tee_pager_pmem *pmem = NULL;
1341 		uint32_t attr = 0;
1342 		paddr_t pa = 0;
1343 		size_t tblidx = 0;
1344 
1345 		/*
1346 		 * The page wasn't hidden, but some other core may have
1347 		 * updated the table entry before we got here or we need
1348 		 * to make a read-only page read-write (dirty).
1349 		 */
1350 		if (pager_update_permissions(area, ai, &ret)) {
1351 			/*
1352 			 * Nothing more to do with the abort. The problem
1353 			 * could already have been dealt with from another
1354 			 * core, or if ret is false the TA will be panicked.
1355 			 */
1356 			goto out;
1357 		}
1358 
1359 		pmem = tee_pager_get_page(area->type);
1360 		if (!pmem) {
1361 			abort_print(ai);
1362 			panic();
1363 		}
1364 
1365 		/* load page code & data */
1366 		tee_pager_load_page(area, page_va, pmem->va_alias);
1367 
1368 
1369 		pmem->fobj = area->fobj;
1370 		pmem->fobj_pgidx = area_va2idx(area, page_va) +
1371 				   area->fobj_pgoffs -
1372 				   ((area->base & CORE_MMU_PGDIR_MASK) >>
1373 					SMALL_PAGE_SHIFT);
1374 		tblidx = pmem_get_area_tblidx(pmem, area);
1375 		attr = get_area_mattr(area->flags);
1376 		/*
1377 		 * Pages from PAGER_AREA_TYPE_RW start read-only to be
1378 		 * able to tell when they are updated and should be tagged
1379 		 * as dirty.
1380 		 */
1381 		if (area->type == PAGER_AREA_TYPE_RW)
1382 			attr &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1383 		pa = get_pmem_pa(pmem);
1384 
1385 		/*
1386 		 * We've updated the page using the aliased mapping and
1387 		 * some cache maintenance is now needed if it's an
1388 		 * executable page.
1389 		 *
1390 		 * Since the d-cache is a Physically-indexed,
1391 		 * physically-tagged (PIPT) cache we can clean either the
1392 		 * aliased address or the real virtual address. In this
1393 		 * case we choose the real virtual address.
1394 		 *
1395 		 * The i-cache can also be PIPT, but may be something else
1396 		 * too like VIPT. The current code requires the caches to
1397 		 * implement the IVIPT extension, that is:
1398 		 * "instruction cache maintenance is required only after
1399 		 * writing new data to a physical address that holds an
1400 		 * instruction."
1401 		 *
1402 		 * To portably invalidate the icache the page has to
1403 		 * be mapped at the final virtual address but not
1404 		 * executable.
1405 		 */
1406 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1407 			uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1408 					TEE_MATTR_PW | TEE_MATTR_UW;
1409 			void *va = (void *)page_va;
1410 
1411 			/* Set a temporary read-only mapping */
1412 			area_set_entry(area, tblidx, pa, attr & ~mask);
1413 			area_tlbi_entry(area, tblidx);
1414 
1415 			dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1416 			if (clean_user_cache)
1417 				icache_inv_user_range(va, SMALL_PAGE_SIZE);
1418 			else
1419 				icache_inv_range(va, SMALL_PAGE_SIZE);
1420 
1421 			/* Set the final mapping */
1422 			area_set_entry(area, tblidx, pa, attr);
1423 			area_tlbi_entry(area, tblidx);
1424 		} else {
1425 			area_set_entry(area, tblidx, pa, attr);
1426 			/*
1427 			 * No need to flush TLB for this entry, it was
1428 			 * invalid. We should use a barrier though, to make
1429 			 * sure that the change is visible.
1430 			 */
1431 			dsb_ishst();
1432 		}
1433 		pgt_inc_used_entries(area->pgt);
1434 
1435 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1436 
1437 	}
1438 
1439 	tee_pager_hide_pages();
1440 	ret = true;
1441 out:
1442 	pager_unlock(exceptions);
1443 	return ret;
1444 }
1445 
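/*
 * Donate @npages physical pages, currently mapped at @vaddr, to the pool
 * of pageable pages. With @unmap set the pages are unmapped and become
 * immediately available for paging; otherwise they keep backing the paged
 * area that currently covers them and are only tracked by the pager.
 */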
1446 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1447 {
1448 	size_t n;
1449 
1450 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1451 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1452 
1453 	/* setup memory */
1454 	for (n = 0; n < npages; n++) {
1455 		struct core_mmu_table_info *ti;
1456 		struct tee_pager_pmem *pmem;
1457 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1458 		unsigned int pgidx;
1459 		paddr_t pa;
1460 		uint32_t attr;
1461 
1462 		ti = find_table_info(va);
1463 		pgidx = core_mmu_va2idx(ti, va);
1464 		/*
1465 		 * Note that we can only support adding pages in the
1466 		 * valid range of this table info, currently not a problem.
1467 		 */
1468 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1469 
1470 		/* Ignore unmapped pages/blocks */
1471 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1472 			continue;
1473 
1474 		pmem = calloc(1, sizeof(struct tee_pager_pmem));
1475 		if (!pmem)
1476 			panic("out of mem");
1477 
1478 		pmem->va_alias = pager_add_alias_page(pa);
1479 
1480 		if (unmap) {
1481 			pmem->fobj = NULL;
1482 			pmem->fobj_pgidx = INVALID_PGIDX;
1483 			core_mmu_set_entry(ti, pgidx, 0, 0);
1484 			pgt_dec_used_entries(find_core_pgt(va));
1485 		} else {
1486 			struct tee_pager_area *area = NULL;
1487 
1488 			/*
1489 			 * The page is still mapped, let's assign the area
1490 			 * and update the protection bits accordingly.
1491 			 */
1492 			area = find_area(&tee_pager_area_head, va);
1493 			assert(area && area->pgt == find_core_pgt(va));
1494 			pmem->fobj = area->fobj;
1495 			pmem->fobj_pgidx = pgidx + area->fobj_pgoffs -
1496 					   ((area->base &
1497 							CORE_MMU_PGDIR_MASK) >>
1498 						SMALL_PAGE_SHIFT);
1499 			assert(pgidx == pmem_get_area_tblidx(pmem, area));
1500 			assert(pa == get_pmem_pa(pmem));
1501 			area_set_entry(area, pgidx, pa,
1502 				       get_area_mattr(area->flags));
1503 		}
1504 
1505 		tee_pager_npages++;
1506 		incr_npages_all();
1507 		set_npages();
1508 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1509 	}
1510 
1511 	/*
1512 	 * As this is done at init, invalidate all TLBs once instead of
1513 	 * targeting only the modified entries.
1514 	 */
1515 	tlbi_all();
1516 }
1517 
1518 #ifdef CFG_PAGED_USER_TA
1519 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1520 {
1521 	struct pgt *p = pgt;
1522 
1523 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1524 		p = SLIST_NEXT(p, link);
1525 	return p;
1526 }
1527 
1528 void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
1529 {
1530 	struct tee_pager_area *area = NULL;
1531 	struct pgt *pgt = NULL;
1532 
1533 	if (!utc->areas)
1534 		return;
1535 
1536 	pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1537 	TAILQ_FOREACH(area, utc->areas, link) {
1538 		if (!area->pgt)
1539 			area->pgt = find_pgt(pgt, area->base);
1540 		else
1541 			assert(area->pgt == find_pgt(pgt, area->base));
1542 		if (!area->pgt)
1543 			panic();
1544 	}
1545 }
1546 
1547 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1548 {
1549 	struct tee_pager_pmem *pmem = NULL;
1550 	struct tee_pager_area *area = NULL;
1551 	struct tee_pager_area_head *areas = NULL;
1552 	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1553 
1554 	if (!pgt->num_used_entries)
1555 		goto out;
1556 
1557 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1558 		if (pmem->fobj)
1559 			pmem_unmap(pmem, pgt);
1560 	}
1561 	assert(!pgt->num_used_entries);
1562 
1563 out:
1564 	areas = to_user_ta_ctx(pgt->ctx)->areas;
1565 	if (areas) {
1566 		TAILQ_FOREACH(area, areas, link) {
1567 			if (area->pgt == pgt)
1568 				area->pgt = NULL;
1569 		}
1570 	}
1571 
1572 	pager_unlock(exceptions);
1573 }
1574 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1575 #endif /*CFG_PAGED_USER_TA*/
1576 
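/*
 * Return the physical pages backing the locked range [addr, addr + size)
 * to the pool of pageable pages. Only whole pages fully inside the range
 * are released.
 */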
1577 void tee_pager_release_phys(void *addr, size_t size)
1578 {
1579 	bool unmapped = false;
1580 	vaddr_t va = (vaddr_t)addr;
1581 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1582 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1583 	struct tee_pager_area *area;
1584 	uint32_t exceptions;
1585 
1586 	if (end <= begin)
1587 		return;
1588 
1589 	exceptions = pager_lock_check_stack(128);
1590 
1591 	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
1592 		area = find_area(&tee_pager_area_head, va);
1593 		if (!area)
1594 			panic();
1595 		unmapped |= tee_pager_release_one_phys(area, va);
1596 	}
1597 
1598 	if (unmapped)
1599 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1600 
1601 	pager_unlock(exceptions);
1602 }
1603 KEEP_PAGER(tee_pager_release_phys);
1604 
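/*
 * Allocate pager-backed core memory. The virtual range is reserved from
 * tee_mm_vcore and backed by a locked-paged fobj: pages are paged in on
 * demand and then stay resident (on the locked list) until released with
 * tee_pager_release_phys().
 */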
1605 void *tee_pager_alloc(size_t size)
1606 {
1607 	tee_mm_entry_t *mm = NULL;
1608 	uint8_t *smem = NULL;
1609 	size_t num_pages = 0;
1610 	struct fobj *fobj = NULL;
1611 
1612 	if (!size)
1613 		return NULL;
1614 
1615 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1616 	if (!mm)
1617 		return NULL;
1618 
1619 	smem = (uint8_t *)tee_mm_get_smem(mm);
1620 	num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
1621 	fobj = fobj_locked_paged_alloc(num_pages);
1622 	if (!fobj) {
1623 		tee_mm_free(mm);
1624 		return NULL;
1625 	}
1626 
1627 	tee_pager_add_core_area((vaddr_t)smem, PAGER_AREA_TYPE_LOCK, fobj);
1628 	fobj_put(fobj);
1629 
1630 	asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE);
1631 
1632 	return smem;
1633 }
1634