xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision c6e827c0ef7f04c1e4cad39dd46e033e895178f6)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2021, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <io.h>
10 #include <keep.h>
11 #include <kernel/abort.h>
12 #include <kernel/asan.h>
13 #include <kernel/cache_helpers.h>
14 #include <kernel/linker.h>
15 #include <kernel/panic.h>
16 #include <kernel/spinlock.h>
17 #include <kernel/tee_misc.h>
18 #include <kernel/tee_ta_manager.h>
19 #include <kernel/thread.h>
20 #include <kernel/tlb_helpers.h>
21 #include <kernel/user_mode_ctx.h>
22 #include <mm/core_memprot.h>
23 #include <mm/fobj.h>
24 #include <mm/tee_mm.h>
25 #include <mm/tee_pager.h>
26 #include <stdlib.h>
27 #include <sys/queue.h>
28 #include <tee_api_defines.h>
29 #include <trace.h>
30 #include <types_ext.h>
31 #include <utee_defines.h>
32 #include <util.h>
33 
34 
35 static struct tee_pager_area_head tee_pager_area_head =
36 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
37 
38 #define INVALID_PGIDX		UINT_MAX
39 #define PMEM_FLAG_DIRTY		BIT(0)
40 #define PMEM_FLAG_HIDDEN	BIT(1)
41 
42 /*
43  * struct tee_pager_pmem - Represents a physical page used for paging.
44  *
45  * @flags	flags defined by PMEM_FLAG_* above
46  * @fobj_pgidx	index of the page in the @fobj
47  * @fobj	File object whose page is made visible via this physical page.
48  * @va_alias	Virtual address where the physical page is always aliased.
49  *		Used during remapping of the page when the content needs to
50  *		be updated before it's available at the new location.
51  */
52 struct tee_pager_pmem {
53 	unsigned int flags;
54 	unsigned int fobj_pgidx;
55 	struct fobj *fobj;
56 	void *va_alias;
57 	TAILQ_ENTRY(tee_pager_pmem) link;
58 };
59 
60 struct tblidx {
61 	struct pgt *pgt;
62 	unsigned int idx;
63 };
64 
65 /* The list of physical pages. The first page in the list is the oldest */
66 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
67 
68 static struct tee_pager_pmem_head tee_pager_pmem_head =
69 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
70 
71 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
72 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
73 
74 /* number of pages hidden */
75 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
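/*
 * Worked example (illustrative numbers, not from this file): with
 * tee_pager_npages == 12 registered pageable pages, TEE_PAGER_NHIDE
 * evaluates to 4, so tee_pager_hide_pages() unmaps the 4 oldest pages
 * on each fault; later accesses to them fault again and move them to
 * the tail of the LRU-like list.
 */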
76 
77 /* Number of registered physical pages, used when hiding pages. */
78 static size_t tee_pager_npages;
79 
80 /* This area covers the IVs for all fobjs with paged IVs */
81 static struct tee_pager_area *pager_iv_area;
82 /* Used by make_iv_available(), see make_iv_available() for details. */
83 static struct tee_pager_pmem *pager_spare_pmem;
84 
85 #ifdef CFG_WITH_STATS
86 static struct tee_pager_stats pager_stats;
87 
88 static inline void incr_ro_hits(void)
89 {
90 	pager_stats.ro_hits++;
91 }
92 
93 static inline void incr_rw_hits(void)
94 {
95 	pager_stats.rw_hits++;
96 }
97 
98 static inline void incr_hidden_hits(void)
99 {
100 	pager_stats.hidden_hits++;
101 }
102 
103 static inline void incr_zi_released(void)
104 {
105 	pager_stats.zi_released++;
106 }
107 
108 static inline void incr_npages_all(void)
109 {
110 	pager_stats.npages_all++;
111 }
112 
113 static inline void set_npages(void)
114 {
115 	pager_stats.npages = tee_pager_npages;
116 }
117 
118 void tee_pager_get_stats(struct tee_pager_stats *stats)
119 {
120 	*stats = pager_stats;
121 
122 	pager_stats.hidden_hits = 0;
123 	pager_stats.ro_hits = 0;
124 	pager_stats.rw_hits = 0;
125 	pager_stats.zi_released = 0;
126 }
127 
128 #else /* CFG_WITH_STATS */
129 static inline void incr_ro_hits(void) { }
130 static inline void incr_rw_hits(void) { }
131 static inline void incr_hidden_hits(void) { }
132 static inline void incr_zi_released(void) { }
133 static inline void incr_npages_all(void) { }
134 static inline void set_npages(void) { }
135 
136 void tee_pager_get_stats(struct tee_pager_stats *stats)
137 {
138 	memset(stats, 0, sizeof(struct tee_pager_stats));
139 }
140 #endif /* CFG_WITH_STATS */
141 
142 #define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
143 #define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
144 #define TBL_SHIFT	SMALL_PAGE_SHIFT
145 
146 #define EFFECTIVE_VA_SIZE \
147 	(ROUNDUP(VCORE_START_VA + TEE_RAM_VA_SIZE, CORE_MMU_PGDIR_SIZE) - \
148 	 ROUNDDOWN(VCORE_START_VA, CORE_MMU_PGDIR_SIZE))
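/*
 * Example (hypothetical platform values): with VCORE_START_VA = 0x0e100000,
 * TEE_RAM_VA_SIZE = 0x00300000 and a 2 MiB CORE_MMU_PGDIR_SIZE,
 * EFFECTIVE_VA_SIZE = ROUNDUP(0x0e400000, 2M) - ROUNDDOWN(0x0e100000, 2M)
 * = 0x0e400000 - 0x0e000000 = 4 MiB, so tee_pager_early_init() below
 * allocates two pager_tables entries.
 */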
149 
150 static struct pager_table {
151 	struct pgt pgt;
152 	struct core_mmu_table_info tbl_info;
153 } *pager_tables;
154 static unsigned int num_pager_tables;
155 
156 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
157 
158 /* Defines the range of the alias area */
159 static tee_mm_entry_t *pager_alias_area;
160 /*
161  * Physical pages are added to the alias area in a stack-like fashion.
162  * @pager_alias_next_free gives the address of the next free entry if
163  * @pager_alias_next_free is != 0
164  */
165 static uintptr_t pager_alias_next_free;
166 
167 #ifdef CFG_TEE_CORE_DEBUG
168 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
169 
170 static uint32_t pager_lock_dldetect(const char *func, const int line,
171 				    struct abort_info *ai)
172 {
173 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
174 	unsigned int retries = 0;
175 	unsigned int reminder = 0;
176 
177 	while (!cpu_spin_trylock(&pager_spinlock)) {
178 		retries++;
179 		if (!retries) {
180 			/* wrapped, time to report */
181 			trace_printf(func, line, TRACE_ERROR, true,
182 				     "possible spinlock deadlock reminder %u",
183 				     reminder);
184 			if (reminder < UINT_MAX)
185 				reminder++;
186 			if (ai)
187 				abort_print(ai);
188 		}
189 	}
190 
191 	return exceptions;
192 }
193 #else
194 static uint32_t pager_lock(struct abort_info __unused *ai)
195 {
196 	return cpu_spin_lock_xsave(&pager_spinlock);
197 }
198 #endif
199 
200 static uint32_t pager_lock_check_stack(size_t stack_size)
201 {
202 	if (stack_size) {
203 		int8_t buf[stack_size];
204 		size_t n;
205 
206 		/*
207 		 * Make sure to touch all pages of the stack that we expect
208 		 * to use with this lock held. Any page faults on the stack
209 		 * must be taken before the lock is taken or we'll deadlock
210 		 * the pager. The pages that are populated in this way will
211 		 * eventually be released at certain save transitions of
212 		 * the thread.
213 		 */
214 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
215 			io_write8((vaddr_t)buf + n, 1);
216 		io_write8((vaddr_t)buf + stack_size - 1, 1);
217 	}
218 
219 	return pager_lock(NULL);
220 }
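/*
 * Callers of pager_lock_check_stack() pass an upper bound on the stack
 * they expect to use while holding the pager lock, for example
 * pager_lock_check_stack(64) in rem_area() or
 * pager_lock_check_stack(SMALL_PAGE_SIZE) in tee_pager_set_um_area_attr().
 * The sizes are estimates chosen by each caller, not derived automatically.
 */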
221 
222 static void pager_unlock(uint32_t exceptions)
223 {
224 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
225 }
226 
227 void *tee_pager_phys_to_virt(paddr_t pa)
228 {
229 	struct core_mmu_table_info ti;
230 	unsigned idx;
231 	uint32_t a;
232 	paddr_t p;
233 	vaddr_t v;
234 	size_t n;
235 
236 	/*
237 	 * Most addresses are mapped linearly, try that first if possible.
238 	 */
239 	if (!tee_pager_get_table_info(pa, &ti))
240 		return NULL; /* impossible pa */
241 	idx = core_mmu_va2idx(&ti, pa);
242 	core_mmu_get_entry(&ti, idx, &p, &a);
243 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
244 		return (void *)core_mmu_idx2va(&ti, idx);
245 
246 	n = 0;
247 	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
248 	while (true) {
249 		while (idx < TBL_NUM_ENTRIES) {
250 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
251 			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
252 				return NULL;
253 
254 			core_mmu_get_entry(&pager_tables[n].tbl_info,
255 					   idx, &p, &a);
256 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
257 				return (void *)v;
258 			idx++;
259 		}
260 
261 		n++;
262 		if (n >= num_pager_tables)
263 			return NULL;
264 		idx = 0;
265 	}
266 
267 	return NULL;
268 }
269 
270 static bool pmem_is_hidden(struct tee_pager_pmem *pmem)
271 {
272 	return pmem->flags & PMEM_FLAG_HIDDEN;
273 }
274 
275 static bool pmem_is_dirty(struct tee_pager_pmem *pmem)
276 {
277 	return pmem->flags & PMEM_FLAG_DIRTY;
278 }
279 
280 static bool pmem_is_covered_by_area(struct tee_pager_pmem *pmem,
281 				    struct tee_pager_area *area)
282 {
283 	if (pmem->fobj != area->fobj)
284 		return false;
285 	if (pmem->fobj_pgidx < area->fobj_pgoffs)
286 		return false;
287 	if ((pmem->fobj_pgidx - area->fobj_pgoffs) >=
288 	    (area->size >> SMALL_PAGE_SHIFT))
289 		return false;
290 
291 	return true;
292 }
293 
294 static size_t get_pgt_count(vaddr_t base, size_t size)
295 {
296 	assert(size);
297 
298 	return (base + size - 1) / CORE_MMU_PGDIR_SIZE + 1 -
299 	       base / CORE_MMU_PGDIR_SIZE;
300 }
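/*
 * Worked example (assuming a 2 MiB CORE_MMU_PGDIR_SIZE): an 8 KiB area
 * at base 0x1ff000 crosses a pgdir boundary, so
 *	get_pgt_count(0x1ff000, 0x2000)
 *		= (0x200fff / 2M + 1) - (0x1ff000 / 2M)
 *		= (1 + 1) - 0 = 2
 * translation tables are needed, while the same area based at 0x200000
 * would need only one.
 */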
301 
302 static bool area_have_pgt(struct tee_pager_area *area, struct pgt *pgt)
303 {
304 	size_t n = 0;
305 
306 	for (n = 0; n < get_pgt_count(area->base, area->size); n++)
307 		if (area->pgt_array[n] == pgt)
308 			return true;
309 
310 	return false;
311 }
312 
313 static struct tblidx pmem_get_area_tblidx(struct tee_pager_pmem *pmem,
314 					  struct tee_pager_area *area)
315 {
316 	size_t tbloffs = (area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT;
317 	size_t idx = pmem->fobj_pgidx - area->fobj_pgoffs + tbloffs;
318 
319 	assert(pmem->fobj && pmem->fobj_pgidx != INVALID_PGIDX);
320 	assert(idx / TBL_NUM_ENTRIES < get_pgt_count(area->base, area->size));
321 
322 	return (struct tblidx){
323 		.idx = idx % TBL_NUM_ENTRIES,
324 		.pgt = area->pgt_array[idx / TBL_NUM_ENTRIES],
325 	};
326 }
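/*
 * Index math example (assuming TBL_NUM_ENTRIES == 512, i.e. 4 KiB pages
 * with a 2 MiB pgdir): for an area starting at offset 0x180000 into its
 * pgdir, tbloffs = 0x180000 >> 12 = 384. A pmem holding the 200th page
 * of that area (pmem->fobj_pgidx - area->fobj_pgoffs == 200) gives
 * idx = 584, which selects area->pgt_array[1] and entry 72 in that table.
 */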
327 
328 static struct pager_table *find_pager_table_may_fail(vaddr_t va)
329 {
330 	size_t n;
331 	const vaddr_t mask = CORE_MMU_PGDIR_MASK;
332 
333 	if (!pager_tables)
334 		return NULL;
335 
336 	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
337 	    CORE_MMU_PGDIR_SHIFT;
338 	if (n >= num_pager_tables)
339 		return NULL;
340 
341 	assert(va >= pager_tables[n].tbl_info.va_base &&
342 	       va <= (pager_tables[n].tbl_info.va_base | mask));
343 
344 	return pager_tables + n;
345 }
346 
347 static struct pager_table *find_pager_table(vaddr_t va)
348 {
349 	struct pager_table *pt = find_pager_table_may_fail(va);
350 
351 	assert(pt);
352 	return pt;
353 }
354 
355 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
356 {
357 	struct pager_table *pt = find_pager_table_may_fail(va);
358 
359 	if (!pt)
360 		return false;
361 
362 	*ti = pt->tbl_info;
363 	return true;
364 }
365 
366 static struct core_mmu_table_info *find_table_info(vaddr_t va)
367 {
368 	return &find_pager_table(va)->tbl_info;
369 }
370 
371 static struct pgt *find_core_pgt(vaddr_t va)
372 {
373 	return &find_pager_table(va)->pgt;
374 }
375 
376 void tee_pager_set_alias_area(tee_mm_entry_t *mm)
377 {
378 	struct pager_table *pt;
379 	unsigned idx;
380 	vaddr_t smem = tee_mm_get_smem(mm);
381 	size_t nbytes = tee_mm_get_bytes(mm);
382 	vaddr_t v;
383 	uint32_t a = 0;
384 
385 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
386 
387 	assert(!pager_alias_area);
388 	pager_alias_area = mm;
389 	pager_alias_next_free = smem;
390 
391 	/* Clear all mappings in the alias area */
392 	pt = find_pager_table(smem);
393 	idx = core_mmu_va2idx(&pt->tbl_info, smem);
394 	while (pt <= (pager_tables + num_pager_tables - 1)) {
395 		while (idx < TBL_NUM_ENTRIES) {
396 			v = core_mmu_idx2va(&pt->tbl_info, idx);
397 			if (v >= (smem + nbytes))
398 				goto out;
399 
400 			core_mmu_get_entry(&pt->tbl_info, idx, NULL, &a);
401 			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
402 			if (a & TEE_MATTR_VALID_BLOCK)
403 				pgt_dec_used_entries(&pt->pgt);
404 			idx++;
405 		}
406 
407 		pt++;
408 		idx = 0;
409 	}
410 
411 out:
412 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
413 }
414 
415 static size_t tbl_usage_count(struct core_mmu_table_info *ti)
416 {
417 	size_t n;
418 	uint32_t a = 0;
419 	size_t usage = 0;
420 
421 	for (n = 0; n < ti->num_entries; n++) {
422 		core_mmu_get_entry(ti, n, NULL, &a);
423 		if (a & TEE_MATTR_VALID_BLOCK)
424 			usage++;
425 	}
426 	return usage;
427 }
428 
429 static void tblidx_get_entry(struct tblidx tblidx, paddr_t *pa, uint32_t *attr)
430 {
431 	assert(tblidx.pgt && tblidx.idx < TBL_NUM_ENTRIES);
432 	core_mmu_get_entry_primitive(tblidx.pgt->tbl, TBL_LEVEL, tblidx.idx,
433 				     pa, attr);
434 }
435 
436 static void tblidx_set_entry(struct tblidx tblidx, paddr_t pa, uint32_t attr)
437 {
438 	assert(tblidx.pgt && tblidx.idx < TBL_NUM_ENTRIES);
439 	core_mmu_set_entry_primitive(tblidx.pgt->tbl, TBL_LEVEL, tblidx.idx,
440 				     pa, attr);
441 }
442 
443 static struct tblidx area_va2tblidx(struct tee_pager_area *area, vaddr_t va)
444 {
445 	paddr_t mask = CORE_MMU_PGDIR_MASK;
446 	size_t n = 0;
447 
448 	assert(va >= area->base && va < (area->base + area->size));
449 	n = (va - (area->base & ~mask)) / CORE_MMU_PGDIR_SIZE;
450 
451 	return (struct tblidx){
452 		.idx = (va & mask) / SMALL_PAGE_SIZE,
453 		.pgt = area->pgt_array[n],
454 	};
455 }
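/*
 * Complementary to pmem_get_area_tblidx() above: @n picks the
 * translation table covering @va, counted from the pgdir that contains
 * area->base, and the entry index is the page offset of @va within its
 * pgdir. E.g. with a 2 MiB pgdir and a pgdir-aligned base,
 * va = base + 0x201000 maps to pgt_array[1], entry 1.
 */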
456 
457 static vaddr_t tblidx2va(struct tblidx tblidx)
458 {
459 	return tblidx.pgt->vabase + (tblidx.idx << SMALL_PAGE_SHIFT);
460 }
461 
462 static void tblidx_tlbi_entry(struct tblidx tblidx)
463 {
464 	vaddr_t va = tblidx2va(tblidx);
465 
466 #if defined(CFG_PAGED_USER_TA)
467 	if (tblidx.pgt->ctx) {
468 		uint32_t asid = to_user_mode_ctx(tblidx.pgt->ctx)->vm_info.asid;
469 
470 		tlbi_mva_asid(va, asid);
471 		return;
472 	}
473 #endif
474 	tlbi_mva_allasid(va);
475 }
476 
477 static void pmem_assign_fobj_page(struct tee_pager_pmem *pmem,
478 				  struct tee_pager_area *area, vaddr_t va)
479 {
480 	struct tee_pager_pmem *p = NULL;
481 	unsigned int fobj_pgidx = 0;
482 
483 	assert(!pmem->fobj && pmem->fobj_pgidx == INVALID_PGIDX);
484 
485 	assert(va >= area->base && va < (area->base + area->size));
486 	fobj_pgidx = (va - area->base) / SMALL_PAGE_SIZE + area->fobj_pgoffs;
487 
488 	TAILQ_FOREACH(p, &tee_pager_pmem_head, link)
489 		assert(p->fobj != area->fobj || p->fobj_pgidx != fobj_pgidx);
490 
491 	pmem->fobj = area->fobj;
492 	pmem->fobj_pgidx = fobj_pgidx;
493 }
494 
495 static void pmem_clear(struct tee_pager_pmem *pmem)
496 {
497 	pmem->fobj = NULL;
498 	pmem->fobj_pgidx = INVALID_PGIDX;
499 	pmem->flags = 0;
500 }
501 
502 static void pmem_unmap(struct tee_pager_pmem *pmem, struct pgt *only_this_pgt)
503 {
504 	struct tee_pager_area *area = NULL;
505 	struct tblidx tblidx = { };
506 	uint32_t a = 0;
507 
508 	TAILQ_FOREACH(area, &pmem->fobj->areas, fobj_link) {
509 		/*
510 		 * If only_this_pgt points to a pgt then the pgt of this
511 		 * area has to match or we'll skip over it.
512 		 */
513 		if (only_this_pgt && !area_have_pgt(area, only_this_pgt))
514 			continue;
515 		if (!pmem_is_covered_by_area(pmem, area))
516 			continue;
517 		tblidx = pmem_get_area_tblidx(pmem, area);
518 		if (!tblidx.pgt)
519 			continue;
520 		tblidx_get_entry(tblidx, NULL, &a);
521 		if (a & TEE_MATTR_VALID_BLOCK) {
522 			tblidx_set_entry(tblidx, 0, 0);
523 			pgt_dec_used_entries(tblidx.pgt);
524 			tblidx_tlbi_entry(tblidx);
525 		}
526 	}
527 }
528 
529 void tee_pager_early_init(void)
530 {
531 	size_t n = 0;
532 
533 	num_pager_tables = EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE;
534 	pager_tables = calloc(num_pager_tables, sizeof(*pager_tables));
535 	if (!pager_tables)
536 		panic("Cannot allocate pager_tables");
537 
538 	/*
539 	 * Note that this depends on add_pager_vaspace() adding the vaspace
540 	 * after the end of memory.
541 	 */
542 	for (n = 0; n < num_pager_tables; n++) {
543 		if (!core_mmu_find_table(NULL, VCORE_START_VA +
544 					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
545 					 &pager_tables[n].tbl_info))
546 			panic("can't find mmu tables");
547 
548 		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
549 			panic("Unsupported page size in translation table");
550 		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
551 		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);
552 
553 		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
554 		pager_tables[n].pgt.vabase = pager_tables[n].tbl_info.va_base;
555 		pgt_set_used_entries(&pager_tables[n].pgt,
556 				tbl_usage_count(&pager_tables[n].tbl_info));
557 	}
558 }
559 
560 static void *pager_add_alias_page(paddr_t pa)
561 {
562 	unsigned idx;
563 	struct core_mmu_table_info *ti;
564 	/* Alias pages start read-only; write permission is enabled only when needed */
565 	uint32_t attr = TEE_MATTR_VALID_BLOCK |
566 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
567 			TEE_MATTR_SECURE | TEE_MATTR_PR;
568 
569 	DMSG("0x%" PRIxPA, pa);
570 
571 	ti = find_table_info(pager_alias_next_free);
572 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
573 	core_mmu_set_entry(ti, idx, pa, attr);
574 	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
575 	pager_alias_next_free += SMALL_PAGE_SIZE;
576 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
577 				      tee_mm_get_bytes(pager_alias_area)))
578 		pager_alias_next_free = 0;
579 	return (void *)core_mmu_idx2va(ti, idx);
580 }
581 
582 static void area_insert(struct tee_pager_area_head *head,
583 			struct tee_pager_area *area,
584 			struct tee_pager_area *a_prev)
585 {
586 	uint32_t exceptions = pager_lock_check_stack(8);
587 
588 	if (a_prev)
589 		TAILQ_INSERT_AFTER(head, a_prev, area, link);
590 	else
591 		TAILQ_INSERT_HEAD(head, area, link);
592 	TAILQ_INSERT_TAIL(&area->fobj->areas, area, fobj_link);
593 
594 	pager_unlock(exceptions);
595 }
596 DECLARE_KEEP_PAGER(area_insert);
597 
598 static struct tee_pager_area *alloc_area(vaddr_t base, size_t size)
599 {
600 	struct tee_pager_area *area = NULL;
601 
602 	if ((base & SMALL_PAGE_MASK) || !size) {
603 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);
604 		panic();
605 	}
606 
607 	area = calloc(1, sizeof(*area));
608 	if (!area)
609 		return NULL;
610 	area->pgt_array = calloc(get_pgt_count(base, size),
611 				 sizeof(struct pgt *));
612 	if (!area->pgt_array) {
613 		free(area);
614 		return NULL;
615 	}
616 
617 	area->base = base;
618 	area->size = size;
619 	return area;
620 }
621 
622 void tee_pager_add_core_area(vaddr_t base, enum tee_pager_area_type type,
623 			     struct fobj *fobj)
624 {
625 	struct tee_pager_area *area = NULL;
626 	size_t n = 0;
627 
628 	assert(fobj);
629 
630 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : type %d",
631 	     base, base + fobj->num_pages * SMALL_PAGE_SIZE, type);
632 
633 	area = alloc_area(base, fobj->num_pages * SMALL_PAGE_SIZE);
634 	if (!area)
635 		panic("alloc_area");
636 
637 	area->fobj = fobj_get(fobj);
638 	area->fobj_pgoffs = 0;
639 	area->type = type;
640 
641 	switch (type) {
642 	case PAGER_AREA_TYPE_RO:
643 		area->flags = TEE_MATTR_PRX;
644 		break;
645 	case PAGER_AREA_TYPE_RW:
646 	case PAGER_AREA_TYPE_LOCK:
647 		area->flags = TEE_MATTR_PRW;
648 		break;
649 	default:
650 		panic();
651 	}
652 
653 	for (n = 0; n < get_pgt_count(area->base, area->size); n++)
654 		area->pgt_array[n] = find_core_pgt(base +
655 						   n * CORE_MMU_PGDIR_SIZE);
656 	area_insert(&tee_pager_area_head, area, NULL);
657 }
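/*
 * Usage sketch (mirroring tee_pager_alloc() further down): the caller
 * allocates virtual space, backs it with a paged fobj, registers the
 * area and then drops its own fobj reference since the area has taken
 * one of its own:
 *
 *	struct fobj *f = fobj_locked_paged_alloc(num_pages);
 *
 *	tee_pager_add_core_area(va, PAGER_AREA_TYPE_LOCK, f);
 *	fobj_put(f);
 */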
658 
659 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
660 					vaddr_t va)
661 {
662 	struct tee_pager_area *area;
663 
664 	if (!areas)
665 		return NULL;
666 
667 	TAILQ_FOREACH(area, areas, link) {
668 		if (core_is_buffer_inside(va, 1, area->base, area->size))
669 			return area;
670 	}
671 	return NULL;
672 }
673 
674 #ifdef CFG_PAGED_USER_TA
675 static struct tee_pager_area *find_uta_area(vaddr_t va)
676 {
677 	struct ts_ctx *ctx = thread_get_tsd()->ctx;
678 
679 	if (!is_user_mode_ctx(ctx))
680 		return NULL;
681 	return find_area(to_user_mode_ctx(ctx)->areas, va);
682 }
683 #else
684 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
685 {
686 	return NULL;
687 }
688 #endif /*CFG_PAGED_USER_TA*/
689 
690 
691 static uint32_t get_area_mattr(uint32_t area_flags)
692 {
693 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
694 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
695 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
696 
697 	return attr;
698 }
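/*
 * Example: for a PAGER_AREA_TYPE_RO area (area->flags == TEE_MATTR_PRX)
 * this returns TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE | cached memory
 * attributes | TEE_MATTR_PR | TEE_MATTR_PX, i.e. a valid, secure,
 * cached, privileged read/execute mapping.
 */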
699 
700 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
701 {
702 	struct core_mmu_table_info *ti;
703 	paddr_t pa;
704 	unsigned idx;
705 
706 	ti = find_table_info((vaddr_t)pmem->va_alias);
707 	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
708 	core_mmu_get_entry(ti, idx, &pa, NULL);
709 	return pa;
710 }
711 
712 #ifdef CFG_PAGED_USER_TA
713 static void unlink_area(struct tee_pager_area_head *area_head,
714 			struct tee_pager_area *area)
715 {
716 	uint32_t exceptions = pager_lock_check_stack(64);
717 
718 	TAILQ_REMOVE(area_head, area, link);
719 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
720 
721 	pager_unlock(exceptions);
722 }
723 DECLARE_KEEP_PAGER(unlink_area);
724 
725 static void free_area(struct tee_pager_area *area)
726 {
727 	fobj_put(area->fobj);
728 	free(area->pgt_array);
729 	free(area);
730 }
731 
732 static TEE_Result pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base,
733 				    struct fobj *fobj, uint32_t prot)
734 {
735 	struct tee_pager_area *a_prev = NULL;
736 	struct tee_pager_area *area = NULL;
737 	vaddr_t b = base;
738 	size_t fobj_pgoffs = 0;
739 	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
740 
741 	if (!uctx->areas) {
742 		uctx->areas = malloc(sizeof(*uctx->areas));
743 		if (!uctx->areas)
744 			return TEE_ERROR_OUT_OF_MEMORY;
745 		TAILQ_INIT(uctx->areas);
746 	}
747 
748 	area = TAILQ_FIRST(uctx->areas);
749 	while (area) {
750 		if (core_is_buffer_intersect(b, s, area->base,
751 					     area->size))
752 			return TEE_ERROR_BAD_PARAMETERS;
753 		if (b < area->base)
754 			break;
755 		a_prev = area;
756 		area = TAILQ_NEXT(area, link);
757 	}
758 
759 	area = alloc_area(b, s);
760 	if (!area)
761 		return TEE_ERROR_OUT_OF_MEMORY;
762 
763 	/* Table info will be set when the context is activated. */
764 	area->fobj = fobj_get(fobj);
765 	area->fobj_pgoffs = fobj_pgoffs;
766 	area->type = PAGER_AREA_TYPE_RW;
767 	area->flags = prot;
768 
769 	area_insert(uctx->areas, area, a_prev);
770 
771 	return TEE_SUCCESS;
772 }
773 
774 static void map_pgts(struct tee_pager_area *area)
775 {
776 	struct core_mmu_table_info dir_info = { NULL };
777 	size_t n = 0;
778 
779 	core_mmu_get_user_pgdir(&dir_info);
780 
781 	for (n = 0; n < get_pgt_count(area->base, area->size); n++) {
782 		struct pgt *pgt = area->pgt_array[n];
783 		uint32_t attr = 0;
784 		paddr_t pa = 0;
785 		size_t idx = 0;
786 
787 		idx = core_mmu_va2idx(&dir_info, pgt->vabase);
788 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
789 
790 		/*
791 		 * Check if the page table is already in use; if it is,
792 		 * it's already registered.
793 		 */
794 		if (pgt->num_used_entries) {
795 			assert(attr & TEE_MATTR_TABLE);
796 			assert(pa == virt_to_phys(pgt->tbl));
797 			continue;
798 		}
799 
800 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
801 		pa = virt_to_phys(pgt->tbl);
802 		assert(pa);
803 		/*
804 		 * Note that the update of the table entry is guaranteed to
805 		 * be atomic.
806 		 */
807 		core_mmu_set_entry(&dir_info, idx, pa, attr);
808 	}
809 }
810 
811 TEE_Result tee_pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base,
812 				 struct fobj *fobj, uint32_t prot)
813 {
814 	TEE_Result res = TEE_SUCCESS;
815 	struct thread_specific_data *tsd = thread_get_tsd();
816 	struct tee_pager_area *area = NULL;
817 
818 	res = pager_add_um_area(uctx, base, fobj, prot);
819 	if (res)
820 		return res;
821 
822 	if (uctx->ts_ctx == tsd->ctx) {
823 		/*
824 		 * We're changing the currently active user mode context.
825 		 * Assign page tables to the new areas and make sure that
826 		 * the page tables are registered in the upper table.
827 		 */
828 		tee_pager_assign_um_tables(uctx);
829 		TAILQ_FOREACH(area, uctx->areas, link)
830 			map_pgts(area);
831 	}
832 
833 	return TEE_SUCCESS;
834 }
835 
836 static void split_area(struct tee_pager_area_head *area_head,
837 		       struct tee_pager_area *area, struct tee_pager_area *a2,
838 		       vaddr_t va)
839 {
840 	uint32_t exceptions = pager_lock_check_stack(64);
841 	size_t diff = va - area->base;
842 	size_t a2_pgt_count = 0;
843 	size_t n0 = 0;
844 	size_t n = 0;
845 
846 	assert(a2->base == va);
847 	assert(a2->size == area->size - diff);
848 
849 	a2->fobj = fobj_get(area->fobj);
850 	a2->fobj_pgoffs = area->fobj_pgoffs + diff / SMALL_PAGE_SIZE;
851 	a2->type = area->type;
852 	a2->flags = area->flags;
853 
854 	a2_pgt_count = get_pgt_count(a2->base, a2->size);
855 	n0 = get_pgt_count(area->base, area->size) - a2_pgt_count;
856 	for (n = n0; n < a2_pgt_count; n++)
857 		a2->pgt_array[n - n0] = area->pgt_array[n];
858 	area->size = diff;
859 
860 	TAILQ_INSERT_AFTER(area_head, area, a2, link);
861 	TAILQ_INSERT_AFTER(&area->fobj->areas, area, a2, fobj_link);
862 
863 	pager_unlock(exceptions);
864 }
865 DECLARE_KEEP_PAGER(split_area);
866 
867 TEE_Result tee_pager_split_um_region(struct user_mode_ctx *uctx, vaddr_t va)
868 {
869 	struct tee_pager_area *area = NULL;
870 	struct tee_pager_area *a2 = NULL;
871 
872 	if (va & SMALL_PAGE_MASK)
873 		return TEE_ERROR_BAD_PARAMETERS;
874 
875 	TAILQ_FOREACH(area, uctx->areas, link) {
876 		if (va == area->base || va == area->base + area->size)
877 			return TEE_SUCCESS;
878 		if (va > area->base && va < area->base + area->size) {
879 			size_t diff = va - area->base;
880 
881 			a2 = alloc_area(va, area->size - diff);
882 			if (!a2)
883 				return TEE_ERROR_OUT_OF_MEMORY;
884 			split_area(uctx->areas, area, a2, va);
885 			return TEE_SUCCESS;
886 		}
887 	}
888 
889 	return TEE_SUCCESS;
890 }
891 
892 static struct pgt **merge_area_with_next(struct tee_pager_area_head *area_head,
893 					 struct tee_pager_area *a,
894 					 struct tee_pager_area *a_next,
895 					 struct pgt **pgt_array)
896 {
897 	uint32_t exceptions = pager_lock_check_stack(64);
898 	struct pgt **old_pgt_array = a->pgt_array;
899 
900 	a->pgt_array = pgt_array;
901 	TAILQ_REMOVE(area_head, a_next, link);
902 	TAILQ_REMOVE(&a_next->fobj->areas, a_next, fobj_link);
903 
904 	pager_unlock(exceptions);
905 	return old_pgt_array;
906 }
907 DECLARE_KEEP_PAGER(merge_area_with_next);
908 
909 static struct pgt **alloc_merged_pgt_array(struct tee_pager_area *a,
910 					   struct tee_pager_area *a_next)
911 {
912 	size_t a_next_pgt_count = get_pgt_count(a_next->base, a_next->size);
913 	size_t a_pgt_count = get_pgt_count(a->base, a->size);
914 	struct pgt **pgt_array = NULL;
915 
916 	/* In case there's a shared pgt both arrays must reference the same pgt */
917 	if ((a->base & ~CORE_MMU_PGDIR_MASK) ==
918 	    (a_next->base & ~CORE_MMU_PGDIR_MASK) &&
919 	    a->pgt_array[a_pgt_count - 1] != a_next->pgt_array[0])
920 		return NULL;
921 
922 	pgt_array = calloc(sizeof(struct pgt *),
923 			   get_pgt_count(a->base, a->size + a_next->size));
924 	if (!pgt_array)
925 		return NULL;
926 
927 	/*
928 	 * Copy and merge the two pgt_arrays, note the special case
929 	 * where a pgt is shared.
930 	 */
931 	memcpy(pgt_array, a->pgt_array, a_pgt_count * sizeof(struct pgt *));
932 	if ((a->base & ~CORE_MMU_PGDIR_MASK) ==
933 	    (a_next->base & ~CORE_MMU_PGDIR_MASK))
934 		memcpy(pgt_array + a_pgt_count, a_next->pgt_array + 1,
935 		       (a_next_pgt_count - 1) * sizeof(struct pgt *));
936 	else
937 		memcpy(pgt_array + a_pgt_count, a_next->pgt_array,
938 		       a_next_pgt_count * sizeof(struct pgt *));
939 
940 	return pgt_array;
941 }
942 
943 void tee_pager_merge_um_region(struct user_mode_ctx *uctx, vaddr_t va,
944 			       size_t len)
945 {
946 	struct tee_pager_area *a_next = NULL;
947 	struct tee_pager_area *a = NULL;
948 	struct pgt **pgt_array = NULL;
949 	vaddr_t end_va = 0;
950 
951 	if ((va | len) & SMALL_PAGE_MASK)
952 		return;
953 	if (ADD_OVERFLOW(va, len, &end_va))
954 		return;
955 
956 	for (a = TAILQ_FIRST(uctx->areas);; a = a_next) {
957 		a_next = TAILQ_NEXT(a, link);
958 		if (!a_next)
959 			return;
960 
961 		/* Try merging with the area just before va */
962 		if (a->base + a->size < va)
963 			continue;
964 
965 		/*
966 		 * If a->base is well past our range we're done.
967 		 * Note that if it's just the page after our range we'll
968 		 * try to merge.
969 		 */
970 		if (a->base > end_va)
971 			return;
972 
973 		if (a->base + a->size != a_next->base)
974 			continue;
975 		if (a->fobj != a_next->fobj || a->type != a_next->type ||
976 		    a->flags != a_next->flags)
977 			continue;
978 		if (a->fobj_pgoffs + a->size / SMALL_PAGE_SIZE !=
979 		    a_next->fobj_pgoffs)
980 			continue;
981 
982 		pgt_array = alloc_merged_pgt_array(a, a_next);
983 		if (!pgt_array)
984 			continue;
985 
986 		/*
987 		 * merge_area_with_next() returns the old pgt array which
988 		 * was replaced in a. We don't want to call free() directly
989 		 * from merge_area_with_next() since that would pull free()
990 		 * and its dependencies into the unpaged area.
991 		 */
992 		free(merge_area_with_next(uctx->areas, a, a_next, pgt_array));
993 		free_area(a_next);
994 		a_next = a;
995 	}
996 }
997 
998 static void rem_area(struct tee_pager_area_head *area_head,
999 		     struct tee_pager_area *area)
1000 {
1001 	struct tee_pager_pmem *pmem;
1002 	size_t last_pgoffs = area->fobj_pgoffs +
1003 			     (area->size >> SMALL_PAGE_SHIFT) - 1;
1004 	uint32_t exceptions;
1005 	struct tblidx tblidx = { };
1006 	uint32_t a = 0;
1007 
1008 	exceptions = pager_lock_check_stack(64);
1009 
1010 	TAILQ_REMOVE(area_head, area, link);
1011 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
1012 
1013 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1014 		if (pmem->fobj != area->fobj ||
1015 		    pmem->fobj_pgidx < area->fobj_pgoffs ||
1016 		    pmem->fobj_pgidx > last_pgoffs)
1017 			continue;
1018 
1019 		tblidx = pmem_get_area_tblidx(pmem, area);
1020 		tblidx_get_entry(tblidx, NULL, &a);
1021 		if (!(a & TEE_MATTR_VALID_BLOCK))
1022 			continue;
1023 
1024 		tblidx_set_entry(tblidx, 0, 0);
1025 		tblidx_tlbi_entry(tblidx);
1026 		pgt_dec_used_entries(tblidx.pgt);
1027 	}
1028 
1029 	pager_unlock(exceptions);
1030 
1031 	free_area(area);
1032 }
1033 DECLARE_KEEP_PAGER(rem_area);
1034 
1035 void tee_pager_rem_um_region(struct user_mode_ctx *uctx, vaddr_t base,
1036 			     size_t size)
1037 {
1038 	struct tee_pager_area *area;
1039 	struct tee_pager_area *next_a;
1040 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
1041 
1042 	TAILQ_FOREACH_SAFE(area, uctx->areas, link, next_a) {
1043 		if (core_is_buffer_inside(area->base, area->size, base, s))
1044 			rem_area(uctx->areas, area);
1045 	}
1046 	tlbi_asid(uctx->vm_info.asid);
1047 }
1048 
1049 void tee_pager_rem_um_areas(struct user_mode_ctx *uctx)
1050 {
1051 	struct tee_pager_area *area = NULL;
1052 
1053 	if (!uctx->areas)
1054 		return;
1055 
1056 	while (true) {
1057 		area = TAILQ_FIRST(uctx->areas);
1058 		if (!area)
1059 			break;
1060 		unlink_area(uctx->areas, area);
1061 		free_area(area);
1062 	}
1063 
1064 	free(uctx->areas);
1065 }
1066 
1067 static bool __maybe_unused same_context(struct tee_pager_pmem *pmem)
1068 {
1069 	struct tee_pager_area *a = TAILQ_FIRST(&pmem->fobj->areas);
1070 	void *ctx = a->pgt_array[0]->ctx;
1071 
1072 	do {
1073 		a = TAILQ_NEXT(a, fobj_link);
1074 		if (!a)
1075 			return true;
1076 	} while (a->pgt_array[0]->ctx == ctx);
1077 
1078 	return false;
1079 }
1080 
1081 bool tee_pager_set_um_area_attr(struct user_mode_ctx *uctx, vaddr_t base,
1082 				size_t size, uint32_t flags)
1083 {
1084 	bool ret = false;
1085 	vaddr_t b = base;
1086 	size_t s = size;
1087 	size_t s2 = 0;
1088 	struct tee_pager_area *area = find_area(uctx->areas, b);
1089 	uint32_t exceptions = 0;
1090 	struct tee_pager_pmem *pmem = NULL;
1091 	uint32_t a = 0;
1092 	uint32_t f = 0;
1093 	uint32_t mattr = 0;
1094 	uint32_t f2 = 0;
1095 	struct tblidx tblidx = { };
1096 
1097 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
1098 	if (f & TEE_MATTR_UW)
1099 		f |= TEE_MATTR_PW;
1100 	mattr = get_area_mattr(f);
1101 
1102 	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1103 
1104 	while (s) {
1105 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
1106 		if (!area || area->base != b || area->size != s2) {
1107 			ret = false;
1108 			goto out;
1109 		}
1110 		b += s2;
1111 		s -= s2;
1112 
1113 		if (area->flags == f)
1114 			goto next_area;
1115 
1116 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1117 			if (!pmem_is_covered_by_area(pmem, area))
1118 				continue;
1119 
1120 			tblidx = pmem_get_area_tblidx(pmem, area);
1121 			tblidx_get_entry(tblidx, NULL, &a);
1122 			if (a == f)
1123 				continue;
1124 			tblidx_set_entry(tblidx, 0, 0);
1125 			tblidx_tlbi_entry(tblidx);
1126 
1127 			pmem->flags &= ~PMEM_FLAG_HIDDEN;
1128 			if (pmem_is_dirty(pmem))
1129 				f2 = mattr;
1130 			else
1131 				f2 = mattr & ~(TEE_MATTR_UW | TEE_MATTR_PW);
1132 			tblidx_set_entry(tblidx, get_pmem_pa(pmem), f2);
1133 			if (!(a & TEE_MATTR_VALID_BLOCK))
1134 				pgt_inc_used_entries(tblidx.pgt);
1135 			/*
1136 			 * Make sure the table update is visible before
1137 			 * continuing.
1138 			 */
1139 			dsb_ishst();
1140 
1141 			/*
1142 			 * There's a problem if this page is already shared:
1143 			 * we would need to do an icache invalidation for
1144 			 * each context in which it is shared. In practice
1145 			 * this will never happen.
1146 			 */
1147 			if (flags & TEE_MATTR_UX) {
1148 				void *va = (void *)tblidx2va(tblidx);
1149 
1150 				/* Assert that the pmem isn't shared. */
1151 				assert(same_context(pmem));
1152 
1153 				dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1154 				icache_inv_user_range(va, SMALL_PAGE_SIZE);
1155 			}
1156 		}
1157 
1158 		area->flags = f;
1159 next_area:
1160 		area = TAILQ_NEXT(area, link);
1161 	}
1162 
1163 	ret = true;
1164 out:
1165 	pager_unlock(exceptions);
1166 	return ret;
1167 }
1168 
1169 DECLARE_KEEP_PAGER(tee_pager_set_um_area_attr);
1170 #endif /*CFG_PAGED_USER_TA*/
1171 
1172 void tee_pager_invalidate_fobj(struct fobj *fobj)
1173 {
1174 	struct tee_pager_pmem *pmem;
1175 	uint32_t exceptions;
1176 
1177 	exceptions = pager_lock_check_stack(64);
1178 
1179 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
1180 		if (pmem->fobj == fobj)
1181 			pmem_clear(pmem);
1182 
1183 	pager_unlock(exceptions);
1184 }
1185 DECLARE_KEEP_PAGER(tee_pager_invalidate_fobj);
1186 
1187 static struct tee_pager_pmem *pmem_find(struct tee_pager_area *area, vaddr_t va)
1188 {
1189 	struct tee_pager_pmem *pmem = NULL;
1190 	size_t fobj_pgidx = 0;
1191 
1192 	assert(va >= area->base && va < (area->base + area->size));
1193 	fobj_pgidx = (va - area->base) / SMALL_PAGE_SIZE + area->fobj_pgoffs;
1194 
1195 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
1196 		if (pmem->fobj == area->fobj && pmem->fobj_pgidx == fobj_pgidx)
1197 			return pmem;
1198 
1199 	return NULL;
1200 }
1201 
1202 static bool tee_pager_unhide_page(struct tee_pager_area *area, vaddr_t page_va)
1203 {
1204 	struct tblidx tblidx = area_va2tblidx(area, page_va);
1205 	struct tee_pager_pmem *pmem = pmem_find(area, page_va);
1206 	uint32_t a = get_area_mattr(area->flags);
1207 	uint32_t attr = 0;
1208 	paddr_t pa = 0;
1209 
1210 	if (!pmem)
1211 		return false;
1212 
1213 	tblidx_get_entry(tblidx, NULL, &attr);
1214 	if (attr & TEE_MATTR_VALID_BLOCK)
1215 		return false;
1216 
1217 	/*
1218 	 * The page is hidden, or not mapped yet. Unhide the page and
1219 	 * move it to the tail.
1220 	 *
1221 	 * Since the page isn't mapped there doesn't exist a valid TLB entry
1222 	 * for this address, so no TLB invalidation is required after setting
1223 	 * the new entry. A DSB is needed though, to make the write visible.
1224 	 *
1225 	 * For user executable pages it's more complicated. Those pages can
1226 	 * be shared between multiple TA mappings and thus populated by
1227 	 * another TA. The reference manual states that:
1228 	 *
1229 	 * "instruction cache maintenance is required only after writing
1230 	 * new data to a physical address that holds an instruction."
1231 	 *
1232 	 * So for hidden pages we would not need to invalidate i-cache, but
1233 	 * for newly populated pages we do. Since we don't know which case
1234 	 * applies, we assume the worst and always invalidate the i-cache. We
1235 	 * don't need to clean the d-cache though, since that has already
1236 	 * been done earlier.
1237 	 *
1238 	 * Additional bookkeeping to tell if the i-cache invalidation is
1239 	 * needed or not is left as a future optimization.
1240 	 */
1241 
1242 	/* If it's not a dirty block, then it should be read only. */
1243 	if (!pmem_is_dirty(pmem))
1244 		a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1245 
1246 	pa = get_pmem_pa(pmem);
1247 	pmem->flags &= ~PMEM_FLAG_HIDDEN;
1248 	if (area->flags & TEE_MATTR_UX) {
1249 		void *va = (void *)tblidx2va(tblidx);
1250 
1251 		/* Set a temporary read-only mapping */
1252 		assert(!(a & (TEE_MATTR_UW | TEE_MATTR_PW)));
1253 		tblidx_set_entry(tblidx, pa, a & ~TEE_MATTR_UX);
1254 		dsb_ishst();
1255 
1256 		icache_inv_user_range(va, SMALL_PAGE_SIZE);
1257 
1258 		/* Set the final mapping */
1259 		tblidx_set_entry(tblidx, pa, a);
1260 		tblidx_tlbi_entry(tblidx);
1261 	} else {
1262 		tblidx_set_entry(tblidx, pa, a);
1263 		dsb_ishst();
1264 	}
1265 	pgt_inc_used_entries(tblidx.pgt);
1266 
1267 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1268 	TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1269 	incr_hidden_hits();
1270 	return true;
1271 }
1272 
1273 static void tee_pager_hide_pages(void)
1274 {
1275 	struct tee_pager_pmem *pmem = NULL;
1276 	size_t n = 0;
1277 
1278 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1279 		if (n >= TEE_PAGER_NHIDE)
1280 			break;
1281 		n++;
1282 
1283 		/* We cannot hide pages when pmem->fobj is not defined. */
1284 		if (!pmem->fobj)
1285 			continue;
1286 
1287 		if (pmem_is_hidden(pmem))
1288 			continue;
1289 
1290 		pmem->flags |= PMEM_FLAG_HIDDEN;
1291 		pmem_unmap(pmem, NULL);
1292 	}
1293 }
1294 
1295 static unsigned int __maybe_unused
1296 num_areas_with_pmem(struct tee_pager_pmem *pmem)
1297 {
1298 	struct tee_pager_area *a = NULL;
1299 	unsigned int num_matches = 0;
1300 
1301 	TAILQ_FOREACH(a, &pmem->fobj->areas, fobj_link)
1302 		if (pmem_is_covered_by_area(pmem, a))
1303 			num_matches++;
1304 
1305 	return num_matches;
1306 }
1307 
1308 /*
1309  * Find the mapped pmem, unmap it and move it back to the pageable pmem list.
1310  * Return false if the page was not mapped, and true if it was mapped.
1311  */
1312 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1313 				       vaddr_t page_va)
1314 {
1315 	struct tee_pager_pmem *pmem = NULL;
1316 	struct tblidx tblidx = { };
1317 	size_t fobj_pgidx = 0;
1318 
1319 	assert(page_va >= area->base && page_va < (area->base + area->size));
1320 	fobj_pgidx = (page_va - area->base) / SMALL_PAGE_SIZE +
1321 		     area->fobj_pgoffs;
1322 
1323 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1324 		if (pmem->fobj != area->fobj || pmem->fobj_pgidx != fobj_pgidx)
1325 			continue;
1326 
1327 		/*
1328 		 * Locked pages may not be shared. We're asserting that the
1329 		 * number of areas using this pmem is one and only one as
1330 		 * we're about to unmap it.
1331 		 */
1332 		assert(num_areas_with_pmem(pmem) == 1);
1333 
1334 		tblidx = pmem_get_area_tblidx(pmem, area);
1335 		tblidx_set_entry(tblidx, 0, 0);
1336 		pgt_dec_used_entries(tblidx.pgt);
1337 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1338 		pmem_clear(pmem);
1339 		tee_pager_npages++;
1340 		set_npages();
1341 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1342 		incr_zi_released();
1343 		return true;
1344 	}
1345 
1346 	return false;
1347 }
1348 
1349 static void pager_deploy_page(struct tee_pager_pmem *pmem,
1350 			      struct tee_pager_area *area, vaddr_t page_va,
1351 			      bool clean_user_cache, bool writable)
1352 {
1353 	struct tblidx tblidx = area_va2tblidx(area, page_va);
1354 	uint32_t attr = get_area_mattr(area->flags);
1355 	struct core_mmu_table_info *ti = NULL;
1356 	uint8_t *va_alias = pmem->va_alias;
1357 	paddr_t pa = get_pmem_pa(pmem);
1358 	unsigned int idx_alias = 0;
1359 	uint32_t attr_alias = 0;
1360 	paddr_t pa_alias = 0;
1361 
1362 	/* Ensure we are allowed to write to aliased virtual page */
1363 	ti = find_table_info((vaddr_t)va_alias);
1364 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
1365 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
1366 	if (!(attr_alias & TEE_MATTR_PW)) {
1367 		attr_alias |= TEE_MATTR_PW;
1368 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
1369 		tlbi_mva_allasid((vaddr_t)va_alias);
1370 	}
1371 
1372 	asan_tag_access(va_alias, va_alias + SMALL_PAGE_SIZE);
1373 	if (fobj_load_page(pmem->fobj, pmem->fobj_pgidx, va_alias)) {
1374 		EMSG("PH 0x%" PRIxVA " failed", page_va);
1375 		panic();
1376 	}
1377 	switch (area->type) {
1378 	case PAGER_AREA_TYPE_RO:
1379 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1380 		incr_ro_hits();
1381 		/* Forbid write to aliases for read-only (maybe exec) pages */
1382 		attr_alias &= ~TEE_MATTR_PW;
1383 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
1384 		tlbi_mva_allasid((vaddr_t)va_alias);
1385 		break;
1386 	case PAGER_AREA_TYPE_RW:
1387 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1388 		if (writable && (attr & (TEE_MATTR_PW | TEE_MATTR_UW)))
1389 			pmem->flags |= PMEM_FLAG_DIRTY;
1390 		incr_rw_hits();
1391 		break;
1392 	case PAGER_AREA_TYPE_LOCK:
1393 		/* Move page to lock list */
1394 		if (tee_pager_npages <= 0)
1395 			panic("Running out of pages");
1396 		tee_pager_npages--;
1397 		set_npages();
1398 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1399 		break;
1400 	default:
1401 		panic();
1402 	}
1403 	asan_tag_no_access(va_alias, va_alias + SMALL_PAGE_SIZE);
1404 
1405 	if (!writable)
1406 		attr &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1407 
1408 	/*
1409 	 * We've updated the page using the aliased mapping and
1410 	 * some cache maintenance is now needed if it's an
1411 	 * executable page.
1412 	 *
1413 	 * Since the d-cache is a Physically-indexed,
1414 	 * physically-tagged (PIPT) cache we can clean either the
1415 	 * aliased address or the real virtual address. In this
1416 	 * case we choose the real virtual address.
1417 	 *
1418 	 * The i-cache can also be PIPT, but may be something else
1419 	 * too like VIPT. The current code requires the caches to
1420 	 * implement the IVIPT extension, that is:
1421 	 * "instruction cache maintenance is required only after
1422 	 * writing new data to a physical address that holds an
1423 	 * instruction."
1424 	 *
1425 	 * To portably invalidate the icache the page has to
1426 	 * be mapped at the final virtual address but not
1427 	 * executable.
1428 	 */
1429 	if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1430 		uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1431 				TEE_MATTR_PW | TEE_MATTR_UW;
1432 		void *va = (void *)page_va;
1433 
1434 		/* Set a temporary read-only mapping */
1435 		tblidx_set_entry(tblidx, pa, attr & ~mask);
1436 		tblidx_tlbi_entry(tblidx);
1437 
1438 		dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1439 		if (clean_user_cache)
1440 			icache_inv_user_range(va, SMALL_PAGE_SIZE);
1441 		else
1442 			icache_inv_range(va, SMALL_PAGE_SIZE);
1443 
1444 		/* Set the final mapping */
1445 		tblidx_set_entry(tblidx, pa, attr);
1446 		tblidx_tlbi_entry(tblidx);
1447 	} else {
1448 		tblidx_set_entry(tblidx, pa, attr);
1449 		/*
1450 		 * No need to flush TLB for this entry, it was
1451 		 * invalid. We should use a barrier though, to make
1452 		 * sure that the change is visible.
1453 		 */
1454 		dsb_ishst();
1455 	}
1456 	pgt_inc_used_entries(tblidx.pgt);
1457 
1458 	FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1459 }
1460 
1461 static void make_dirty_page(struct tee_pager_pmem *pmem,
1462 			    struct tee_pager_area *area, struct tblidx tblidx,
1463 			    paddr_t pa)
1464 {
1465 	assert(area->flags & (TEE_MATTR_UW | TEE_MATTR_PW));
1466 	assert(!(pmem->flags & PMEM_FLAG_DIRTY));
1467 
1468 	FMSG("Dirty %#"PRIxVA, tblidx2va(tblidx));
1469 	pmem->flags |= PMEM_FLAG_DIRTY;
1470 	tblidx_set_entry(tblidx, pa, get_area_mattr(area->flags));
1471 	tblidx_tlbi_entry(tblidx);
1472 }
1473 
1474 /*
1475  * This function takes a reference to a page (@fobj + fobj_pgidx) and makes
1476  * the corresponding IV available.
1477  *
1478  * In case the page needs to be saved the IV must be writable, so the
1479  * page holding the IV is made dirty. If the page instead is only to be
1480  * verified it's enough that the page holding the IV is read-only and
1481  * thus doesn't have to be made dirty.
1482  *
1483  * This function depends on pager_spare_pmem pointing to a free pmem when
1484  * entered. In case the page holding the needed IV isn't mapped this spare
1485  * pmem is used to map the page. If this function has used pager_spare_pmem
1486  * and assigned it to NULL it must be reassigned with a new free pmem
1487  * before this function can be called again.
1488  */
1489 static void make_iv_available(struct fobj *fobj, unsigned int fobj_pgidx,
1490 			      bool writable)
1491 {
1492 	struct tee_pager_area *area = pager_iv_area;
1493 	struct tee_pager_pmem *pmem = NULL;
1494 	struct tblidx tblidx = { };
1495 	vaddr_t page_va = 0;
1496 	uint32_t attr = 0;
1497 	paddr_t pa = 0;
1498 
1499 	page_va = fobj_get_iv_vaddr(fobj, fobj_pgidx) & ~SMALL_PAGE_MASK;
1500 	if (!IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) || !page_va) {
1501 		assert(!page_va);
1502 		return;
1503 	}
1504 
1505 	assert(area && area->type == PAGER_AREA_TYPE_RW);
1506 	assert(pager_spare_pmem);
1507 	assert(core_is_buffer_inside(page_va, 1, area->base, area->size));
1508 
1509 	tblidx = area_va2tblidx(area, page_va);
1510 	/*
1511 	 * We don't care if tee_pager_unhide_page() succeeds or not, we're
1512 	 * still checking the attributes afterwards.
1513 	 */
1514 	tee_pager_unhide_page(area, page_va);
1515 	tblidx_get_entry(tblidx, &pa, &attr);
1516 	if (!(attr & TEE_MATTR_VALID_BLOCK)) {
1517 		/*
1518 		 * We're using the spare pmem to map the IV corresponding
1519 		 * to another page.
1520 		 */
1521 		pmem = pager_spare_pmem;
1522 		pager_spare_pmem = NULL;
1523 		pmem_assign_fobj_page(pmem, area, page_va);
1524 
1525 		if (writable)
1526 			pmem->flags |= PMEM_FLAG_DIRTY;
1527 
1528 		pager_deploy_page(pmem, area, page_va,
1529 				  false /*!clean_user_cache*/, writable);
1530 	} else if (writable && !(attr & TEE_MATTR_PW)) {
1531 		pmem = pmem_find(area, page_va);
1532 		/* Note that pa is valid since TEE_MATTR_VALID_BLOCK is set */
1533 		make_dirty_page(pmem, area, tblidx, pa);
1534 	}
1535 }
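/*
 * Sketch of the caller contract described above, as implemented by
 * pager_get_page() below: whenever make_iv_available() consumes
 * pager_spare_pmem the caller must donate a free pmem back before the
 * next call, e.g. (free_pmem is an illustrative name):
 *
 *	make_iv_available(fobj, fobj_pgidx, writable);
 *	if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) && !pager_spare_pmem) {
 *		pmem_clear(free_pmem);
 *		pager_spare_pmem = free_pmem;
 *	}
 */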
1536 
1537 static void pager_get_page(struct tee_pager_area *area, struct abort_info *ai,
1538 			   bool clean_user_cache)
1539 {
1540 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1541 	struct tblidx tblidx = area_va2tblidx(area, page_va);
1542 	struct tee_pager_pmem *pmem = NULL;
1543 	bool writable = false;
1544 	uint32_t attr = 0;
1545 
1546 	/*
1547 	 * Get a pmem to load code and data into, and also make sure
1548 	 * the corresponding IV page is available.
1549 	 */
1550 	while (true) {
1551 		pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1552 		if (!pmem) {
1553 			EMSG("No pmem entries");
1554 			abort_print(ai);
1555 			panic();
1556 		}
1557 
1558 		if (pmem->fobj) {
1559 			pmem_unmap(pmem, NULL);
1560 			if (pmem_is_dirty(pmem)) {
1561 				uint8_t *va = pmem->va_alias;
1562 
1563 				make_iv_available(pmem->fobj, pmem->fobj_pgidx,
1564 						  true /*writable*/);
1565 				asan_tag_access(va, va + SMALL_PAGE_SIZE);
1566 				if (fobj_save_page(pmem->fobj, pmem->fobj_pgidx,
1567 						   pmem->va_alias))
1568 					panic("fobj_save_page");
1569 				asan_tag_no_access(va, va + SMALL_PAGE_SIZE);
1570 
1571 				pmem_clear(pmem);
1572 
1573 				/*
1574 				 * If the spare pmem was used by
1575 				 * make_iv_available() we need to replace
1576 				 * it with the just freed pmem.
1577 				 *
1578 				 * See make_iv_available() for details.
1579 				 */
1580 				if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) &&
1581 				    !pager_spare_pmem) {
1582 					TAILQ_REMOVE(&tee_pager_pmem_head,
1583 						     pmem, link);
1584 					pager_spare_pmem = pmem;
1585 					pmem = NULL;
1586 				}
1587 
1588 				/*
1589 				 * Check if the needed virtual page was
1590 				 * made available as a side effect of the
1591 				 * call to make_iv_available() above. If so
1592 				 * we're done.
1593 				 */
1594 				tblidx_get_entry(tblidx, NULL, &attr);
1595 				if (attr & TEE_MATTR_VALID_BLOCK)
1596 					return;
1597 
1598 				/*
1599 				 * The freed pmem was used to replace the
1600 				 * consumed pager_spare_pmem above. Restart
1601 				 * to find another pmem.
1602 				 */
1603 				if (!pmem)
1604 					continue;
1605 			}
1606 		}
1607 
1608 		TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1609 		pmem_clear(pmem);
1610 
1611 		pmem_assign_fobj_page(pmem, area, page_va);
1612 		make_iv_available(pmem->fobj, pmem->fobj_pgidx,
1613 				  false /*!writable*/);
1614 		if (!IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) || pager_spare_pmem)
1615 			break;
1616 
1617 		/*
1618 		 * The spare pmem was used by make_iv_available(). We need
1619 		 * to replace it with the just freed pmem. And get another
1620 		 * pmem.
1621 		 *
1622 		 * See make_iv_available() for details.
1623 		 */
1624 		pmem_clear(pmem);
1625 		pager_spare_pmem = pmem;
1626 	}
1627 
1628 	/*
1629 	 * Pages in PAGER_AREA_TYPE_LOCK areas are always writable while
1630 	 * pages in PAGER_AREA_TYPE_RO areas never are.
1631 	 *
1632 	 * Pages from PAGER_AREA_TYPE_RW areas start read-only so that we
1633 	 * can tell when they are updated and should be tagged as dirty.
1635 	 */
1636 	if (area->type == PAGER_AREA_TYPE_LOCK ||
1637 	    (area->type == PAGER_AREA_TYPE_RW && abort_is_write_fault(ai)))
1638 		writable = true;
1639 	else
1640 		writable = false;
1641 
1642 	pager_deploy_page(pmem, area, page_va, clean_user_cache, writable);
1643 }
1644 
1645 static bool pager_update_permissions(struct tee_pager_area *area,
1646 			struct abort_info *ai, bool *handled)
1647 {
1648 	struct tblidx tblidx = area_va2tblidx(area, ai->va);
1649 	struct tee_pager_pmem *pmem = NULL;
1650 	uint32_t attr = 0;
1651 	paddr_t pa = 0;
1652 
1653 	*handled = false;
1654 
1655 	tblidx_get_entry(tblidx, &pa, &attr);
1656 
1657 	/* Not mapped */
1658 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1659 		return false;
1660 
1661 	/* Not readable, should not happen */
1662 	if (abort_is_user_exception(ai)) {
1663 		if (!(attr & TEE_MATTR_UR))
1664 			return true;
1665 	} else {
1666 		if (!(attr & TEE_MATTR_PR)) {
1667 			abort_print_error(ai);
1668 			panic();
1669 		}
1670 	}
1671 
1672 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1673 	case CORE_MMU_FAULT_TRANSLATION:
1674 	case CORE_MMU_FAULT_READ_PERMISSION:
1675 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1676 			/* Check for an attempt to execute from a NOX page */
1677 			if (abort_is_user_exception(ai)) {
1678 				if (!(attr & TEE_MATTR_UX))
1679 					return true;
1680 			} else {
1681 				if (!(attr & TEE_MATTR_PX)) {
1682 					abort_print_error(ai);
1683 					panic();
1684 				}
1685 			}
1686 		}
1687 		/* Since the page is mapped now it's OK */
1688 		break;
1689 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1690 		/* Check for an attempt to write to an RO page */
1691 		pmem = pmem_find(area, ai->va);
1692 		if (!pmem)
1693 			panic();
1694 		if (abort_is_user_exception(ai)) {
1695 			if (!(area->flags & TEE_MATTR_UW))
1696 				return true;
1697 			if (!(attr & TEE_MATTR_UW))
1698 				make_dirty_page(pmem, area, tblidx, pa);
1699 		} else {
1700 			if (!(area->flags & TEE_MATTR_PW)) {
1701 				abort_print_error(ai);
1702 				panic();
1703 			}
1704 			if (!(attr & TEE_MATTR_PW))
1705 				make_dirty_page(pmem, area, tblidx, pa);
1706 		}
1707 		/* Since the permissions have been updated it's OK now */
1708 		break;
1709 	default:
1710 		/* Some fault we can't deal with */
1711 		if (abort_is_user_exception(ai))
1712 			return true;
1713 		abort_print_error(ai);
1714 		panic();
1715 	}
1716 	*handled = true;
1717 	return true;
1718 }
1719 
1720 #ifdef CFG_TEE_CORE_DEBUG
1721 static void stat_handle_fault(void)
1722 {
1723 	static size_t num_faults;
1724 	static size_t min_npages = SIZE_MAX;
1725 	static size_t total_min_npages = SIZE_MAX;
1726 
1727 	num_faults++;
1728 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1729 		DMSG("nfaults %zu npages %zu (min %zu)",
1730 		     num_faults, tee_pager_npages, min_npages);
1731 		min_npages = tee_pager_npages; /* reset */
1732 	}
1733 	if (tee_pager_npages < min_npages)
1734 		min_npages = tee_pager_npages;
1735 	if (tee_pager_npages < total_min_npages)
1736 		total_min_npages = tee_pager_npages;
1737 }
1738 #else
1739 static void stat_handle_fault(void)
1740 {
1741 }
1742 #endif
1743 
1744 bool tee_pager_handle_fault(struct abort_info *ai)
1745 {
1746 	struct tee_pager_area *area;
1747 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1748 	uint32_t exceptions;
1749 	bool ret;
1750 	bool clean_user_cache = false;
1751 
1752 #ifdef TEE_PAGER_DEBUG_PRINT
1753 	if (!abort_is_user_exception(ai))
1754 		abort_print(ai);
1755 #endif
1756 
1757 	/*
1758 	 * Below we're updating pages that can affect several active CPUs
1759 	 * at a time. We end up here because a thread tries to access some
1760 	 * memory that isn't available. We have to be careful when making
1761 	 * that memory available as other threads may succeed in accessing
1762 	 * that address the moment after we've made it available.
1763 	 *
1764 	 * That means that we can't just map the memory and populate the
1765 	 * page, instead we use the aliased mapping to populate the page
1766 	 * and once everything is ready we map it.
1767 	 */
1768 	exceptions = pager_lock(ai);
1769 
1770 	stat_handle_fault();
1771 
1772 	/* check if the access is valid */
1773 	if (abort_is_user_exception(ai)) {
1774 		area = find_uta_area(ai->va);
1775 		clean_user_cache = true;
1776 	} else {
1777 		area = find_area(&tee_pager_area_head, ai->va);
1778 		if (!area) {
1779 			area = find_uta_area(ai->va);
1780 			clean_user_cache = true;
1781 		}
1782 	}
1783 	if (!area || !area->pgt_array[0]) {
1784 		ret = false;
1785 		goto out;
1786 	}
1787 
1788 	if (tee_pager_unhide_page(area, page_va))
1789 		goto out_success;
1790 
1791 	/*
1792 	 * The page wasn't hidden, but some other core may have
1793 	 * updated the table entry before we got here or we need
1794 	 * to make a read-only page read-write (dirty).
1795 	 */
1796 	if (pager_update_permissions(area, ai, &ret)) {
1797 		/*
1798 		 * Nothing more to do with the abort. The problem
1799 		 * could already have been dealt with from another
1800 		 * core or, if ret is false, the TA will be panicked.
1801 		 */
1802 		goto out;
1803 	}
1804 
1805 	pager_get_page(area, ai, clean_user_cache);
1806 
1807 out_success:
1808 	tee_pager_hide_pages();
1809 	ret = true;
1810 out:
1811 	pager_unlock(exceptions);
1812 	return ret;
1813 }
1814 
1815 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1816 {
1817 	size_t n = 0;
1818 
1819 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1820 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1821 
1822 	/* setup memory */
1823 	for (n = 0; n < npages; n++) {
1824 		struct core_mmu_table_info *ti = NULL;
1825 		struct tee_pager_pmem *pmem = NULL;
1826 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1827 		struct tblidx tblidx = { };
1828 		unsigned int pgidx = 0;
1829 		paddr_t pa = 0;
1830 		uint32_t attr = 0;
1831 
1832 		ti = find_table_info(va);
1833 		pgidx = core_mmu_va2idx(ti, va);
1834 		/*
1835 		 * Note that we only support adding pages within the valid
1836 		 * range of this table info, which currently isn't a problem.
1837 		 */
1838 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1839 
1840 		/* Ignore unmapped pages/blocks */
1841 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1842 			continue;
1843 
1844 		pmem = calloc(1, sizeof(struct tee_pager_pmem));
1845 		if (!pmem)
1846 			panic("out of mem");
1847 		pmem_clear(pmem);
1848 
1849 		pmem->va_alias = pager_add_alias_page(pa);
1850 
1851 		if (unmap) {
1852 			core_mmu_set_entry(ti, pgidx, 0, 0);
1853 			pgt_dec_used_entries(find_core_pgt(va));
1854 		} else {
1855 			struct tee_pager_area *area = NULL;
1856 
1857 			/*
1858 			 * The page is still mapped, let's assign the area
1859 			 * and update the protection bits accordingly.
1860 			 */
1861 			area = find_area(&tee_pager_area_head, va);
1862 			assert(area);
1863 			pmem_assign_fobj_page(pmem, area, va);
1864 			tblidx = pmem_get_area_tblidx(pmem, area);
1865 			assert(tblidx.pgt == find_core_pgt(va));
1866 			assert(pa == get_pmem_pa(pmem));
1867 			tblidx_set_entry(tblidx, pa,
1868 					 get_area_mattr(area->flags));
1869 		}
1870 
1871 		if (unmap && IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) &&
1872 		    !pager_spare_pmem) {
1873 			pager_spare_pmem = pmem;
1874 		} else {
1875 			tee_pager_npages++;
1876 			incr_npages_all();
1877 			set_npages();
1878 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1879 		}
1880 	}
1881 
1882 	/*
1883 	 * As this is done at init time, invalidate all TLBs once instead of
1884 	 * targeting only the modified entries.
1885 	 */
1886 	tlbi_all();
1887 }
1888 
1889 #ifdef CFG_PAGED_USER_TA
1890 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1891 {
1892 	struct pgt *p = pgt;
1893 
1894 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1895 		p = SLIST_NEXT(p, link);
1896 	return p;
1897 }
1898 
1899 void tee_pager_assign_um_tables(struct user_mode_ctx *uctx)
1900 {
1901 	struct tee_pager_area *area = NULL;
1902 	struct pgt *pgt = NULL;
1903 	size_t n = 0;
1904 
1905 	if (!uctx->areas)
1906 		return;
1907 
1908 	pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1909 	TAILQ_FOREACH(area, uctx->areas, link) {
1910 		for (n = 0; n < get_pgt_count(area->base, area->size); n++) {
1911 			vaddr_t va = area->base + CORE_MMU_PGDIR_SIZE * n;
1912 			struct pgt *p __maybe_unused = find_pgt(pgt, va);
1913 
1914 			if (!area->pgt_array[n])
1915 				area->pgt_array[n] = p;
1916 			else
1917 				assert(area->pgt_array[n] == p);
1918 		}
1919 	}
1920 }
1921 
1922 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1923 {
1924 	struct tee_pager_pmem *pmem = NULL;
1925 	struct tee_pager_area *area = NULL;
1926 	struct tee_pager_area_head *areas = NULL;
1927 	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1928 	size_t n = 0;
1929 
1930 	if (!pgt->num_used_entries)
1931 		goto out;
1932 
1933 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1934 		if (pmem->fobj)
1935 			pmem_unmap(pmem, pgt);
1936 	}
1937 	assert(!pgt->num_used_entries);
1938 
1939 out:
1940 	areas = to_user_mode_ctx(pgt->ctx)->areas;
1941 	if (areas) {
1942 		TAILQ_FOREACH(area, areas, link) {
1943 			for (n = 0; n < get_pgt_count(area->base, area->size);
1944 			     n++) {
1945 				if (area->pgt_array[n] == pgt) {
1946 					area->pgt_array[n] = NULL;
1947 					break;
1948 				}
1949 			}
1950 		}
1951 	}
1952 
1953 	pager_unlock(exceptions);
1954 }
1955 DECLARE_KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1956 #endif /*CFG_PAGED_USER_TA*/
1957 
1958 void tee_pager_release_phys(void *addr, size_t size)
1959 {
1960 	bool unmapped = false;
1961 	vaddr_t va = (vaddr_t)addr;
1962 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1963 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1964 	struct tee_pager_area *area;
1965 	uint32_t exceptions;
1966 
1967 	if (end <= begin)
1968 		return;
1969 
1970 	exceptions = pager_lock_check_stack(128);
1971 
1972 	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
1973 		area = find_area(&tee_pager_area_head, va);
1974 		if (!area)
1975 			panic();
1976 		unmapped |= tee_pager_release_one_phys(area, va);
1977 	}
1978 
1979 	if (unmapped)
1980 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1981 
1982 	pager_unlock(exceptions);
1983 }
1984 DECLARE_KEEP_PAGER(tee_pager_release_phys);
1985 
1986 void *tee_pager_alloc(size_t size)
1987 {
1988 	tee_mm_entry_t *mm = NULL;
1989 	uint8_t *smem = NULL;
1990 	size_t num_pages = 0;
1991 	struct fobj *fobj = NULL;
1992 
1993 	if (!size)
1994 		return NULL;
1995 
1996 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1997 	if (!mm)
1998 		return NULL;
1999 
2000 	smem = (uint8_t *)tee_mm_get_smem(mm);
2001 	num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
2002 	fobj = fobj_locked_paged_alloc(num_pages);
2003 	if (!fobj) {
2004 		tee_mm_free(mm);
2005 		return NULL;
2006 	}
2007 
2008 	tee_pager_add_core_area((vaddr_t)smem, PAGER_AREA_TYPE_LOCK, fobj);
2009 	fobj_put(fobj);
2010 
2011 	asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE);
2012 
2013 	return smem;
2014 }
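/*
 * Usage note (hypothetical caller): the returned memory is lock-paged,
 * i.e. each page is paged in on first access and then stays resident
 * until explicitly released:
 *
 *	void *buf = tee_pager_alloc(3 * SMALL_PAGE_SIZE);
 *
 *	if (!buf)
 *		panic();
 *	...
 *	tee_pager_release_phys(buf, 3 * SMALL_PAGE_SIZE);
 */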
2015 
2016 vaddr_t tee_pager_init_iv_area(struct fobj *fobj)
2017 {
2018 	tee_mm_entry_t *mm = NULL;
2019 	uint8_t *smem = NULL;
2020 
2021 	assert(!pager_iv_area);
2022 
2023 	mm = tee_mm_alloc(&tee_mm_vcore, fobj->num_pages * SMALL_PAGE_SIZE);
2024 	if (!mm)
2025 		panic();
2026 
2027 	smem = (uint8_t *)tee_mm_get_smem(mm);
2028 	tee_pager_add_core_area((vaddr_t)smem, PAGER_AREA_TYPE_RW, fobj);
2029 	fobj_put(fobj);
2030 
2031 	asan_tag_access(smem, smem + fobj->num_pages * SMALL_PAGE_SIZE);
2032 
2033 	pager_iv_area = find_area(&tee_pager_area_head, (vaddr_t)smem);
2034 	assert(pager_iv_area && pager_iv_area->fobj == fobj);
2035 
2036 	return (vaddr_t)smem;
2037 }
2038