xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision f6e2b9e2d1a270542c6f6f5e36ed4e36abe18256)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2021, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <io.h>
10 #include <keep.h>
11 #include <kernel/abort.h>
12 #include <kernel/asan.h>
13 #include <kernel/cache_helpers.h>
14 #include <kernel/linker.h>
15 #include <kernel/panic.h>
16 #include <kernel/spinlock.h>
17 #include <kernel/tee_misc.h>
18 #include <kernel/tee_ta_manager.h>
19 #include <kernel/thread.h>
20 #include <kernel/tlb_helpers.h>
21 #include <kernel/user_mode_ctx.h>
22 #include <mm/core_memprot.h>
23 #include <mm/fobj.h>
24 #include <mm/tee_mm.h>
25 #include <mm/tee_pager.h>
26 #include <stdlib.h>
27 #include <sys/queue.h>
28 #include <tee_api_defines.h>
29 #include <trace.h>
30 #include <types_ext.h>
31 #include <utee_defines.h>
32 #include <util.h>
33 
34 
35 static struct tee_pager_area_head tee_pager_area_head =
36 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
37 
38 #define INVALID_PGIDX		UINT_MAX
39 #define PMEM_FLAG_DIRTY		BIT(0)
40 #define PMEM_FLAG_HIDDEN	BIT(1)
41 
42 /*
43  * struct tee_pager_pmem - Represents a physical page used for paging.
44  *
45  * @flags	flags defined by PMEM_FLAG_* above
46  * @fobj_pgidx	index of the page in the @fobj
47  * @fobj	File object of which a page is made visible.
48  * @va_alias	Virtual address at which the physical page is always aliased.
49  *		Used during remapping of the page when the content needs to
50  *		be updated before it's available at the new location.
51  */
52 struct tee_pager_pmem {
53 	unsigned int flags;
54 	unsigned int fobj_pgidx;
55 	struct fobj *fobj;
56 	void *va_alias;
57 	TAILQ_ENTRY(tee_pager_pmem) link;
58 };
59 
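/*
 * struct tblidx - Reference to a single entry in a translation table
 * @pgt		Page table holding the entry
 * @idx		Index of the entry within @pgt
 */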
60 struct tblidx {
61 	struct pgt *pgt;
62 	unsigned int idx;
63 };
64 
65 /* The list of physical pages. The first page in the list is the oldest */
66 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
67 
68 static struct tee_pager_pmem_head tee_pager_pmem_head =
69 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
70 
71 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
72 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
73 
74 /* Maximum number of pages to hide at a time */
75 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
76 
77 /* Number of registered physical pages, used when hiding pages. */
78 static size_t tee_pager_npages;
79 
80 /* This area covers the IVs for all fobjs with paged IVs */
81 static struct tee_pager_area *pager_iv_area;
82 /* Used by make_iv_available(), see that function for details. */
83 static struct tee_pager_pmem *pager_spare_pmem;
84 
85 #ifdef CFG_WITH_STATS
86 static struct tee_pager_stats pager_stats;
87 
88 static inline void incr_ro_hits(void)
89 {
90 	pager_stats.ro_hits++;
91 }
92 
93 static inline void incr_rw_hits(void)
94 {
95 	pager_stats.rw_hits++;
96 }
97 
98 static inline void incr_hidden_hits(void)
99 {
100 	pager_stats.hidden_hits++;
101 }
102 
103 static inline void incr_zi_released(void)
104 {
105 	pager_stats.zi_released++;
106 }
107 
108 static inline void incr_npages_all(void)
109 {
110 	pager_stats.npages_all++;
111 }
112 
113 static inline void set_npages(void)
114 {
115 	pager_stats.npages = tee_pager_npages;
116 }
117 
118 void tee_pager_get_stats(struct tee_pager_stats *stats)
119 {
120 	*stats = pager_stats;
121 
122 	pager_stats.hidden_hits = 0;
123 	pager_stats.ro_hits = 0;
124 	pager_stats.rw_hits = 0;
125 	pager_stats.zi_released = 0;
126 }
127 
128 #else /* CFG_WITH_STATS */
129 static inline void incr_ro_hits(void) { }
130 static inline void incr_rw_hits(void) { }
131 static inline void incr_hidden_hits(void) { }
132 static inline void incr_zi_released(void) { }
133 static inline void incr_npages_all(void) { }
134 static inline void set_npages(void) { }
135 
136 void tee_pager_get_stats(struct tee_pager_stats *stats)
137 {
138 	memset(stats, 0, sizeof(struct tee_pager_stats));
139 }
140 #endif /* CFG_WITH_STATS */
141 
142 #define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
143 #define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
144 #define TBL_SHIFT	SMALL_PAGE_SHIFT
145 
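/*
 * EFFECTIVE_VA_SIZE is the core virtual address range rounded out to
 * whole CORE_MMU_PGDIR_SIZE translation tables. It determines how many
 * pager_tables are allocated in tee_pager_early_init().
 */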
146 #define EFFECTIVE_VA_SIZE \
147 	(ROUNDUP(VCORE_START_VA + TEE_RAM_VA_SIZE, CORE_MMU_PGDIR_SIZE) - \
148 	 ROUNDDOWN(VCORE_START_VA, CORE_MMU_PGDIR_SIZE))
149 
150 static struct pager_table {
151 	struct pgt pgt;
152 	struct core_mmu_table_info tbl_info;
153 } *pager_tables;
154 static unsigned int num_pager_tables;
155 
156 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
157 
158 /* Defines the range of the alias area */
159 static tee_mm_entry_t *pager_alias_area;
160 /*
161  * Physical pages are added in a stack-like fashion to the alias area,
162  * @pager_alias_next_free gives the address of the next free entry if
163  * @pager_alias_next_free is != 0
164  */
165 static uintptr_t pager_alias_next_free;
166 
167 #ifdef CFG_TEE_CORE_DEBUG
168 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
169 
170 static uint32_t pager_lock_dldetect(const char *func, const int line,
171 				    struct abort_info *ai)
172 {
173 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
174 	unsigned int retries = 0;
175 	unsigned int reminder = 0;
176 
177 	while (!cpu_spin_trylock(&pager_spinlock)) {
178 		retries++;
179 		if (!retries) {
180 			/* wrapped, time to report */
181 			trace_printf(func, line, TRACE_ERROR, true,
182 				     "possible spinlock deadlock reminder %u",
183 				     reminder);
184 			if (reminder < UINT_MAX)
185 				reminder++;
186 			if (ai)
187 				abort_print(ai);
188 		}
189 	}
190 
191 	return exceptions;
192 }
193 #else
194 static uint32_t pager_lock(struct abort_info __unused *ai)
195 {
196 	return cpu_spin_lock_xsave(&pager_spinlock);
197 }
198 #endif
199 
200 static uint32_t pager_lock_check_stack(size_t stack_size)
201 {
202 	if (stack_size) {
203 		int8_t buf[stack_size];
204 		size_t n;
205 
206 		/*
207 		 * Make sure to touch all pages of the stack that we expect
208 		 * to use with this lock held. We need to take any
209 		 * page faults before the lock is taken or we'll deadlock
210 		 * the pager. The pages that are populated in this way will
211 		 * eventually be released at certain save transitions of
212 		 * the thread.
213 		 */
214 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
215 			io_write8((vaddr_t)buf + n, 1);
216 		io_write8((vaddr_t)buf + stack_size - 1, 1);
217 	}
218 
219 	return pager_lock(NULL);
220 }
221 
222 static void pager_unlock(uint32_t exceptions)
223 {
224 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
225 }
226 
227 void *tee_pager_phys_to_virt(paddr_t pa)
228 {
229 	struct core_mmu_table_info ti;
230 	unsigned idx;
231 	uint32_t a;
232 	paddr_t p;
233 	vaddr_t v;
234 	size_t n;
235 
236 	/*
237 	 * Most addresses are mapped linearly, so try that first if possible.
238 	 */
239 	if (!tee_pager_get_table_info(pa, &ti))
240 		return NULL; /* impossible pa */
241 	idx = core_mmu_va2idx(&ti, pa);
242 	core_mmu_get_entry(&ti, idx, &p, &a);
243 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
244 		return (void *)core_mmu_idx2va(&ti, idx);
245 
246 	n = 0;
247 	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
248 	while (true) {
249 		while (idx < TBL_NUM_ENTRIES) {
250 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
251 			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
252 				return NULL;
253 
254 			core_mmu_get_entry(&pager_tables[n].tbl_info,
255 					   idx, &p, &a);
256 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
257 				return (void *)v;
258 			idx++;
259 		}
260 
261 		n++;
262 		if (n >= num_pager_tables)
263 			return NULL;
264 		idx = 0;
265 	}
266 
267 	return NULL;
268 }
269 
270 static bool pmem_is_hidden(struct tee_pager_pmem *pmem)
271 {
272 	return pmem->flags & PMEM_FLAG_HIDDEN;
273 }
274 
275 static bool pmem_is_dirty(struct tee_pager_pmem *pmem)
276 {
277 	return pmem->flags & PMEM_FLAG_DIRTY;
278 }
279 
280 static bool pmem_is_covered_by_area(struct tee_pager_pmem *pmem,
281 				    struct tee_pager_area *area)
282 {
283 	if (pmem->fobj != area->fobj)
284 		return false;
285 	if (pmem->fobj_pgidx < area->fobj_pgoffs)
286 		return false;
287 	if ((pmem->fobj_pgidx - area->fobj_pgoffs) >=
288 	    (area->size >> SMALL_PAGE_SHIFT))
289 		return false;
290 
291 	return true;
292 }
293 
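/*
 * Returns the translation table entry (pgt + index) where @pmem is, or
 * would be, mapped in @area. Only meaningful when @pmem is covered by
 * @area.
 */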
294 static struct tblidx pmem_get_area_tblidx(struct tee_pager_pmem *pmem,
295 					  struct tee_pager_area *area)
296 {
297 	size_t tbloffs = (area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT;
298 	size_t idx = pmem->fobj_pgidx - area->fobj_pgoffs + tbloffs;
299 	struct pgt *pgt = area->pgt;
300 
301 	assert(pmem->fobj && pmem->fobj_pgidx != INVALID_PGIDX);
302 	assert(idx < TBL_NUM_ENTRIES);
303 
304 	return (struct tblidx){ .idx = idx, .pgt = pgt };
305 }
306 
307 static struct pager_table *find_pager_table_may_fail(vaddr_t va)
308 {
309 	size_t n;
310 	const vaddr_t mask = CORE_MMU_PGDIR_MASK;
311 
312 	if (!pager_tables)
313 		return NULL;
314 
315 	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
316 	    CORE_MMU_PGDIR_SHIFT;
317 	if (n >= num_pager_tables)
318 		return NULL;
319 
320 	assert(va >= pager_tables[n].tbl_info.va_base &&
321 	       va <= (pager_tables[n].tbl_info.va_base | mask));
322 
323 	return pager_tables + n;
324 }
325 
326 static struct pager_table *find_pager_table(vaddr_t va)
327 {
328 	struct pager_table *pt = find_pager_table_may_fail(va);
329 
330 	assert(pt);
331 	return pt;
332 }
333 
334 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
335 {
336 	struct pager_table *pt = find_pager_table_may_fail(va);
337 
338 	if (!pt)
339 		return false;
340 
341 	*ti = pt->tbl_info;
342 	return true;
343 }
344 
345 static struct core_mmu_table_info *find_table_info(vaddr_t va)
346 {
347 	return &find_pager_table(va)->tbl_info;
348 }
349 
350 static struct pgt *find_core_pgt(vaddr_t va)
351 {
352 	return &find_pager_table(va)->pgt;
353 }
354 
355 void tee_pager_set_alias_area(tee_mm_entry_t *mm)
356 {
357 	struct pager_table *pt;
358 	unsigned idx;
359 	vaddr_t smem = tee_mm_get_smem(mm);
360 	size_t nbytes = tee_mm_get_bytes(mm);
361 	vaddr_t v;
362 	uint32_t a = 0;
363 
364 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
365 
366 	assert(!pager_alias_area);
367 	pager_alias_area = mm;
368 	pager_alias_next_free = smem;
369 
370 	/* Clear all mappings in the alias area */
371 	pt = find_pager_table(smem);
372 	idx = core_mmu_va2idx(&pt->tbl_info, smem);
373 	while (pt <= (pager_tables + num_pager_tables - 1)) {
374 		while (idx < TBL_NUM_ENTRIES) {
375 			v = core_mmu_idx2va(&pt->tbl_info, idx);
376 			if (v >= (smem + nbytes))
377 				goto out;
378 
379 			core_mmu_get_entry(&pt->tbl_info, idx, NULL, &a);
380 			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
381 			if (a & TEE_MATTR_VALID_BLOCK)
382 				pgt_dec_used_entries(&pt->pgt);
383 			idx++;
384 		}
385 
386 		pt++;
387 		idx = 0;
388 	}
389 
390 out:
391 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
392 }
393 
394 static size_t tbl_usage_count(struct core_mmu_table_info *ti)
395 {
396 	size_t n;
397 	uint32_t a = 0;
398 	size_t usage = 0;
399 
400 	for (n = 0; n < ti->num_entries; n++) {
401 		core_mmu_get_entry(ti, n, NULL, &a);
402 		if (a & TEE_MATTR_VALID_BLOCK)
403 			usage++;
404 	}
405 	return usage;
406 }
407 
408 static void tblidx_get_entry(struct tblidx tblidx, paddr_t *pa, uint32_t *attr)
409 {
410 	assert(tblidx.pgt && tblidx.idx < TBL_NUM_ENTRIES);
411 	core_mmu_get_entry_primitive(tblidx.pgt->tbl, TBL_LEVEL, tblidx.idx,
412 				     pa, attr);
413 }
414 
415 static void tblidx_set_entry(struct tblidx tblidx, paddr_t pa, uint32_t attr)
416 {
417 	assert(tblidx.pgt && tblidx.idx < TBL_NUM_ENTRIES);
418 	core_mmu_set_entry_primitive(tblidx.pgt->tbl, TBL_LEVEL, tblidx.idx,
419 				     pa, attr);
420 }
421 
422 static struct tblidx area_va2tblidx(struct tee_pager_area *area, vaddr_t va)
423 {
424 	struct pgt *pgt = area->pgt;
425 	paddr_t mask = CORE_MMU_PGDIR_MASK;
426 
427 	assert(va >= area->base && va < (area->base + area->size));
428 
429 	return (struct tblidx){
430 		.idx = (va & mask) / SMALL_PAGE_SIZE,
431 		.pgt = pgt
432 	};
433 }
434 
435 static vaddr_t tblidx2va(struct tblidx tblidx)
436 {
437 	return tblidx.pgt->vabase + (tblidx.idx << SMALL_PAGE_SHIFT);
438 }
439 
440 static void tblidx_tlbi_entry(struct tblidx tblidx)
441 {
442 	vaddr_t va = tblidx2va(tblidx);
443 
444 #if defined(CFG_PAGED_USER_TA)
445 	if (tblidx.pgt->ctx) {
446 		uint32_t asid = to_user_mode_ctx(tblidx.pgt->ctx)->vm_info.asid;
447 
448 		tlbi_mva_asid(va, asid);
449 		return;
450 	}
451 #endif
452 	tlbi_mva_allasid(va);
453 }
454 
455 static void pmem_assign_fobj_page(struct tee_pager_pmem *pmem,
456 				  struct tee_pager_area *area, vaddr_t va)
457 {
458 	struct tee_pager_pmem *p = NULL;
459 	unsigned int fobj_pgidx = 0;
460 
461 	assert(!pmem->fobj && pmem->fobj_pgidx == INVALID_PGIDX);
462 
463 	assert(va >= area->base && va < (area->base + area->size));
464 	fobj_pgidx = (va - area->base) / SMALL_PAGE_SIZE + area->fobj_pgoffs;
465 
466 	TAILQ_FOREACH(p, &tee_pager_pmem_head, link)
467 		assert(p->fobj != area->fobj || p->fobj_pgidx != fobj_pgidx);
468 
469 	pmem->fobj = area->fobj;
470 	pmem->fobj_pgidx = fobj_pgidx;
471 }
472 
473 static void pmem_clear(struct tee_pager_pmem *pmem)
474 {
475 	pmem->fobj = NULL;
476 	pmem->fobj_pgidx = INVALID_PGIDX;
477 	pmem->flags = 0;
478 }
479 
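/*
 * Unmaps @pmem from every area currently mapping it via its fobj. If
 * @only_this_pgt is non-NULL only mappings in that translation table
 * are removed, used when a page table is about to be released.
 */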
480 static void pmem_unmap(struct tee_pager_pmem *pmem, struct pgt *only_this_pgt)
481 {
482 	struct tee_pager_area *area = NULL;
483 	struct tblidx tblidx = { };
484 	uint32_t a = 0;
485 
486 	TAILQ_FOREACH(area, &pmem->fobj->areas, fobj_link) {
487 		/*
488 		 * If only_this_pgt points to a pgt then the pgt of this
489 		 * area has to match or we'll skip over it.
490 		 */
491 		if (only_this_pgt && area->pgt != only_this_pgt)
492 			continue;
493 		if (!area->pgt || !pmem_is_covered_by_area(pmem, area))
494 			continue;
495 		tblidx = pmem_get_area_tblidx(pmem, area);
496 		tblidx_get_entry(tblidx, NULL, &a);
497 		if (a & TEE_MATTR_VALID_BLOCK) {
498 			tblidx_set_entry(tblidx, 0, 0);
499 			pgt_dec_used_entries(tblidx.pgt);
500 			tblidx_tlbi_entry(tblidx);
501 		}
502 	}
503 }
504 
505 void tee_pager_early_init(void)
506 {
507 	size_t n = 0;
508 
509 	num_pager_tables = EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE;
510 	pager_tables = calloc(num_pager_tables, sizeof(*pager_tables));
511 	if (!pager_tables)
512 		panic("Cannot allocate pager_tables");
513 
514 	/*
515 	 * Note that this depends on add_pager_vaspace() adding the vaspace
516 	 * after the end of memory.
517 	 */
518 	for (n = 0; n < num_pager_tables; n++) {
519 		if (!core_mmu_find_table(NULL, VCORE_START_VA +
520 					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
521 					 &pager_tables[n].tbl_info))
522 			panic("can't find mmu tables");
523 
524 		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
525 			panic("Unsupported page size in translation table");
526 		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
527 		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);
528 
529 		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
530 		pager_tables[n].pgt.vabase = pager_tables[n].tbl_info.va_base;
531 		pgt_set_used_entries(&pager_tables[n].pgt,
532 				tbl_usage_count(&pager_tables[n].tbl_info));
533 	}
534 }
535 
536 static void *pager_add_alias_page(paddr_t pa)
537 {
538 	unsigned idx;
539 	struct core_mmu_table_info *ti;
540 	/* Alias pages mapped without write permission: enabled at runtime when needed */
541 	uint32_t attr = TEE_MATTR_VALID_BLOCK |
542 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
543 			TEE_MATTR_SECURE | TEE_MATTR_PR;
544 
545 	DMSG("0x%" PRIxPA, pa);
546 
547 	ti = find_table_info(pager_alias_next_free);
548 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
549 	core_mmu_set_entry(ti, idx, pa, attr);
550 	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
551 	pager_alias_next_free += SMALL_PAGE_SIZE;
552 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
553 				      tee_mm_get_bytes(pager_alias_area)))
554 		pager_alias_next_free = 0;
555 	return (void *)core_mmu_idx2va(ti, idx);
556 }
557 
558 static void area_insert(struct tee_pager_area_head *head,
559 			struct tee_pager_area *area,
560 			struct tee_pager_area *a_prev)
561 {
562 	uint32_t exceptions = pager_lock_check_stack(8);
563 
564 	if (a_prev)
565 		TAILQ_INSERT_AFTER(head, a_prev, area, link);
566 	else
567 		TAILQ_INSERT_HEAD(head, area, link);
568 	TAILQ_INSERT_TAIL(&area->fobj->areas, area, fobj_link);
569 
570 	pager_unlock(exceptions);
571 }
572 DECLARE_KEEP_PAGER(area_insert);
573 
574 void tee_pager_add_core_area(vaddr_t base, enum tee_pager_area_type type,
575 			     struct fobj *fobj)
576 {
577 	struct tee_pager_area *area = NULL;
578 	uint32_t flags = 0;
579 	size_t fobj_pgoffs = 0;
580 	vaddr_t b = base;
581 	size_t s = 0;
582 	size_t s2 = 0;
583 
584 	assert(fobj);
585 	s = fobj->num_pages * SMALL_PAGE_SIZE;
586 
587 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : type %d", base, base + s, type);
588 
589 	if (base & SMALL_PAGE_MASK || !s) {
590 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, s);
591 		panic();
592 	}
593 
594 	switch (type) {
595 	case PAGER_AREA_TYPE_RO:
596 		flags = TEE_MATTR_PRX;
597 		break;
598 	case PAGER_AREA_TYPE_RW:
599 	case PAGER_AREA_TYPE_LOCK:
600 		flags = TEE_MATTR_PRW;
601 		break;
602 	default:
603 		panic();
604 	}
605 
606 	while (s) {
607 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
608 		area = calloc(1, sizeof(*area));
609 		if (!area)
610 			panic("alloc_area");
611 
612 		area->fobj = fobj_get(fobj);
613 		area->fobj_pgoffs = fobj_pgoffs;
614 		area->type = type;
615 		area->pgt = find_core_pgt(b);
616 		area->base = b;
617 		area->size = s2;
618 		area->flags = flags;
619 		area_insert(&tee_pager_area_head, area, NULL);
620 
621 		b += s2;
622 		s -= s2;
623 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
624 	}
625 }
626 
627 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
628 					vaddr_t va)
629 {
630 	struct tee_pager_area *area;
631 
632 	if (!areas)
633 		return NULL;
634 
635 	TAILQ_FOREACH(area, areas, link) {
636 		if (core_is_buffer_inside(va, 1, area->base, area->size))
637 			return area;
638 	}
639 	return NULL;
640 }
641 
642 #ifdef CFG_PAGED_USER_TA
643 static struct tee_pager_area *find_uta_area(vaddr_t va)
644 {
645 	struct ts_ctx *ctx = thread_get_tsd()->ctx;
646 
647 	if (!is_user_mode_ctx(ctx))
648 		return NULL;
649 	return find_area(to_user_mode_ctx(ctx)->areas, va);
650 }
651 #else
652 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
653 {
654 	return NULL;
655 }
656 #endif /*CFG_PAGED_USER_TA*/
657 
658 
659 static uint32_t get_area_mattr(uint32_t area_flags)
660 {
661 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
662 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
663 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
664 
665 	return attr;
666 }
667 
668 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
669 {
670 	struct core_mmu_table_info *ti;
671 	paddr_t pa;
672 	unsigned idx;
673 
674 	ti = find_table_info((vaddr_t)pmem->va_alias);
675 	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
676 	core_mmu_get_entry(ti, idx, &pa, NULL);
677 	return pa;
678 }
679 
680 #ifdef CFG_PAGED_USER_TA
681 static void unlink_area(struct tee_pager_area_head *area_head,
682 			struct tee_pager_area *area)
683 {
684 	uint32_t exceptions = pager_lock_check_stack(64);
685 
686 	TAILQ_REMOVE(area_head, area, link);
687 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
688 
689 	pager_unlock(exceptions);
690 }
691 DECLARE_KEEP_PAGER(unlink_area);
692 
693 static void free_area(struct tee_pager_area *area)
694 {
695 	fobj_put(area->fobj);
696 	free(area);
697 }
698 
699 static TEE_Result pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base,
700 				    struct fobj *fobj, uint32_t prot)
701 {
702 	struct tee_pager_area *a_prev = NULL;
703 	struct tee_pager_area *area = NULL;
704 	vaddr_t b = base;
705 	size_t fobj_pgoffs = 0;
706 	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
707 
708 	if (!uctx->areas) {
709 		uctx->areas = malloc(sizeof(*uctx->areas));
710 		if (!uctx->areas)
711 			return TEE_ERROR_OUT_OF_MEMORY;
712 		TAILQ_INIT(uctx->areas);
713 	}
714 
715 	area = TAILQ_FIRST(uctx->areas);
716 	while (area) {
717 		if (core_is_buffer_intersect(b, s, area->base,
718 					     area->size))
719 			return TEE_ERROR_BAD_PARAMETERS;
720 		if (b < area->base)
721 			break;
722 		a_prev = area;
723 		area = TAILQ_NEXT(area, link);
724 	}
725 
726 	while (s) {
727 		size_t s2;
728 
729 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
730 		area = calloc(1, sizeof(*area));
731 		if (!area)
732 			return TEE_ERROR_OUT_OF_MEMORY;
733 
734 		/* Table info will be set when the context is activated. */
735 		area->fobj = fobj_get(fobj);
736 		area->fobj_pgoffs = fobj_pgoffs;
737 		area->type = PAGER_AREA_TYPE_RW;
738 		area->base = b;
739 		area->size = s2;
740 		area->flags = prot;
741 
742 		area_insert(uctx->areas, area, a_prev);
743 
744 		a_prev = area;
745 		b += s2;
746 		s -= s2;
747 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
748 	}
749 
750 	return TEE_SUCCESS;
751 }
752 
753 TEE_Result tee_pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base,
754 				 struct fobj *fobj, uint32_t prot)
755 {
756 	TEE_Result res = TEE_SUCCESS;
757 	struct thread_specific_data *tsd = thread_get_tsd();
758 	struct tee_pager_area *area = NULL;
759 	struct core_mmu_table_info dir_info = { NULL };
760 
761 	if (uctx->ts_ctx != tsd->ctx) {
762 		/*
763 		 * Changes are to a user mode context that isn't active. Just
764 		 * add the areas, page tables will be dealt with later.
765 		 */
766 		return pager_add_um_area(uctx, base, fobj, prot);
767 	}
768 
769 	/*
770 	 * Assign page tables before adding areas to be able to tell which
771 	 * are newly added and should be removed in case of failure.
772 	 */
773 	tee_pager_assign_um_tables(uctx);
774 	res = pager_add_um_area(uctx, base, fobj, prot);
775 	if (res) {
776 		struct tee_pager_area *next_a;
777 
778 		/* Remove all added areas */
779 		TAILQ_FOREACH_SAFE(area, uctx->areas, link, next_a) {
780 			if (!area->pgt) {
781 				unlink_area(uctx->areas, area);
782 				free_area(area);
783 			}
784 		}
785 		return res;
786 	}
787 
788 	/*
789 	 * Assign page tables to the new areas and make sure that the page
790 	 * tables are registered in the upper table.
791 	 */
792 	tee_pager_assign_um_tables(uctx);
793 	core_mmu_get_user_pgdir(&dir_info);
794 	TAILQ_FOREACH(area, uctx->areas, link) {
795 		paddr_t pa;
796 		size_t idx;
797 		uint32_t attr;
798 
799 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
800 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
801 
802 		/*
803 		 * Check if the page table is already in use, if it is, it's
804 		 * already registered.
805 		 */
806 		if (area->pgt->num_used_entries) {
807 			assert(attr & TEE_MATTR_TABLE);
808 			assert(pa == virt_to_phys(area->pgt->tbl));
809 			continue;
810 		}
811 
812 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
813 		pa = virt_to_phys(area->pgt->tbl);
814 		assert(pa);
815 		/*
816 		 * Note that the update of the table entry is guaranteed to
817 		 * be atomic.
818 		 */
819 		core_mmu_set_entry(&dir_info, idx, pa, attr);
820 	}
821 
822 	return TEE_SUCCESS;
823 }
824 
825 static void split_area(struct tee_pager_area_head *area_head,
826 		       struct tee_pager_area *area, struct tee_pager_area *a2,
827 		       vaddr_t va)
828 {
829 	uint32_t exceptions = pager_lock_check_stack(64);
830 	size_t diff = va - area->base;
831 
832 	a2->fobj = fobj_get(area->fobj);
833 	a2->fobj_pgoffs = area->fobj_pgoffs + diff / SMALL_PAGE_SIZE;
834 	a2->type = area->type;
835 	a2->flags = area->flags;
836 	a2->base = va;
837 	a2->size = area->size - diff;
838 	a2->pgt = area->pgt;
839 	area->size = diff;
840 
841 	TAILQ_INSERT_AFTER(area_head, area, a2, link);
842 	TAILQ_INSERT_AFTER(&area->fobj->areas, area, a2, fobj_link);
843 
844 	pager_unlock(exceptions);
845 }
846 DECLARE_KEEP_PAGER(split_area);
847 
848 TEE_Result tee_pager_split_um_region(struct user_mode_ctx *uctx, vaddr_t va)
849 {
850 	struct tee_pager_area *area = NULL;
851 	struct tee_pager_area *a2 = NULL;
852 
853 	if (va & SMALL_PAGE_MASK)
854 		return TEE_ERROR_BAD_PARAMETERS;
855 
856 	TAILQ_FOREACH(area, uctx->areas, link) {
857 		if (va == area->base || va == area->base + area->size)
858 			return TEE_SUCCESS;
859 		if (va > area->base && va < area->base + area->size) {
860 			a2 = calloc(1, sizeof(*a2));
861 			if (!a2)
862 				return TEE_ERROR_OUT_OF_MEMORY;
863 			split_area(uctx->areas, area, a2, va);
864 			return TEE_SUCCESS;
865 		}
866 	}
867 
868 	return TEE_SUCCESS;
869 }
870 
871 static void merge_area_with_next(struct tee_pager_area_head *area_head,
872 				 struct tee_pager_area *a,
873 				 struct tee_pager_area *a_next)
874 {
875 	uint32_t exceptions = pager_lock_check_stack(64);
876 
877 	TAILQ_REMOVE(area_head, a_next, link);
878 	TAILQ_REMOVE(&a_next->fobj->areas, a_next, fobj_link);
879 	a->size += a_next->size;
880 
881 	pager_unlock(exceptions);
882 }
883 DECLARE_KEEP_PAGER(merge_area_with_next);
884 
885 void tee_pager_merge_um_region(struct user_mode_ctx *uctx, vaddr_t va,
886 			       size_t len)
887 {
888 	struct tee_pager_area *a_next = NULL;
889 	struct tee_pager_area *a = NULL;
890 	vaddr_t end_va = 0;
891 
892 	if ((va | len) & SMALL_PAGE_MASK)
893 		return;
894 	if (ADD_OVERFLOW(va, len, &end_va))
895 		return;
896 
897 	for (a = TAILQ_FIRST(uctx->areas);; a = a_next) {
898 		a_next = TAILQ_NEXT(a, link);
899 		if (!a_next)
900 			return;
901 
902 		/* Try merging with the area just before va */
903 		if (a->base + a->size < va)
904 			continue;
905 
906 		/*
907 		 * If a->base is well past our range we're done.
908 		 * Note that if it's just the page after our range we'll
909 		 * try to merge.
910 		 */
911 		if (a->base > end_va)
912 			return;
913 
914 		if (a->base + a->size != a_next->base)
915 			continue;
916 		if (a->fobj != a_next->fobj || a->type != a_next->type ||
917 		    a->flags != a_next->flags || a->pgt != a_next->pgt)
918 			continue;
919 		if (a->fobj_pgoffs + a->size / SMALL_PAGE_SIZE !=
920 		    a_next->fobj_pgoffs)
921 			continue;
922 
923 		merge_area_with_next(uctx->areas, a, a_next);
924 		free_area(a_next);
925 		a_next = a;
926 	}
927 }
928 
929 static void rem_area(struct tee_pager_area_head *area_head,
930 		     struct tee_pager_area *area)
931 {
932 	struct tee_pager_pmem *pmem;
933 	size_t last_pgoffs = area->fobj_pgoffs +
934 			     (area->size >> SMALL_PAGE_SHIFT) - 1;
935 	uint32_t exceptions;
936 	struct tblidx tblidx = { };
937 	uint32_t a = 0;
938 
939 	exceptions = pager_lock_check_stack(64);
940 
941 	TAILQ_REMOVE(area_head, area, link);
942 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
943 
944 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
945 		if (pmem->fobj != area->fobj ||
946 		    pmem->fobj_pgidx < area->fobj_pgoffs ||
947 		    pmem->fobj_pgidx > last_pgoffs)
948 			continue;
949 
950 		tblidx = pmem_get_area_tblidx(pmem, area);
951 		tblidx_get_entry(tblidx, NULL, &a);
952 		if (!(a & TEE_MATTR_VALID_BLOCK))
953 			continue;
954 
955 		tblidx_set_entry(tblidx, 0, 0);
956 		tblidx_tlbi_entry(tblidx);
957 		pgt_dec_used_entries(tblidx.pgt);
958 	}
959 
960 	pager_unlock(exceptions);
961 
962 	free_area(area);
963 }
964 DECLARE_KEEP_PAGER(rem_area);
965 
966 void tee_pager_rem_um_region(struct user_mode_ctx *uctx, vaddr_t base,
967 			     size_t size)
968 {
969 	struct tee_pager_area *area;
970 	struct tee_pager_area *next_a;
971 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
972 
973 	TAILQ_FOREACH_SAFE(area, uctx->areas, link, next_a) {
974 		if (core_is_buffer_inside(area->base, area->size, base, s))
975 			rem_area(uctx->areas, area);
976 	}
977 	tlbi_asid(uctx->vm_info.asid);
978 }
979 
980 void tee_pager_rem_um_areas(struct user_mode_ctx *uctx)
981 {
982 	struct tee_pager_area *area = NULL;
983 
984 	if (!uctx->areas)
985 		return;
986 
987 	while (true) {
988 		area = TAILQ_FIRST(uctx->areas);
989 		if (!area)
990 			break;
991 		unlink_area(uctx->areas, area);
992 		free_area(area);
993 	}
994 
995 	free(uctx->areas);
996 }
997 
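/*
 * Returns true if all areas mapping the fobj of @pmem belong to the
 * same context, that is, the physical page isn't shared between
 * contexts.
 */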
998 static bool __maybe_unused same_context(struct tee_pager_pmem *pmem)
999 {
1000 	struct tee_pager_area *a = TAILQ_FIRST(&pmem->fobj->areas);
1001 	void *ctx = a->pgt->ctx;
1002 
1003 	do {
1004 		a = TAILQ_NEXT(a, fobj_link);
1005 		if (!a)
1006 			return true;
1007 	} while (a->pgt->ctx == ctx);
1008 
1009 	return false;
1010 }
1011 
1012 bool tee_pager_set_um_area_attr(struct user_mode_ctx *uctx, vaddr_t base,
1013 				size_t size, uint32_t flags)
1014 {
1015 	bool ret = false;
1016 	vaddr_t b = base;
1017 	size_t s = size;
1018 	size_t s2 = 0;
1019 	struct tee_pager_area *area = find_area(uctx->areas, b);
1020 	uint32_t exceptions = 0;
1021 	struct tee_pager_pmem *pmem = NULL;
1022 	uint32_t a = 0;
1023 	uint32_t f = 0;
1024 	uint32_t mattr = 0;
1025 	uint32_t f2 = 0;
1026 	struct tblidx tblidx = { };
1027 
1028 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
1029 	if (f & TEE_MATTR_UW)
1030 		f |= TEE_MATTR_PW;
1031 	mattr = get_area_mattr(f);
1032 
1033 	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1034 
1035 	while (s) {
1036 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
1037 		if (!area || area->base != b || area->size != s2) {
1038 			ret = false;
1039 			goto out;
1040 		}
1041 		b += s2;
1042 		s -= s2;
1043 
1044 		if (area->flags == f)
1045 			goto next_area;
1046 
1047 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1048 			if (!pmem_is_covered_by_area(pmem, area))
1049 				continue;
1050 
1051 			tblidx = pmem_get_area_tblidx(pmem, area);
1052 			tblidx_get_entry(tblidx, NULL, &a);
1053 			if (a == f)
1054 				continue;
1055 			tblidx_set_entry(tblidx, 0, 0);
1056 			tblidx_tlbi_entry(tblidx);
1057 
1058 			pmem->flags &= ~PMEM_FLAG_HIDDEN;
1059 			if (pmem_is_dirty(pmem))
1060 				f2 = mattr;
1061 			else
1062 				f2 = mattr & ~(TEE_MATTR_UW | TEE_MATTR_PW);
1063 			tblidx_set_entry(tblidx, get_pmem_pa(pmem), f2);
1064 			if (!(a & TEE_MATTR_VALID_BLOCK))
1065 				pgt_inc_used_entries(area->pgt);
1066 			/*
1067 			 * Make sure the table update is visible before
1068 			 * continuing.
1069 			 */
1070 			dsb_ishst();
1071 
1072 			/*
1073 			 * There's a problem if this page is already shared:
1074 			 * we'd need to do an icache invalidate for each context
1075 			 * in which it is shared. In practice this will
1076 			 * never happen.
1077 			 */
1078 			if (flags & TEE_MATTR_UX) {
1079 				void *va = (void *)tblidx2va(tblidx);
1080 
1081 				/* Assert that the pmem isn't shared. */
1082 				assert(same_context(pmem));
1083 
1084 				dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1085 				icache_inv_user_range(va, SMALL_PAGE_SIZE);
1086 			}
1087 		}
1088 
1089 		area->flags = f;
1090 next_area:
1091 		area = TAILQ_NEXT(area, link);
1092 	}
1093 
1094 	ret = true;
1095 out:
1096 	pager_unlock(exceptions);
1097 	return ret;
1098 }
1099 
1100 DECLARE_KEEP_PAGER(tee_pager_set_um_area_attr);
1101 #endif /*CFG_PAGED_USER_TA*/
1102 
1103 void tee_pager_invalidate_fobj(struct fobj *fobj)
1104 {
1105 	struct tee_pager_pmem *pmem;
1106 	uint32_t exceptions;
1107 
1108 	exceptions = pager_lock_check_stack(64);
1109 
1110 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
1111 		if (pmem->fobj == fobj)
1112 			pmem_clear(pmem);
1113 
1114 	pager_unlock(exceptions);
1115 }
1116 DECLARE_KEEP_PAGER(tee_pager_invalidate_fobj);
1117 
1118 static struct tee_pager_pmem *pmem_find(struct tee_pager_area *area, vaddr_t va)
1119 {
1120 	struct tee_pager_pmem *pmem = NULL;
1121 	size_t fobj_pgidx = 0;
1122 
1123 	assert(va >= area->base && va < (area->base + area->size));
1124 	fobj_pgidx = (va - area->base) / SMALL_PAGE_SIZE + area->fobj_pgoffs;
1125 
1126 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
1127 		if (pmem->fobj == area->fobj && pmem->fobj_pgidx == fobj_pgidx)
1128 			return pmem;
1129 
1130 	return NULL;
1131 }
1132 
1133 static bool tee_pager_unhide_page(struct tee_pager_area *area, vaddr_t page_va)
1134 {
1135 	struct tblidx tblidx = area_va2tblidx(area, page_va);
1136 	struct tee_pager_pmem *pmem = pmem_find(area, page_va);
1137 	uint32_t a = get_area_mattr(area->flags);
1138 	uint32_t attr = 0;
1139 	paddr_t pa = 0;
1140 
1141 	if (!pmem)
1142 		return false;
1143 
1144 	tblidx_get_entry(tblidx, NULL, &attr);
1145 	if (attr & TEE_MATTR_VALID_BLOCK)
1146 		return false;
1147 
1148 	/*
1149 	 * The page is hidden, or not mapped yet. Unhide the page and
1150 	 * move it to the tail.
1151 	 *
1152 	 * Since the page isn't mapped there doesn't exist a valid TLB entry
1153 	 * for this address, so no TLB invalidation is required after setting
1154 	 * the new entry. A DSB is needed though, to make the write visible.
1155 	 *
1156 	 * For user executable pages it's more complicated. Those pages can
1157 	 * be shared between multiple TA mappings and thus populated by
1158 	 * another TA. The reference manual states that:
1159 	 *
1160 	 * "instruction cache maintenance is required only after writing
1161 	 * new data to a physical address that holds an instruction."
1162 	 *
1163 	 * So for hidden pages we would not need to invalidate i-cache, but
1164 	 * for newly populated pages we do. Since we don't know which we
1165 	 * have to assume the worst and always invalidate the i-cache. We
1166 	 * don't need to clean the d-cache though, since that has already
1167 	 * been done earlier.
1168 	 *
1169 	 * Additional bookkeeping to tell if the i-cache invalidation is
1170 	 * needed or not is left as a future optimization.
1171 	 */
1172 
1173 	/* If it's not a dirty block, then it should be read only. */
1174 	if (!pmem_is_dirty(pmem))
1175 		a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1176 
1177 	pa = get_pmem_pa(pmem);
1178 	pmem->flags &= ~PMEM_FLAG_HIDDEN;
1179 	if (area->flags & TEE_MATTR_UX) {
1180 		void *va = (void *)tblidx2va(tblidx);
1181 
1182 		/* Set a temporary read-only mapping */
1183 		assert(!(a & (TEE_MATTR_UW | TEE_MATTR_PW)));
1184 		tblidx_set_entry(tblidx, pa, a & ~TEE_MATTR_UX);
1185 		dsb_ishst();
1186 
1187 		icache_inv_user_range(va, SMALL_PAGE_SIZE);
1188 
1189 		/* Set the final mapping */
1190 		tblidx_set_entry(tblidx, pa, a);
1191 		tblidx_tlbi_entry(tblidx);
1192 	} else {
1193 		tblidx_set_entry(tblidx, pa, a);
1194 		dsb_ishst();
1195 	}
1196 	pgt_inc_used_entries(tblidx.pgt);
1197 
1198 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1199 	TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1200 	incr_hidden_hits();
1201 	return true;
1202 }
1203 
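/*
 * Hides (unmaps) up to TEE_PAGER_NHIDE of the oldest pageable pages.
 * A fault on a hidden page only needs to remap it (a "hidden hit"),
 * so hiding gives an approximation of which pages are actively used
 * when a page has to be picked for eviction.
 */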
1204 static void tee_pager_hide_pages(void)
1205 {
1206 	struct tee_pager_pmem *pmem = NULL;
1207 	size_t n = 0;
1208 
1209 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1210 		if (n >= TEE_PAGER_NHIDE)
1211 			break;
1212 		n++;
1213 
1214 		/* We cannot hide pages when pmem->fobj is not defined. */
1215 		if (!pmem->fobj)
1216 			continue;
1217 
1218 		if (pmem_is_hidden(pmem))
1219 			continue;
1220 
1221 		pmem->flags |= PMEM_FLAG_HIDDEN;
1222 		pmem_unmap(pmem, NULL);
1223 	}
1224 }
1225 
1226 static unsigned int __maybe_unused
1227 num_areas_with_pmem(struct tee_pager_pmem *pmem)
1228 {
1229 	struct tee_pager_area *a = NULL;
1230 	unsigned int num_matches = 0;
1231 
1232 	TAILQ_FOREACH(a, &pmem->fobj->areas, fobj_link)
1233 		if (pmem_is_covered_by_area(pmem, a))
1234 			num_matches++;
1235 
1236 	return num_matches;
1237 }
1238 
1239 /*
1240  * Find the mapped pmem, unmap it and move it back to the pageable pmem list.
1241  * Return false if the page was not mapped, and true if it was.
1242  */
1243 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1244 				       vaddr_t page_va)
1245 {
1246 	struct tee_pager_pmem *pmem = NULL;
1247 	struct tblidx tblidx = { };
1248 	size_t fobj_pgidx = 0;
1249 
1250 	assert(page_va >= area->base && page_va < (area->base + area->size));
1251 	fobj_pgidx = (page_va - area->base) / SMALL_PAGE_SIZE +
1252 		     area->fobj_pgoffs;
1253 
1254 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1255 		if (pmem->fobj != area->fobj || pmem->fobj_pgidx != fobj_pgidx)
1256 			continue;
1257 
1258 		/*
1259 		 * Locked pages may not be shared. We're asserting that the
1260 		 * number of areas using this pmem is one and only one as
1261 		 * we're about to unmap it.
1262 		 */
1263 		assert(num_areas_with_pmem(pmem) == 1);
1264 
1265 		tblidx = pmem_get_area_tblidx(pmem, area);
1266 		tblidx_set_entry(tblidx, 0, 0);
1267 		pgt_dec_used_entries(tblidx.pgt);
1268 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1269 		pmem_clear(pmem);
1270 		tee_pager_npages++;
1271 		set_npages();
1272 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1273 		incr_zi_released();
1274 		return true;
1275 	}
1276 
1277 	return false;
1278 }
1279 
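/*
 * Loads the content of the assigned fobj page into @pmem through the
 * writable alias mapping and then maps the page at @page_va with the
 * attributes of @area, including the cache maintenance needed for
 * executable pages.
 */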
1280 static void pager_deploy_page(struct tee_pager_pmem *pmem,
1281 			      struct tee_pager_area *area, vaddr_t page_va,
1282 			      bool clean_user_cache, bool writable)
1283 {
1284 	struct tblidx tblidx = area_va2tblidx(area, page_va);
1285 	uint32_t attr = get_area_mattr(area->flags);
1286 	struct core_mmu_table_info *ti = NULL;
1287 	uint8_t *va_alias = pmem->va_alias;
1288 	paddr_t pa = get_pmem_pa(pmem);
1289 	unsigned int idx_alias = 0;
1290 	uint32_t attr_alias = 0;
1291 	paddr_t pa_alias = 0;
1292 
1293 	/* Ensure we are allowed to write to aliased virtual page */
1294 	ti = find_table_info((vaddr_t)va_alias);
1295 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
1296 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
1297 	if (!(attr_alias & TEE_MATTR_PW)) {
1298 		attr_alias |= TEE_MATTR_PW;
1299 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
1300 		tlbi_mva_allasid((vaddr_t)va_alias);
1301 	}
1302 
1303 	asan_tag_access(va_alias, va_alias + SMALL_PAGE_SIZE);
1304 	if (fobj_load_page(pmem->fobj, pmem->fobj_pgidx, va_alias)) {
1305 		EMSG("PH 0x%" PRIxVA " failed", page_va);
1306 		panic();
1307 	}
1308 	switch (area->type) {
1309 	case PAGER_AREA_TYPE_RO:
1310 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1311 		incr_ro_hits();
1312 		/* Forbid write to aliases for read-only (maybe exec) pages */
1313 		attr_alias &= ~TEE_MATTR_PW;
1314 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
1315 		tlbi_mva_allasid((vaddr_t)va_alias);
1316 		break;
1317 	case PAGER_AREA_TYPE_RW:
1318 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1319 		if (writable && (attr & (TEE_MATTR_PW | TEE_MATTR_UW)))
1320 			pmem->flags |= PMEM_FLAG_DIRTY;
1321 		incr_rw_hits();
1322 		break;
1323 	case PAGER_AREA_TYPE_LOCK:
1324 		/* Move page to lock list */
1325 		if (tee_pager_npages <= 0)
1326 			panic("Running out of pages");
1327 		tee_pager_npages--;
1328 		set_npages();
1329 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1330 		break;
1331 	default:
1332 		panic();
1333 	}
1334 	asan_tag_no_access(va_alias, va_alias + SMALL_PAGE_SIZE);
1335 
1336 	if (!writable)
1337 		attr &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1338 
1339 	/*
1340 	 * We've updated the page using the aliased mapping and
1341 	 * some cache maintenance is now needed if it's an
1342 	 * executable page.
1343 	 *
1344 	 * Since the d-cache is a Physically-indexed,
1345 	 * physically-tagged (PIPT) cache we can clean either the
1346 	 * aliased address or the real virtual address. In this
1347 	 * case we choose the real virtual address.
1348 	 *
1349 	 * The i-cache can also be PIPT, but may be something else
1350 	 * too like VIPT. The current code requires the caches to
1351 	 * implement the IVIPT extension, that is:
1352 	 * "instruction cache maintenance is required only after
1353 	 * writing new data to a physical address that holds an
1354 	 * instruction."
1355 	 *
1356 	 * To portably invalidate the icache the page has to
1357 	 * be mapped at the final virtual address but not
1358 	 * executable.
1359 	 */
1360 	if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1361 		uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1362 				TEE_MATTR_PW | TEE_MATTR_UW;
1363 		void *va = (void *)page_va;
1364 
1365 		/* Set a temporary read-only mapping */
1366 		tblidx_set_entry(tblidx, pa, attr & ~mask);
1367 		tblidx_tlbi_entry(tblidx);
1368 
1369 		dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1370 		if (clean_user_cache)
1371 			icache_inv_user_range(va, SMALL_PAGE_SIZE);
1372 		else
1373 			icache_inv_range(va, SMALL_PAGE_SIZE);
1374 
1375 		/* Set the final mapping */
1376 		tblidx_set_entry(tblidx, pa, attr);
1377 		tblidx_tlbi_entry(tblidx);
1378 	} else {
1379 		tblidx_set_entry(tblidx, pa, attr);
1380 		/*
1381 		 * No need to flush TLB for this entry, it was
1382 		 * invalid. We should use a barrier though, to make
1383 		 * sure that the change is visible.
1384 		 */
1385 		dsb_ishst();
1386 	}
1387 	pgt_inc_used_entries(tblidx.pgt);
1388 
1389 	FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1390 }
1391 
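/*
 * Upgrades the mapping of an already present page to the full writable
 * area attributes and marks @pmem dirty so that its content is saved
 * with fobj_save_page() before the page is reused.
 */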
1392 static void make_dirty_page(struct tee_pager_pmem *pmem,
1393 			    struct tee_pager_area *area, struct tblidx tblidx,
1394 			    paddr_t pa)
1395 {
1396 	assert(area->flags & (TEE_MATTR_UW | TEE_MATTR_PW));
1397 	assert(!(pmem->flags & PMEM_FLAG_DIRTY));
1398 
1399 	FMSG("Dirty %#"PRIxVA, tblidx2va(tblidx));
1400 	pmem->flags |= PMEM_FLAG_DIRTY;
1401 	tblidx_set_entry(tblidx, pa, get_area_mattr(area->flags));
1402 	tblidx_tlbi_entry(tblidx);
1403 }
1404 
1405 /*
1406  * This function takes a reference to a page (@fobj + fobj_pgidx) and makes
1407  * the corresponding IV available.
1408  *
1409  * In case the page needs to be saved the IV must be writable, so the
1410  * page holding the IV is made dirty. If the page instead only is to be
1411  * verified it's enough that the page holding the IV is read-only and
1412  * thus it doesn't have to be made dirty too.
1413  *
1414  * This function depends on pager_spare_pmem pointing to a free pmem when
1415  * entered. In case the page holding the needed IV isn't mapped this spare
1416  * pmem is used to map the page. If this function has used pager_spare_pmem
1417  * and assigned it to NULL it must be reassigned with a new free pmem
1418  * before this function can be called again.
1419  */
1420 static void make_iv_available(struct fobj *fobj, unsigned int fobj_pgidx,
1421 			      bool writable)
1422 {
1423 	struct tee_pager_area *area = pager_iv_area;
1424 	struct tee_pager_pmem *pmem = NULL;
1425 	struct tblidx tblidx = { };
1426 	vaddr_t page_va = 0;
1427 	uint32_t attr = 0;
1428 	paddr_t pa = 0;
1429 
1430 	page_va = fobj_get_iv_vaddr(fobj, fobj_pgidx) & ~SMALL_PAGE_MASK;
1431 	if (!IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) || !page_va) {
1432 		assert(!page_va);
1433 		return;
1434 	}
1435 
1436 	assert(area && area->type == PAGER_AREA_TYPE_RW);
1437 	assert(pager_spare_pmem);
1438 	assert(core_is_buffer_inside(page_va, 1, area->base, area->size));
1439 
1440 	tblidx = area_va2tblidx(area, page_va);
1441 	/*
1442 	 * We don't care if tee_pager_unhide_page() succeeds or not, we're
1443 	 * still checking the attributes afterwards.
1444 	 */
1445 	tee_pager_unhide_page(area, page_va);
1446 	tblidx_get_entry(tblidx, &pa, &attr);
1447 	if (!(attr & TEE_MATTR_VALID_BLOCK)) {
1448 		/*
1449 		 * We're using the spare pmem to map the IV corresponding
1450 		 * to another page.
1451 		 */
1452 		pmem = pager_spare_pmem;
1453 		pager_spare_pmem = NULL;
1454 		pmem_assign_fobj_page(pmem, area, page_va);
1455 
1456 		if (writable)
1457 			pmem->flags |= PMEM_FLAG_DIRTY;
1458 
1459 		pager_deploy_page(pmem, area, page_va,
1460 				  false /*!clean_user_cache*/, writable);
1461 	} else if (writable && !(attr & TEE_MATTR_PW)) {
1462 		pmem = pmem_find(area, page_va);
1463 		/* Note that pa is valid since TEE_MATTR_VALID_BLOCK is set */
1464 		make_dirty_page(pmem, area, tblidx, pa);
1465 	}
1466 }
1467 
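/*
 * Resolves a page fault in @area by evicting the oldest pageable page
 * (saving it first with fobj_save_page() if it's dirty) and reusing it
 * to page in the content covering the faulting address.
 */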
1468 static void pager_get_page(struct tee_pager_area *area, struct abort_info *ai,
1469 			   bool clean_user_cache)
1470 {
1471 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1472 	struct tblidx tblidx = area_va2tblidx(area, page_va);
1473 	struct tee_pager_pmem *pmem = NULL;
1474 	bool writable = false;
1475 	uint32_t attr = 0;
1476 
1477 	/*
1478 	 * Get a pmem to load code and data into, also make sure
1479 	 * the corresponding IV page is available.
1480 	 */
1481 	while (true) {
1482 		pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1483 		if (!pmem) {
1484 			EMSG("No pmem entries");
1485 			abort_print(ai);
1486 			panic();
1487 		}
1488 
1489 		if (pmem->fobj) {
1490 			pmem_unmap(pmem, NULL);
1491 			if (pmem_is_dirty(pmem)) {
1492 				uint8_t *va = pmem->va_alias;
1493 
1494 				make_iv_available(pmem->fobj, pmem->fobj_pgidx,
1495 						  true /*writable*/);
1496 				asan_tag_access(va, va + SMALL_PAGE_SIZE);
1497 				if (fobj_save_page(pmem->fobj, pmem->fobj_pgidx,
1498 						   pmem->va_alias))
1499 					panic("fobj_save_page");
1500 				asan_tag_no_access(va, va + SMALL_PAGE_SIZE);
1501 
1502 				pmem_clear(pmem);
1503 
1504 				/*
1505 				 * If the spare pmem was used by
1506 				 * make_iv_available() we need to replace
1507 				 * it with the just freed pmem.
1508 				 *
1509 				 * See make_iv_available() for details.
1510 				 */
1511 				if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) &&
1512 				    !pager_spare_pmem) {
1513 					TAILQ_REMOVE(&tee_pager_pmem_head,
1514 						     pmem, link);
1515 					pager_spare_pmem = pmem;
1516 					pmem = NULL;
1517 				}
1518 
1519 				/*
1520 				 * Check if the needed virtual page was
1521 				 * made available as a side effect of the
1522 				 * call to make_iv_available() above. If so
1523 				 * we're done.
1524 				 */
1525 				tblidx_get_entry(tblidx, NULL, &attr);
1526 				if (attr & TEE_MATTR_VALID_BLOCK)
1527 					return;
1528 
1529 				/*
1530 				 * The freed pmem was used to replace the
1531 				 * consumed pager_spare_pmem above. Restart
1532 				 * to find another pmem.
1533 				 */
1534 				if (!pmem)
1535 					continue;
1536 			}
1537 		}
1538 
1539 		TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1540 		pmem_clear(pmem);
1541 
1542 		pmem_assign_fobj_page(pmem, area, page_va);
1543 		make_iv_available(pmem->fobj, pmem->fobj_pgidx,
1544 				  false /*!writable*/);
1545 		if (!IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) || pager_spare_pmem)
1546 			break;
1547 
1548 		/*
1549 		 * The spare pmem was used by make_iv_available(). We need
1550 		 * to replace it with the just freed pmem. And get another
1551 		 * pmem.
1552 		 *
1553 		 * See make_iv_available() for details.
1554 		 */
1555 		pmem_clear(pmem);
1556 		pager_spare_pmem = pmem;
1557 	}
1558 
1559 	/*
1560 	 * PAGER_AREA_TYPE_LOCK are always writable while PAGER_AREA_TYPE_RO
1561 	 * are never writable.
1562 	 *
1563 	 * Pages from PAGER_AREA_TYPE_RW start read-only to be
1564 	 * able to tell when they are updated and should be tagged
1565 	 * as dirty.
1566 	 */
1567 	if (area->type == PAGER_AREA_TYPE_LOCK ||
1568 	    (area->type == PAGER_AREA_TYPE_RW && abort_is_write_fault(ai)))
1569 		writable = true;
1570 	else
1571 		writable = false;
1572 
1573 	pager_deploy_page(pmem, area, page_va, clean_user_cache, writable);
1574 }
1575 
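/*
 * Handles an abort on a page that is already mapped. Returns false if
 * the fault instead has to be resolved by paging in the page. Returns
 * true when the pager is done with the abort; *handled then tells
 * whether the access was fixed up (true) or should be treated as an
 * error for the faulting context (false).
 */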
1576 static bool pager_update_permissions(struct tee_pager_area *area,
1577 			struct abort_info *ai, bool *handled)
1578 {
1579 	struct tblidx tblidx = area_va2tblidx(area, ai->va);
1580 	struct tee_pager_pmem *pmem = NULL;
1581 	uint32_t attr = 0;
1582 	paddr_t pa = 0;
1583 
1584 	*handled = false;
1585 
1586 	tblidx_get_entry(tblidx, &pa, &attr);
1587 
1588 	/* Not mapped */
1589 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1590 		return false;
1591 
1592 	/* Not readable, should not happen */
1593 	if (abort_is_user_exception(ai)) {
1594 		if (!(attr & TEE_MATTR_UR))
1595 			return true;
1596 	} else {
1597 		if (!(attr & TEE_MATTR_PR)) {
1598 			abort_print_error(ai);
1599 			panic();
1600 		}
1601 	}
1602 
1603 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1604 	case CORE_MMU_FAULT_TRANSLATION:
1605 	case CORE_MMU_FAULT_READ_PERMISSION:
1606 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1607 			/* Check attempting to execute from an NOX page */
1608 			if (abort_is_user_exception(ai)) {
1609 				if (!(attr & TEE_MATTR_UX))
1610 					return true;
1611 			} else {
1612 				if (!(attr & TEE_MATTR_PX)) {
1613 					abort_print_error(ai);
1614 					panic();
1615 				}
1616 			}
1617 		}
1618 		/* Since the page is mapped now it's OK */
1619 		break;
1620 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1621 		/* Check attempting to write to an RO page */
1622 		pmem = pmem_find(area, ai->va);
1623 		if (!pmem)
1624 			panic();
1625 		if (abort_is_user_exception(ai)) {
1626 			if (!(area->flags & TEE_MATTR_UW))
1627 				return true;
1628 			if (!(attr & TEE_MATTR_UW))
1629 				make_dirty_page(pmem, area, tblidx, pa);
1630 		} else {
1631 			if (!(area->flags & TEE_MATTR_PW)) {
1632 				abort_print_error(ai);
1633 				panic();
1634 			}
1635 			if (!(attr & TEE_MATTR_PW))
1636 				make_dirty_page(pmem, area, tblidx, pa);
1637 		}
1638 		/* Since the permissions have been updated it's OK now */
1639 		break;
1640 	default:
1641 		/* Some fault we can't deal with */
1642 		if (abort_is_user_exception(ai))
1643 			return true;
1644 		abort_print_error(ai);
1645 		panic();
1646 	}
1647 	*handled = true;
1648 	return true;
1649 }
1650 
1651 #ifdef CFG_TEE_CORE_DEBUG
1652 static void stat_handle_fault(void)
1653 {
1654 	static size_t num_faults;
1655 	static size_t min_npages = SIZE_MAX;
1656 	static size_t total_min_npages = SIZE_MAX;
1657 
1658 	num_faults++;
1659 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1660 		DMSG("nfaults %zu npages %zu (min %zu)",
1661 		     num_faults, tee_pager_npages, min_npages);
1662 		min_npages = tee_pager_npages; /* reset */
1663 	}
1664 	if (tee_pager_npages < min_npages)
1665 		min_npages = tee_pager_npages;
1666 	if (tee_pager_npages < total_min_npages)
1667 		total_min_npages = tee_pager_npages;
1668 }
1669 #else
1670 static void stat_handle_fault(void)
1671 {
1672 }
1673 #endif
1674 
1675 bool tee_pager_handle_fault(struct abort_info *ai)
1676 {
1677 	struct tee_pager_area *area;
1678 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1679 	uint32_t exceptions;
1680 	bool ret;
1681 	bool clean_user_cache = false;
1682 
1683 #ifdef TEE_PAGER_DEBUG_PRINT
1684 	if (!abort_is_user_exception(ai))
1685 		abort_print(ai);
1686 #endif
1687 
1688 	/*
1689 	 * We're updating pages that can affect several active CPUs at a
1690 	 * time below. We end up here because a thread tries to access some
1691 	 * memory that isn't available. We have to be careful when making
1692 	 * that memory available as other threads may succeed in accessing
1693 	 * that address the moment after we've made it available.
1694 	 *
1695 	 * That means that we can't just map the memory and populate the
1696 	 * page, instead we use the aliased mapping to populate the page
1697 	 * and once everything is ready we map it.
1698 	 */
1699 	exceptions = pager_lock(ai);
1700 
1701 	stat_handle_fault();
1702 
1703 	/* check if the access is valid */
1704 	if (abort_is_user_exception(ai)) {
1705 		area = find_uta_area(ai->va);
1706 		clean_user_cache = true;
1707 	} else {
1708 		area = find_area(&tee_pager_area_head, ai->va);
1709 		if (!area) {
1710 			area = find_uta_area(ai->va);
1711 			clean_user_cache = true;
1712 		}
1713 	}
1714 	if (!area || !area->pgt) {
1715 		ret = false;
1716 		goto out;
1717 	}
1718 
1719 	if (tee_pager_unhide_page(area, page_va))
1720 		goto out_success;
1721 
1722 	/*
1723 	 * The page wasn't hidden, but some other core may have
1724 	 * updated the table entry before we got here or we need
1725 	 * to make a read-only page read-write (dirty).
1726 	 */
1727 	if (pager_update_permissions(area, ai, &ret)) {
1728 		/*
1729 		 * Nothing more to do with the abort. The problem
1730 		 * could already have been dealt with from another
1731 		 * core, or, if ret is false, the TA will be panicked.
1732 		 */
1733 		goto out;
1734 	}
1735 
1736 	pager_get_page(area, ai, clean_user_cache);
1737 
1738 out_success:
1739 	tee_pager_hide_pages();
1740 	ret = true;
1741 out:
1742 	pager_unlock(exceptions);
1743 	return ret;
1744 }
1745 
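/*
 * Registers @npages physical pages starting at @vaddr with the pager.
 * With @unmap the pages are unmapped and added to the pool of pageable
 * pages (one may be set aside as the spare pmem for make_iv_available()),
 * otherwise they stay mapped and are recorded as holding the content of
 * the pager area covering them.
 */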
1746 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1747 {
1748 	size_t n = 0;
1749 
1750 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1751 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1752 
1753 	/* setup memory */
1754 	for (n = 0; n < npages; n++) {
1755 		struct core_mmu_table_info *ti = NULL;
1756 		struct tee_pager_pmem *pmem = NULL;
1757 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1758 		struct tblidx tblidx = { };
1759 		unsigned int pgidx = 0;
1760 		paddr_t pa = 0;
1761 		uint32_t attr = 0;
1762 
1763 		ti = find_table_info(va);
1764 		pgidx = core_mmu_va2idx(ti, va);
1765 		/*
1766 		 * Note that we can only support adding pages in the
1767 		 * valid range of this table info, currently not a problem.
1768 		 */
1769 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1770 
1771 		/* Ignore unmapped pages/blocks */
1772 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1773 			continue;
1774 
1775 		pmem = calloc(1, sizeof(struct tee_pager_pmem));
1776 		if (!pmem)
1777 			panic("out of mem");
1778 		pmem_clear(pmem);
1779 
1780 		pmem->va_alias = pager_add_alias_page(pa);
1781 
1782 		if (unmap) {
1783 			core_mmu_set_entry(ti, pgidx, 0, 0);
1784 			pgt_dec_used_entries(find_core_pgt(va));
1785 		} else {
1786 			struct tee_pager_area *area = NULL;
1787 
1788 			/*
1789 			 * The page is still mapped, let's assign the area
1790 			 * and update the protection bits accordingly.
1791 			 */
1792 			area = find_area(&tee_pager_area_head, va);
1793 			assert(area);
1794 			pmem_assign_fobj_page(pmem, area, va);
1795 			tblidx = pmem_get_area_tblidx(pmem, area);
1796 			assert(tblidx.pgt == find_core_pgt(va));
1797 			assert(pa == get_pmem_pa(pmem));
1798 			tblidx_set_entry(tblidx, pa,
1799 					 get_area_mattr(area->flags));
1800 		}
1801 
1802 		if (unmap && IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) &&
1803 		    !pager_spare_pmem) {
1804 			pager_spare_pmem = pmem;
1805 		} else {
1806 			tee_pager_npages++;
1807 			incr_npages_all();
1808 			set_npages();
1809 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1810 		}
1811 	}
1812 
1813 	/*
1814 	 * As this is done at init, invalidate all TLBs once instead of
1815 	 * targeting only the modified entries.
1816 	 */
1817 	tlbi_all();
1818 }
1819 
1820 #ifdef CFG_PAGED_USER_TA
1821 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1822 {
1823 	struct pgt *p = pgt;
1824 
1825 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1826 		p = SLIST_NEXT(p, link);
1827 	return p;
1828 }
1829 
1830 void tee_pager_assign_um_tables(struct user_mode_ctx *uctx)
1831 {
1832 	struct tee_pager_area *area = NULL;
1833 	struct pgt *pgt = NULL;
1834 
1835 	if (!uctx->areas)
1836 		return;
1837 
1838 	pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1839 	TAILQ_FOREACH(area, uctx->areas, link) {
1840 		if (!area->pgt)
1841 			area->pgt = find_pgt(pgt, area->base);
1842 		else
1843 			assert(area->pgt == find_pgt(pgt, area->base));
1844 		if (!area->pgt)
1845 			panic();
1846 	}
1847 }
1848 
1849 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1850 {
1851 	struct tee_pager_pmem *pmem = NULL;
1852 	struct tee_pager_area *area = NULL;
1853 	struct tee_pager_area_head *areas = NULL;
1854 	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1855 
1856 	if (!pgt->num_used_entries)
1857 		goto out;
1858 
1859 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1860 		if (pmem->fobj)
1861 			pmem_unmap(pmem, pgt);
1862 	}
1863 	assert(!pgt->num_used_entries);
1864 
1865 out:
1866 	areas = to_user_mode_ctx(pgt->ctx)->areas;
1867 	if (areas) {
1868 		TAILQ_FOREACH(area, areas, link) {
1869 			if (area->pgt == pgt)
1870 				area->pgt = NULL;
1871 		}
1872 	}
1873 
1874 	pager_unlock(exceptions);
1875 }
1876 DECLARE_KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1877 #endif /*CFG_PAGED_USER_TA*/
1878 
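/*
 * Releases the physical pages backing the locked virtual range
 * [addr, addr + size), returning them to the pool of pageable pages.
 * Partial pages at either end of the range are left mapped.
 */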
1879 void tee_pager_release_phys(void *addr, size_t size)
1880 {
1881 	bool unmaped = false;
1882 	vaddr_t va = (vaddr_t)addr;
1883 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1884 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1885 	struct tee_pager_area *area;
1886 	uint32_t exceptions;
1887 
1888 	if (end <= begin)
1889 		return;
1890 
1891 	exceptions = pager_lock_check_stack(128);
1892 
1893 	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
1894 		area = find_area(&tee_pager_area_head, va);
1895 		if (!area)
1896 			panic();
1897 		unmaped |= tee_pager_release_one_phys(area, va);
1898 	}
1899 
1900 	if (unmaped)
1901 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1902 
1903 	pager_unlock(exceptions);
1904 }
1905 DECLARE_KEEP_PAGER(tee_pager_release_phys);
1906 
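/*
 * Allocates a lock-paged buffer: virtual space is reserved in
 * tee_mm_vcore and backed by a locked paged fobj, so physical pages are
 * populated on first access and can be returned to the pager with
 * tee_pager_release_phys().
 */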
1907 void *tee_pager_alloc(size_t size)
1908 {
1909 	tee_mm_entry_t *mm = NULL;
1910 	uint8_t *smem = NULL;
1911 	size_t num_pages = 0;
1912 	struct fobj *fobj = NULL;
1913 
1914 	if (!size)
1915 		return NULL;
1916 
1917 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1918 	if (!mm)
1919 		return NULL;
1920 
1921 	smem = (uint8_t *)tee_mm_get_smem(mm);
1922 	num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
1923 	fobj = fobj_locked_paged_alloc(num_pages);
1924 	if (!fobj) {
1925 		tee_mm_free(mm);
1926 		return NULL;
1927 	}
1928 
1929 	tee_pager_add_core_area((vaddr_t)smem, PAGER_AREA_TYPE_LOCK, fobj);
1930 	fobj_put(fobj);
1931 
1932 	asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE);
1933 
1934 	return smem;
1935 }
1936 
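/*
 * Maps @fobj, which holds the IVs of all paged fobjs, as a read/write
 * pager area and records it in pager_iv_area for use by
 * make_iv_available().
 */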
1937 vaddr_t tee_pager_init_iv_area(struct fobj *fobj)
1938 {
1939 	tee_mm_entry_t *mm = NULL;
1940 	uint8_t *smem = NULL;
1941 
1942 	assert(!pager_iv_area);
1943 
1944 	mm = tee_mm_alloc(&tee_mm_vcore, fobj->num_pages * SMALL_PAGE_SIZE);
1945 	if (!mm)
1946 		panic();
1947 
1948 	smem = (uint8_t *)tee_mm_get_smem(mm);
1949 	tee_pager_add_core_area((vaddr_t)smem, PAGER_AREA_TYPE_RW, fobj);
1950 	fobj_put(fobj);
1951 
1952 	asan_tag_access(smem, smem + fobj->num_pages * SMALL_PAGE_SIZE);
1953 
1954 	pager_iv_area = find_area(&tee_pager_area_head, (vaddr_t)smem);
1955 	assert(pager_iv_area && pager_iv_area->fobj == fobj);
1956 
1957 	return (vaddr_t)smem;
1958 }
1959