xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 5ca851ec83ba0e0388e952b467bbd5bb8b90c6b7)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2021, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <io.h>
10 #include <keep.h>
11 #include <kernel/abort.h>
12 #include <kernel/asan.h>
13 #include <kernel/cache_helpers.h>
14 #include <kernel/linker.h>
15 #include <kernel/panic.h>
16 #include <kernel/spinlock.h>
17 #include <kernel/tee_misc.h>
18 #include <kernel/tee_ta_manager.h>
19 #include <kernel/thread.h>
20 #include <kernel/tlb_helpers.h>
21 #include <kernel/user_mode_ctx.h>
22 #include <mm/core_memprot.h>
23 #include <mm/fobj.h>
24 #include <mm/tee_mm.h>
25 #include <mm/tee_pager.h>
26 #include <stdlib.h>
27 #include <sys/queue.h>
28 #include <tee_api_defines.h>
29 #include <trace.h>
30 #include <types_ext.h>
31 #include <utee_defines.h>
32 #include <util.h>
33 
34 
35 static struct tee_pager_area_head tee_pager_area_head =
36 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
37 
38 #define INVALID_PGIDX		UINT_MAX
39 #define PMEM_FLAG_DIRTY		BIT(0)
40 #define PMEM_FLAG_HIDDEN	BIT(1)
41 
42 /*
43  * struct tee_pager_pmem - Represents a physical page used for paging.
44  *
45  * @flags	flags defined by PMEM_FLAG_* above
46  * @fobj_pgidx	index of the page in the @fobj
47  * @fobj	File object of which a page is made visible.
48  * @va_alias	Virtual address where the physical page is always aliased.
49  *		Used during remapping of the page when the content needs to
50  *		be updated before it's available at the new location.
51  */
52 struct tee_pager_pmem {
53 	unsigned int flags;
54 	unsigned int fobj_pgidx;
55 	struct fobj *fobj;
56 	void *va_alias;
57 	TAILQ_ENTRY(tee_pager_pmem) link;
58 };
59 
60 struct tblidx {
61 	struct pgt *pgt;
62 	unsigned int idx;
63 };
64 
65 /* The list of physical pages. The first page in the list is the oldest */
66 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
67 
68 static struct tee_pager_pmem_head tee_pager_pmem_head =
69 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
70 
71 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
72 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
73 
74 /* Number of pages to hide at a time in tee_pager_hide_pages() */
75 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
76 
77 /* Number of registered physical pages, used when hiding pages. */
78 static size_t tee_pager_npages;
79 
80 /* This area covers the IVs for all fobjs with paged IVs */
81 static struct tee_pager_area *pager_iv_area;
82 /* Spare pmem used by make_iv_available(), see that function for details. */
83 static struct tee_pager_pmem *pager_spare_pmem;
84 
85 #ifdef CFG_WITH_STATS
86 static struct tee_pager_stats pager_stats;
87 
88 static inline void incr_ro_hits(void)
89 {
90 	pager_stats.ro_hits++;
91 }
92 
93 static inline void incr_rw_hits(void)
94 {
95 	pager_stats.rw_hits++;
96 }
97 
98 static inline void incr_hidden_hits(void)
99 {
100 	pager_stats.hidden_hits++;
101 }
102 
103 static inline void incr_zi_released(void)
104 {
105 	pager_stats.zi_released++;
106 }
107 
108 static inline void incr_npages_all(void)
109 {
110 	pager_stats.npages_all++;
111 }
112 
113 static inline void set_npages(void)
114 {
115 	pager_stats.npages = tee_pager_npages;
116 }
117 
118 void tee_pager_get_stats(struct tee_pager_stats *stats)
119 {
120 	*stats = pager_stats;
121 
122 	pager_stats.hidden_hits = 0;
123 	pager_stats.ro_hits = 0;
124 	pager_stats.rw_hits = 0;
125 	pager_stats.zi_released = 0;
126 }
127 
128 #else /* CFG_WITH_STATS */
129 static inline void incr_ro_hits(void) { }
130 static inline void incr_rw_hits(void) { }
131 static inline void incr_hidden_hits(void) { }
132 static inline void incr_zi_released(void) { }
133 static inline void incr_npages_all(void) { }
134 static inline void set_npages(void) { }
135 
136 void tee_pager_get_stats(struct tee_pager_stats *stats)
137 {
138 	memset(stats, 0, sizeof(struct tee_pager_stats));
139 }
140 #endif /* CFG_WITH_STATS */
141 
142 #define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
143 #define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
144 #define TBL_SHIFT	SMALL_PAGE_SHIFT
145 
146 #define EFFECTIVE_VA_SIZE \
147 	(ROUNDUP(VCORE_START_VA + TEE_RAM_VA_SIZE, CORE_MMU_PGDIR_SIZE) - \
148 	 ROUNDDOWN(VCORE_START_VA, CORE_MMU_PGDIR_SIZE))
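
/*
 * Worked example with made-up numbers (the real values come from the
 * platform configuration): with VCORE_START_VA = 0x0e100000,
 * TEE_RAM_VA_SIZE = 0x00280000 and CORE_MMU_PGDIR_SIZE = 0x00200000 we get
 *	ROUNDUP(0x0e380000, 0x00200000)   = 0x0e400000
 *	ROUNDDOWN(0x0e100000, 0x00200000) = 0x0e000000
 * so EFFECTIVE_VA_SIZE = 0x00400000, that is, two page directories and
 * hence two entries in pager_tables below.
 */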
149 
150 static struct pager_table {
151 	struct pgt pgt;
152 	struct core_mmu_table_info tbl_info;
153 } *pager_tables;
154 static unsigned int num_pager_tables;
155 
156 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
157 
158 /* Defines the range of the alias area */
159 static tee_mm_entry_t *pager_alias_area;
160 /*
161  * Physical pages are added in a stack-like fashion to the alias area.
162  * @pager_alias_next_free gives the address of the next free entry if
163  * @pager_alias_next_free is != 0.
164  */
165 static uintptr_t pager_alias_next_free;
166 
167 #ifdef CFG_TEE_CORE_DEBUG
168 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
169 
170 static uint32_t pager_lock_dldetect(const char *func, const int line,
171 				    struct abort_info *ai)
172 {
173 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
174 	unsigned int retries = 0;
175 	unsigned int reminder = 0;
176 
177 	while (!cpu_spin_trylock(&pager_spinlock)) {
178 		retries++;
179 		if (!retries) {
180 			/* wrapped, time to report */
181 			trace_printf(func, line, TRACE_ERROR, true,
182 				     "possible spinlock deadlock reminder %u",
183 				     reminder);
184 			if (reminder < UINT_MAX)
185 				reminder++;
186 			if (ai)
187 				abort_print(ai);
188 		}
189 	}
190 
191 	return exceptions;
192 }
193 #else
194 static uint32_t pager_lock(struct abort_info __unused *ai)
195 {
196 	return cpu_spin_lock_xsave(&pager_spinlock);
197 }
198 #endif
199 
200 static uint32_t pager_lock_check_stack(size_t stack_size)
201 {
202 	if (stack_size) {
203 		int8_t buf[stack_size];
204 		size_t n;
205 
206 		/*
207 		 * Make sure to touch all pages of the stack that we expect
208 		 * to use with this lock held. We need to take any page
209 		 * faults before the lock is taken or we'll deadlock the
210 		 * pager. The pages that are populated in this way will
211 		 * eventually be released at certain save transitions of
212 		 * the thread.
213 		 */
214 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
215 			io_write8((vaddr_t)buf + n, 1);
216 		io_write8((vaddr_t)buf + stack_size - 1, 1);
217 	}
218 
219 	return pager_lock(NULL);
220 }
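
/*
 * Illustration of the pattern above (a sketch, not actual pager code): a
 * caller expecting to use at most ~64 bytes of stack while holding the
 * lock pre-faults that stack before taking the lock:
 *
 *	uint32_t exceptions = pager_lock_check_stack(64);
 *
 *	... work touching no more stack than was pre-faulted ...
 *
 *	pager_unlock(exceptions);
 *
 * The VLA trick works because writing to buf[0 .. stack_size - 1] takes
 * any page faults on those stack pages while the pager lock is still
 * free.
 */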
221 
222 static void pager_unlock(uint32_t exceptions)
223 {
224 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
225 }
226 
227 void *tee_pager_phys_to_virt(paddr_t pa)
228 {
229 	struct core_mmu_table_info ti;
230 	unsigned idx;
231 	uint32_t a;
232 	paddr_t p;
233 	vaddr_t v;
234 	size_t n;
235 
236 	/*
237 	 * Most addresses are mapped linearly, try that first if possible.
238 	 */
239 	if (!tee_pager_get_table_info(pa, &ti))
240 		return NULL; /* impossible pa */
241 	idx = core_mmu_va2idx(&ti, pa);
242 	core_mmu_get_entry(&ti, idx, &p, &a);
243 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
244 		return (void *)core_mmu_idx2va(&ti, idx);
245 
246 	n = 0;
247 	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
248 	while (true) {
249 		while (idx < TBL_NUM_ENTRIES) {
250 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
251 			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
252 				return NULL;
253 
254 			core_mmu_get_entry(&pager_tables[n].tbl_info,
255 					   idx, &p, &a);
256 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
257 				return (void *)v;
258 			idx++;
259 		}
260 
261 		n++;
262 		if (n >= num_pager_tables)
263 			return NULL;
264 		idx = 0;
265 	}
266 
267 	return NULL;
268 }
269 
270 static bool pmem_is_hidden(struct tee_pager_pmem *pmem)
271 {
272 	return pmem->flags & PMEM_FLAG_HIDDEN;
273 }
274 
275 static bool pmem_is_dirty(struct tee_pager_pmem *pmem)
276 {
277 	return pmem->flags & PMEM_FLAG_DIRTY;
278 }
279 
280 static bool pmem_is_covered_by_area(struct tee_pager_pmem *pmem,
281 				    struct tee_pager_area *area)
282 {
283 	if (pmem->fobj != area->fobj)
284 		return false;
285 	if (pmem->fobj_pgidx < area->fobj_pgoffs)
286 		return false;
287 	if ((pmem->fobj_pgidx - area->fobj_pgoffs) >=
288 	    (area->size >> SMALL_PAGE_SHIFT))
289 		return false;
290 
291 	return true;
292 }
293 
294 static struct tblidx pmem_get_area_tblidx(struct tee_pager_pmem *pmem,
295 					  struct tee_pager_area *area)
296 {
297 	size_t tbloffs = (area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT;
298 	size_t idx = pmem->fobj_pgidx - area->fobj_pgoffs + tbloffs;
299 	struct pgt *pgt = area->pgt;
300 
301 	assert(pmem->fobj && pmem->fobj_pgidx != INVALID_PGIDX);
302 	assert(idx < TBL_NUM_ENTRIES);
303 
304 	return (struct tblidx){ .idx = idx, .pgt = pgt };
305 }
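
/*
 * Worked example with made-up numbers (4 KiB small pages, one translation
 * table per 2 MiB page directory): for an area with base = 0x00504000 and
 * fobj_pgoffs = 2, and a pmem with fobj_pgidx = 5:
 *	tbloffs = (0x00504000 & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT = 260
 *	idx     = 5 - 2 + 260 = 263
 * that is, the pmem holds the fourth page of the area and occupies entry
 * 263 in the area's translation table.
 */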
306 
307 static struct pager_table *find_pager_table_may_fail(vaddr_t va)
308 {
309 	size_t n;
310 	const vaddr_t mask = CORE_MMU_PGDIR_MASK;
311 
312 	if (!pager_tables)
313 		return NULL;
314 
315 	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
316 	    CORE_MMU_PGDIR_SHIFT;
317 	if (n >= num_pager_tables)
318 		return NULL;
319 
320 	assert(va >= pager_tables[n].tbl_info.va_base &&
321 	       va <= (pager_tables[n].tbl_info.va_base | mask));
322 
323 	return pager_tables + n;
324 }
325 
326 static struct pager_table *find_pager_table(vaddr_t va)
327 {
328 	struct pager_table *pt = find_pager_table_may_fail(va);
329 
330 	assert(pt);
331 	return pt;
332 }
333 
334 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
335 {
336 	struct pager_table *pt = find_pager_table_may_fail(va);
337 
338 	if (!pt)
339 		return false;
340 
341 	*ti = pt->tbl_info;
342 	return true;
343 }
344 
345 static struct core_mmu_table_info *find_table_info(vaddr_t va)
346 {
347 	return &find_pager_table(va)->tbl_info;
348 }
349 
350 static struct pgt *find_core_pgt(vaddr_t va)
351 {
352 	return &find_pager_table(va)->pgt;
353 }
354 
355 void tee_pager_set_alias_area(tee_mm_entry_t *mm)
356 {
357 	struct pager_table *pt;
358 	unsigned idx;
359 	vaddr_t smem = tee_mm_get_smem(mm);
360 	size_t nbytes = tee_mm_get_bytes(mm);
361 	vaddr_t v;
362 	uint32_t a = 0;
363 
364 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
365 
366 	assert(!pager_alias_area);
367 	pager_alias_area = mm;
368 	pager_alias_next_free = smem;
369 
370 	/* Clear all mappings in the alias area */
371 	pt = find_pager_table(smem);
372 	idx = core_mmu_va2idx(&pt->tbl_info, smem);
373 	while (pt <= (pager_tables + num_pager_tables - 1)) {
374 		while (idx < TBL_NUM_ENTRIES) {
375 			v = core_mmu_idx2va(&pt->tbl_info, idx);
376 			if (v >= (smem + nbytes))
377 				goto out;
378 
379 			core_mmu_get_entry(&pt->tbl_info, idx, NULL, &a);
380 			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
381 			if (a & TEE_MATTR_VALID_BLOCK)
382 				pgt_dec_used_entries(&pt->pgt);
383 			idx++;
384 		}
385 
386 		pt++;
387 		idx = 0;
388 	}
389 
390 out:
391 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
392 }
393 
394 static size_t tbl_usage_count(struct core_mmu_table_info *ti)
395 {
396 	size_t n;
397 	uint32_t a = 0;
398 	size_t usage = 0;
399 
400 	for (n = 0; n < ti->num_entries; n++) {
401 		core_mmu_get_entry(ti, n, NULL, &a);
402 		if (a & TEE_MATTR_VALID_BLOCK)
403 			usage++;
404 	}
405 	return usage;
406 }
407 
408 static void tblidx_get_entry(struct tblidx tblidx, paddr_t *pa, uint32_t *attr)
409 {
410 	assert(tblidx.pgt && tblidx.idx < TBL_NUM_ENTRIES);
411 	core_mmu_get_entry_primitive(tblidx.pgt->tbl, TBL_LEVEL, tblidx.idx,
412 				     pa, attr);
413 }
414 
415 static void tblidx_set_entry(struct tblidx tblidx, paddr_t pa, uint32_t attr)
416 {
417 	assert(tblidx.pgt && tblidx.idx < TBL_NUM_ENTRIES);
418 	core_mmu_set_entry_primitive(tblidx.pgt->tbl, TBL_LEVEL, tblidx.idx,
419 				     pa, attr);
420 }
421 
422 static struct tblidx area_va2tblidx(struct tee_pager_area *area, vaddr_t va)
423 {
424 	struct pgt *pgt = area->pgt;
425 	paddr_t mask = CORE_MMU_PGDIR_MASK;
426 
427 	assert(va >= area->base && va < (area->base + area->size));
428 
429 	return (struct tblidx){
430 		.idx = (va & mask) / SMALL_PAGE_SIZE,
431 		.pgt = pgt
432 	};
433 }
434 
435 static vaddr_t tblidx2va(struct tblidx tblidx)
436 {
437 	return tblidx.pgt->vabase + (tblidx.idx << SMALL_PAGE_SHIFT);
438 }
439 
440 static void tblidx_tlbi_entry(struct tblidx tblidx)
441 {
442 	vaddr_t va = tblidx2va(tblidx);
443 
444 #if defined(CFG_PAGED_USER_TA)
445 	if (tblidx.pgt->ctx) {
446 		uint32_t asid = to_user_mode_ctx(tblidx.pgt->ctx)->vm_info.asid;
447 
448 		tlbi_mva_asid(va, asid);
449 		return;
450 	}
451 #endif
452 	tlbi_mva_allasid(va);
453 }
454 
455 static void pmem_assign_fobj_page(struct tee_pager_pmem *pmem,
456 				  struct tee_pager_area *area, vaddr_t va)
457 {
458 	struct tee_pager_pmem *p = NULL;
459 	unsigned int fobj_pgidx = 0;
460 
461 	assert(!pmem->fobj && pmem->fobj_pgidx == INVALID_PGIDX);
462 
463 	assert(va >= area->base && va < (area->base + area->size));
464 	fobj_pgidx = (va - area->base) / SMALL_PAGE_SIZE + area->fobj_pgoffs;
465 
466 	TAILQ_FOREACH(p, &tee_pager_pmem_head, link)
467 		assert(p->fobj != area->fobj || p->fobj_pgidx != fobj_pgidx);
468 
469 	pmem->fobj = area->fobj;
470 	pmem->fobj_pgidx = fobj_pgidx;
471 }
472 
473 static void pmem_clear(struct tee_pager_pmem *pmem)
474 {
475 	pmem->fobj = NULL;
476 	pmem->fobj_pgidx = INVALID_PGIDX;
477 	pmem->flags = 0;
478 }
479 
480 static void pmem_unmap(struct tee_pager_pmem *pmem, struct pgt *only_this_pgt)
481 {
482 	struct tee_pager_area *area = NULL;
483 	struct tblidx tblidx = { };
484 	uint32_t a = 0;
485 
486 	TAILQ_FOREACH(area, &pmem->fobj->areas, fobj_link) {
487 		/*
488 		 * If only_this_pgt points to a pgt then the pgt of this
489 		 * area has to match or we'll skip over it.
490 		 */
491 		if (only_this_pgt && area->pgt != only_this_pgt)
492 			continue;
493 		if (!area->pgt || !pmem_is_covered_by_area(pmem, area))
494 			continue;
495 		tblidx = pmem_get_area_tblidx(pmem, area);
496 		tblidx_get_entry(tblidx, NULL, &a);
497 		if (a & TEE_MATTR_VALID_BLOCK) {
498 			tblidx_set_entry(tblidx, 0, 0);
499 			pgt_dec_used_entries(tblidx.pgt);
500 			tblidx_tlbi_entry(tblidx);
501 		}
502 	}
503 }
504 
505 void tee_pager_early_init(void)
506 {
507 	size_t n = 0;
508 
509 	num_pager_tables = EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE;
510 	pager_tables = calloc(num_pager_tables, sizeof(*pager_tables));
511 	if (!pager_tables)
512 		panic("Cannot allocate pager_tables");
513 
514 	/*
515 	 * Note that this depends on add_pager_vaspace() adding vaspace
516 	 * after end of memory.
517 	 */
518 	for (n = 0; n < num_pager_tables; n++) {
519 		if (!core_mmu_find_table(NULL, VCORE_START_VA +
520 					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
521 					 &pager_tables[n].tbl_info))
522 			panic("can't find mmu tables");
523 
524 		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
525 			panic("Unsupported page size in translation table");
526 		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
527 		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);
528 
529 		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
530 		pgt_set_used_entries(&pager_tables[n].pgt,
531 				tbl_usage_count(&pager_tables[n].tbl_info));
532 	}
533 }
534 
535 static void *pager_add_alias_page(paddr_t pa)
536 {
537 	unsigned idx;
538 	struct core_mmu_table_info *ti;
539 	/* Alias pages mapped read-only, write permission is set when needed */
540 	uint32_t attr = TEE_MATTR_VALID_BLOCK |
541 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
542 			TEE_MATTR_SECURE | TEE_MATTR_PR;
543 
544 	DMSG("0x%" PRIxPA, pa);
545 
546 	ti = find_table_info(pager_alias_next_free);
547 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
548 	core_mmu_set_entry(ti, idx, pa, attr);
549 	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
550 	pager_alias_next_free += SMALL_PAGE_SIZE;
551 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
552 				      tee_mm_get_bytes(pager_alias_area)))
553 		pager_alias_next_free = 0;
554 	return (void *)core_mmu_idx2va(ti, idx);
555 }
556 
557 static void area_insert(struct tee_pager_area_head *head,
558 			struct tee_pager_area *area,
559 			struct tee_pager_area *a_prev)
560 {
561 	uint32_t exceptions = pager_lock_check_stack(8);
562 
563 	if (a_prev)
564 		TAILQ_INSERT_AFTER(head, a_prev, area, link);
565 	else
566 		TAILQ_INSERT_HEAD(head, area, link);
567 	TAILQ_INSERT_TAIL(&area->fobj->areas, area, fobj_link);
568 
569 	pager_unlock(exceptions);
570 }
571 DECLARE_KEEP_PAGER(area_insert);
572 
573 void tee_pager_add_core_area(vaddr_t base, enum tee_pager_area_type type,
574 			     struct fobj *fobj)
575 {
576 	struct tee_pager_area *area = NULL;
577 	uint32_t flags = 0;
578 	size_t fobj_pgoffs = 0;
579 	vaddr_t b = base;
580 	size_t s = 0;
581 	size_t s2 = 0;
582 
583 	assert(fobj);
584 	s = fobj->num_pages * SMALL_PAGE_SIZE;
585 
586 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : type %d", base, base + s, type);
587 
588 	if (base & SMALL_PAGE_MASK || !s) {
589 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, s);
590 		panic();
591 	}
592 
593 	switch (type) {
594 	case PAGER_AREA_TYPE_RO:
595 		flags = TEE_MATTR_PRX;
596 		break;
597 	case PAGER_AREA_TYPE_RW:
598 	case PAGER_AREA_TYPE_LOCK:
599 		flags = TEE_MATTR_PRW;
600 		break;
601 	default:
602 		panic();
603 	}
604 
605 	while (s) {
606 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
607 		area = calloc(1, sizeof(*area));
608 		if (!area)
609 			panic("alloc_area");
610 
611 		area->fobj = fobj_get(fobj);
612 		area->fobj_pgoffs = fobj_pgoffs;
613 		area->type = type;
614 		area->pgt = find_core_pgt(b);
615 		area->base = b;
616 		area->size = s2;
617 		area->flags = flags;
618 		area_insert(&tee_pager_area_head, area, NULL);
619 
620 		b += s2;
621 		s -= s2;
622 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
623 	}
624 }
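
/*
 * Typical usage (a sketch based on tee_pager_alloc() further down in this
 * file): allocate a backing fobj, register the virtual range with the
 * pager and drop the local reference since each area now holds one:
 *
 *	struct fobj *fobj = fobj_locked_paged_alloc(num_pages);
 *
 *	tee_pager_add_core_area(va, PAGER_AREA_TYPE_LOCK, fobj);
 *	fobj_put(fobj);
 *
 * The loop above splits the range into one area per translation table
 * (CORE_MMU_PGDIR_SIZE) so that each area can keep a single pgt pointer.
 */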
625 
626 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
627 					vaddr_t va)
628 {
629 	struct tee_pager_area *area;
630 
631 	if (!areas)
632 		return NULL;
633 
634 	TAILQ_FOREACH(area, areas, link) {
635 		if (core_is_buffer_inside(va, 1, area->base, area->size))
636 			return area;
637 	}
638 	return NULL;
639 }
640 
641 #ifdef CFG_PAGED_USER_TA
642 static struct tee_pager_area *find_uta_area(vaddr_t va)
643 {
644 	struct ts_ctx *ctx = thread_get_tsd()->ctx;
645 
646 	if (!is_user_mode_ctx(ctx))
647 		return NULL;
648 	return find_area(to_user_mode_ctx(ctx)->areas, va);
649 }
650 #else
651 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
652 {
653 	return NULL;
654 }
655 #endif /*CFG_PAGED_USER_TA*/
656 
657 
658 static uint32_t get_area_mattr(uint32_t area_flags)
659 {
660 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
661 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
662 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
663 
664 	return attr;
665 }
666 
667 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
668 {
669 	struct core_mmu_table_info *ti;
670 	paddr_t pa;
671 	unsigned idx;
672 
673 	ti = find_table_info((vaddr_t)pmem->va_alias);
674 	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
675 	core_mmu_get_entry(ti, idx, &pa, NULL);
676 	return pa;
677 }
678 
679 #ifdef CFG_PAGED_USER_TA
680 static void unlink_area(struct tee_pager_area_head *area_head,
681 			struct tee_pager_area *area)
682 {
683 	uint32_t exceptions = pager_lock_check_stack(64);
684 
685 	TAILQ_REMOVE(area_head, area, link);
686 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
687 
688 	pager_unlock(exceptions);
689 }
690 DECLARE_KEEP_PAGER(unlink_area);
691 
692 static void free_area(struct tee_pager_area *area)
693 {
694 	fobj_put(area->fobj);
695 	free(area);
696 }
697 
698 static TEE_Result pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base,
699 				    struct fobj *fobj, uint32_t prot)
700 {
701 	struct tee_pager_area *a_prev = NULL;
702 	struct tee_pager_area *area = NULL;
703 	vaddr_t b = base;
704 	size_t fobj_pgoffs = 0;
705 	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
706 
707 	if (!uctx->areas) {
708 		uctx->areas = malloc(sizeof(*uctx->areas));
709 		if (!uctx->areas)
710 			return TEE_ERROR_OUT_OF_MEMORY;
711 		TAILQ_INIT(uctx->areas);
712 	}
713 
714 	area = TAILQ_FIRST(uctx->areas);
715 	while (area) {
716 		if (core_is_buffer_intersect(b, s, area->base,
717 					     area->size))
718 			return TEE_ERROR_BAD_PARAMETERS;
719 		if (b < area->base)
720 			break;
721 		a_prev = area;
722 		area = TAILQ_NEXT(area, link);
723 	}
724 
725 	while (s) {
726 		size_t s2;
727 
728 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
729 		area = calloc(1, sizeof(*area));
730 		if (!area)
731 			return TEE_ERROR_OUT_OF_MEMORY;
732 
733 		/* Table info will be set when the context is activated. */
734 		area->fobj = fobj_get(fobj);
735 		area->fobj_pgoffs = fobj_pgoffs;
736 		area->type = PAGER_AREA_TYPE_RW;
737 		area->base = b;
738 		area->size = s2;
739 		area->flags = prot;
740 
741 		area_insert(uctx->areas, area, a_prev);
742 
743 		a_prev = area;
744 		b += s2;
745 		s -= s2;
746 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
747 	}
748 
749 	return TEE_SUCCESS;
750 }
751 
752 TEE_Result tee_pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base,
753 				 struct fobj *fobj, uint32_t prot)
754 {
755 	TEE_Result res = TEE_SUCCESS;
756 	struct thread_specific_data *tsd = thread_get_tsd();
757 	struct tee_pager_area *area = NULL;
758 	struct core_mmu_table_info dir_info = { NULL };
759 
760 	if (uctx->ts_ctx != tsd->ctx) {
761 		/*
762 		 * Changes are to a user mode ctx that isn't active. Just add
763 		 * the areas, page tables will be dealt with later.
764 		 */
765 		return pager_add_um_area(uctx, base, fobj, prot);
766 	}
767 
768 	/*
769 	 * Assign page tables before adding areas to be able to tell which
770 	 * are newly added and should be removed in case of failure.
771 	 */
772 	tee_pager_assign_um_tables(uctx);
773 	res = pager_add_um_area(uctx, base, fobj, prot);
774 	if (res) {
775 		struct tee_pager_area *next_a;
776 
777 		/* Remove all added areas */
778 		TAILQ_FOREACH_SAFE(area, uctx->areas, link, next_a) {
779 			if (!area->pgt) {
780 				unlink_area(uctx->areas, area);
781 				free_area(area);
782 			}
783 		}
784 		return res;
785 	}
786 
787 	/*
788 	 * Assign page tables to the new areas and make sure that the page
789 	 * tables are registered in the upper table.
790 	 */
791 	tee_pager_assign_um_tables(uctx);
792 	core_mmu_get_user_pgdir(&dir_info);
793 	TAILQ_FOREACH(area, uctx->areas, link) {
794 		paddr_t pa;
795 		size_t idx;
796 		uint32_t attr;
797 
798 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
799 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
800 
801 		/*
802 		 * Check if the page table is already in use, if it is, it's
803 		 * already registered.
804 		 */
805 		if (area->pgt->num_used_entries) {
806 			assert(attr & TEE_MATTR_TABLE);
807 			assert(pa == virt_to_phys(area->pgt->tbl));
808 			continue;
809 		}
810 
811 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
812 		pa = virt_to_phys(area->pgt->tbl);
813 		assert(pa);
814 		/*
815 		 * Note that the update of the table entry is guaranteed to
816 		 * be atomic.
817 		 */
818 		core_mmu_set_entry(&dir_info, idx, pa, attr);
819 	}
820 
821 	return TEE_SUCCESS;
822 }
823 
824 static void split_area(struct tee_pager_area_head *area_head,
825 		       struct tee_pager_area *area, struct tee_pager_area *a2,
826 		       vaddr_t va)
827 {
828 	uint32_t exceptions = pager_lock_check_stack(64);
829 	size_t diff = va - area->base;
830 
831 	a2->fobj = fobj_get(area->fobj);
832 	a2->fobj_pgoffs = area->fobj_pgoffs + diff / SMALL_PAGE_SIZE;
833 	a2->type = area->type;
834 	a2->flags = area->flags;
835 	a2->base = va;
836 	a2->size = area->size - diff;
837 	a2->pgt = area->pgt;
838 	area->size = diff;
839 
840 	TAILQ_INSERT_AFTER(area_head, area, a2, link);
841 	TAILQ_INSERT_AFTER(&area->fobj->areas, area, a2, fobj_link);
842 
843 	pager_unlock(exceptions);
844 }
845 DECLARE_KEEP_PAGER(split_area);
846 
847 TEE_Result tee_pager_split_um_region(struct user_mode_ctx *uctx, vaddr_t va)
848 {
849 	struct tee_pager_area *area = NULL;
850 	struct tee_pager_area *a2 = NULL;
851 
852 	if (va & SMALL_PAGE_MASK)
853 		return TEE_ERROR_BAD_PARAMETERS;
854 
855 	TAILQ_FOREACH(area, uctx->areas, link) {
856 		if (va == area->base || va == area->base + area->size)
857 			return TEE_SUCCESS;
858 		if (va > area->base && va < area->base + area->size) {
859 			a2 = calloc(1, sizeof(*a2));
860 			if (!a2)
861 				return TEE_ERROR_OUT_OF_MEMORY;
862 			split_area(uctx->areas, area, a2, va);
863 			return TEE_SUCCESS;
864 		}
865 	}
866 
867 	return TEE_SUCCESS;
868 }
869 
870 static void merge_area_with_next(struct tee_pager_area_head *area_head,
871 				 struct tee_pager_area *a,
872 				 struct tee_pager_area *a_next)
873 {
874 	uint32_t exceptions = pager_lock_check_stack(64);
875 
876 	TAILQ_REMOVE(area_head, a_next, link);
877 	TAILQ_REMOVE(&a_next->fobj->areas, a_next, fobj_link);
878 	a->size += a_next->size;
879 
880 	pager_unlock(exceptions);
881 }
882 DECLARE_KEEP_PAGER(merge_area_with_next);
883 
884 void tee_pager_merge_um_region(struct user_mode_ctx *uctx, vaddr_t va,
885 			       size_t len)
886 {
887 	struct tee_pager_area *a_next = NULL;
888 	struct tee_pager_area *a = NULL;
889 	vaddr_t end_va = 0;
890 
891 	if ((va | len) & SMALL_PAGE_MASK)
892 		return;
893 	if (ADD_OVERFLOW(va, len, &end_va))
894 		return;
895 
896 	for (a = TAILQ_FIRST(uctx->areas);; a = a_next) {
897 		a_next = TAILQ_NEXT(a, link);
898 		if (!a_next)
899 			return;
900 
901 		/* Try merging with the area just before va */
902 		if (a->base + a->size < va)
903 			continue;
904 
905 		/*
906 		 * If a->base is well past our range we're done.
907 		 * Note that if it's just the page after our range we'll
908 		 * try to merge.
909 		 */
910 		if (a->base > end_va)
911 			return;
912 
913 		if (a->base + a->size != a_next->base)
914 			continue;
915 		if (a->fobj != a_next->fobj || a->type != a_next->type ||
916 		    a->flags != a_next->flags || a->pgt != a_next->pgt)
917 			continue;
918 		if (a->fobj_pgoffs + a->size / SMALL_PAGE_SIZE !=
919 		    a_next->fobj_pgoffs)
920 			continue;
921 
922 		merge_area_with_next(uctx->areas, a, a_next);
923 		free_area(a_next);
924 		a_next = a;
925 	}
926 }
927 
928 static void rem_area(struct tee_pager_area_head *area_head,
929 		     struct tee_pager_area *area)
930 {
931 	struct tee_pager_pmem *pmem;
932 	size_t last_pgoffs = area->fobj_pgoffs +
933 			     (area->size >> SMALL_PAGE_SHIFT) - 1;
934 	uint32_t exceptions;
935 	struct tblidx tblidx = { };
936 	uint32_t a = 0;
937 
938 	exceptions = pager_lock_check_stack(64);
939 
940 	TAILQ_REMOVE(area_head, area, link);
941 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
942 
943 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
944 		if (pmem->fobj != area->fobj ||
945 		    pmem->fobj_pgidx < area->fobj_pgoffs ||
946 		    pmem->fobj_pgidx > last_pgoffs)
947 			continue;
948 
949 		tblidx = pmem_get_area_tblidx(pmem, area);
950 		tblidx_get_entry(tblidx, NULL, &a);
951 		if (!(a & TEE_MATTR_VALID_BLOCK))
952 			continue;
953 
954 		tblidx_set_entry(tblidx, 0, 0);
955 		tblidx_tlbi_entry(tblidx);
956 		pgt_dec_used_entries(tblidx.pgt);
957 	}
958 
959 	pager_unlock(exceptions);
960 
961 	free_area(area);
962 }
963 DECLARE_KEEP_PAGER(rem_area);
964 
965 void tee_pager_rem_um_region(struct user_mode_ctx *uctx, vaddr_t base,
966 			     size_t size)
967 {
968 	struct tee_pager_area *area;
969 	struct tee_pager_area *next_a;
970 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
971 
972 	TAILQ_FOREACH_SAFE(area, uctx->areas, link, next_a) {
973 		if (core_is_buffer_inside(area->base, area->size, base, s))
974 			rem_area(uctx->areas, area);
975 	}
976 	tlbi_asid(uctx->vm_info.asid);
977 }
978 
979 void tee_pager_rem_um_areas(struct user_mode_ctx *uctx)
980 {
981 	struct tee_pager_area *area = NULL;
982 
983 	if (!uctx->areas)
984 		return;
985 
986 	while (true) {
987 		area = TAILQ_FIRST(uctx->areas);
988 		if (!area)
989 			break;
990 		unlink_area(uctx->areas, area);
991 		free_area(area);
992 	}
993 
994 	free(uctx->areas);
995 }
996 
997 static bool __maybe_unused same_context(struct tee_pager_pmem *pmem)
998 {
999 	struct tee_pager_area *a = TAILQ_FIRST(&pmem->fobj->areas);
1000 	void *ctx = a->pgt->ctx;
1001 
1002 	do {
1003 		a = TAILQ_NEXT(a, fobj_link);
1004 		if (!a)
1005 			return true;
1006 	} while (a->pgt->ctx == ctx);
1007 
1008 	return false;
1009 }
1010 
1011 bool tee_pager_set_um_area_attr(struct user_mode_ctx *uctx, vaddr_t base,
1012 				size_t size, uint32_t flags)
1013 {
1014 	bool ret = false;
1015 	vaddr_t b = base;
1016 	size_t s = size;
1017 	size_t s2 = 0;
1018 	struct tee_pager_area *area = find_area(uctx->areas, b);
1019 	uint32_t exceptions = 0;
1020 	struct tee_pager_pmem *pmem = NULL;
1021 	uint32_t a = 0;
1022 	uint32_t f = 0;
1023 	uint32_t mattr = 0;
1024 	uint32_t f2 = 0;
1025 	struct tblidx tblidx = { };
1026 
1027 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
1028 	if (f & TEE_MATTR_UW)
1029 		f |= TEE_MATTR_PW;
1030 	mattr = get_area_mattr(f);
1031 
1032 	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1033 
1034 	while (s) {
1035 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
1036 		if (!area || area->base != b || area->size != s2) {
1037 			ret = false;
1038 			goto out;
1039 		}
1040 		b += s2;
1041 		s -= s2;
1042 
1043 		if (area->flags == f)
1044 			goto next_area;
1045 
1046 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1047 			if (!pmem_is_covered_by_area(pmem, area))
1048 				continue;
1049 
1050 			tblidx = pmem_get_area_tblidx(pmem, area);
1051 			tblidx_get_entry(tblidx, NULL, &a);
1052 			if (a == f)
1053 				continue;
1054 			tblidx_set_entry(tblidx, 0, 0);
1055 			tblidx_tlbi_entry(tblidx);
1056 
1057 			pmem->flags &= ~PMEM_FLAG_HIDDEN;
1058 			if (pmem_is_dirty(pmem))
1059 				f2 = mattr;
1060 			else
1061 				f2 = mattr & ~(TEE_MATTR_UW | TEE_MATTR_PW);
1062 			tblidx_set_entry(tblidx, get_pmem_pa(pmem), f2);
1063 			if (!(a & TEE_MATTR_VALID_BLOCK))
1064 				pgt_inc_used_entries(area->pgt);
1065 			/*
1066 			 * Make sure the table update is visible before
1067 			 * continuing.
1068 			 */
1069 			dsb_ishst();
1070 
1071 			/*
1072 			 * There's a problem if this page is already shared:
1073 			 * we'd need to do an icache invalidate for each
1074 			 * context in which it is shared. In practice this
1075 			 * will never happen.
1076 			 */
1077 			if (flags & TEE_MATTR_UX) {
1078 				void *va = (void *)tblidx2va(tblidx);
1079 
1080 				/* Assert that the pmem isn't shared. */
1081 				assert(same_context(pmem));
1082 
1083 				dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1084 				icache_inv_user_range(va, SMALL_PAGE_SIZE);
1085 			}
1086 		}
1087 
1088 		area->flags = f;
1089 next_area:
1090 		area = TAILQ_NEXT(area, link);
1091 	}
1092 
1093 	ret = true;
1094 out:
1095 	pager_unlock(exceptions);
1096 	return ret;
1097 }
1098 
1099 DECLARE_KEEP_PAGER(tee_pager_set_um_area_attr);
1100 #endif /*CFG_PAGED_USER_TA*/
1101 
1102 void tee_pager_invalidate_fobj(struct fobj *fobj)
1103 {
1104 	struct tee_pager_pmem *pmem;
1105 	uint32_t exceptions;
1106 
1107 	exceptions = pager_lock_check_stack(64);
1108 
1109 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
1110 		if (pmem->fobj == fobj)
1111 			pmem_clear(pmem);
1112 
1113 	pager_unlock(exceptions);
1114 }
1115 DECLARE_KEEP_PAGER(tee_pager_invalidate_fobj);
1116 
1117 static struct tee_pager_pmem *pmem_find(struct tee_pager_area *area, vaddr_t va)
1118 {
1119 	struct tee_pager_pmem *pmem = NULL;
1120 	size_t fobj_pgidx = 0;
1121 
1122 	assert(va >= area->base && va < (area->base + area->size));
1123 	fobj_pgidx = (va - area->base) / SMALL_PAGE_SIZE + area->fobj_pgoffs;
1124 
1125 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
1126 		if (pmem->fobj == area->fobj && pmem->fobj_pgidx == fobj_pgidx)
1127 			return pmem;
1128 
1129 	return NULL;
1130 }
1131 
1132 static bool tee_pager_unhide_page(struct tee_pager_area *area, vaddr_t page_va)
1133 {
1134 	struct tblidx tblidx = area_va2tblidx(area, page_va);
1135 	struct tee_pager_pmem *pmem = pmem_find(area, page_va);
1136 	uint32_t a = get_area_mattr(area->flags);
1137 	uint32_t attr = 0;
1138 	paddr_t pa = 0;
1139 
1140 	if (!pmem)
1141 		return false;
1142 
1143 	tblidx_get_entry(tblidx, NULL, &attr);
1144 	if (attr & TEE_MATTR_VALID_BLOCK)
1145 		return false;
1146 
1147 	/*
1148 	 * The page is hidden, or not mapped yet. Unhide the page and
1149 	 * move it to the tail.
1150 	 *
1151 	 * Since the page isn't mapped there doesn't exist a valid TLB entry
1152 	 * for this address, so no TLB invalidation is required after setting
1153 	 * the new entry. A DSB is needed though, to make the write visible.
1154 	 *
1155 	 * For user executable pages it's more complicated. Those pages can
1156 	 * be shared between multiple TA mappings and thus populated by
1157 	 * another TA. The reference manual states that:
1158 	 *
1159 	 * "instruction cache maintenance is required only after writing
1160 	 * new data to a physical address that holds an instruction."
1161 	 *
1162 	 * So for hidden pages we would not need to invalidate i-cache, but
1163 	 * for newly populated pages we do. Since we don't know which we
1164  * for newly populated pages we do. Since we don't know which, we
1165 	 * don't need to clean the d-cache though, since that has already
1166 	 * been done earlier.
1167 	 *
1168 	 * Additional bookkeeping to tell if the i-cache invalidation is
1169 	 * needed or not is left as a future optimization.
1170 	 */
1171 
1172 	/* If it's not a dirty block, then it should be read only. */
1173 	if (!pmem_is_dirty(pmem))
1174 		a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1175 
1176 	pa = get_pmem_pa(pmem);
1177 	pmem->flags &= ~PMEM_FLAG_HIDDEN;
1178 	if (area->flags & TEE_MATTR_UX) {
1179 		void *va = (void *)tblidx2va(tblidx);
1180 
1181 		/* Set a temporary read-only mapping */
1182 		assert(!(a & (TEE_MATTR_UW | TEE_MATTR_PW)));
1183 		tblidx_set_entry(tblidx, pa, a & ~TEE_MATTR_UX);
1184 		dsb_ishst();
1185 
1186 		icache_inv_user_range(va, SMALL_PAGE_SIZE);
1187 
1188 		/* Set the final mapping */
1189 		tblidx_set_entry(tblidx, pa, a);
1190 		tblidx_tlbi_entry(tblidx);
1191 	} else {
1192 		tblidx_set_entry(tblidx, pa, a);
1193 		dsb_ishst();
1194 	}
1195 	pgt_inc_used_entries(tblidx.pgt);
1196 
1197 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1198 	TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1199 	incr_hidden_hits();
1200 	return true;
1201 }
1202 
1203 static void tee_pager_hide_pages(void)
1204 {
1205 	struct tee_pager_pmem *pmem = NULL;
1206 	size_t n = 0;
1207 
1208 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1209 		if (n >= TEE_PAGER_NHIDE)
1210 			break;
1211 		n++;
1212 
1213 		/* we cannot hide pages when pmem->fobj is not defined. */
1214 		if (!pmem->fobj)
1215 			continue;
1216 
1217 		if (pmem_is_hidden(pmem))
1218 			continue;
1219 
1220 		pmem->flags |= PMEM_FLAG_HIDDEN;
1221 		pmem_unmap(pmem, NULL);
1222 	}
1223 }
1224 
1225 static unsigned int __maybe_unused
1226 num_areas_with_pmem(struct tee_pager_pmem *pmem)
1227 {
1228 	struct tee_pager_area *a = NULL;
1229 	unsigned int num_matches = 0;
1230 
1231 	TAILQ_FOREACH(a, &pmem->fobj->areas, fobj_link)
1232 		if (pmem_is_covered_by_area(pmem, a))
1233 			num_matches++;
1234 
1235 	return num_matches;
1236 }
1237 
1238 /*
1239  * Find the mapped pmem, unmap it and move it back to the pageable pmem list.
1240  * Return false if the page was not mapped, and true if it was mapped.
1241  */
1242 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1243 				       vaddr_t page_va)
1244 {
1245 	struct tee_pager_pmem *pmem = NULL;
1246 	struct tblidx tblidx = { };
1247 	size_t fobj_pgidx = 0;
1248 
1249 	assert(page_va >= area->base && page_va < (area->base + area->size));
1250 	fobj_pgidx = (page_va - area->base) / SMALL_PAGE_SIZE +
1251 		     area->fobj_pgoffs;
1252 
1253 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1254 		if (pmem->fobj != area->fobj || pmem->fobj_pgidx != fobj_pgidx)
1255 			continue;
1256 
1257 		/*
1258 		 * Locked pages may not be shared. We're asserting that the
1259 		 * number of areas using this pmem is one and only one as
1260 		 * we're about to unmap it.
1261 		 */
1262 		assert(num_areas_with_pmem(pmem) == 1);
1263 
1264 		tblidx = pmem_get_area_tblidx(pmem, area);
1265 		tblidx_set_entry(tblidx, 0, 0);
1266 		pgt_dec_used_entries(tblidx.pgt);
1267 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1268 		pmem_clear(pmem);
1269 		tee_pager_npages++;
1270 		set_npages();
1271 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1272 		incr_zi_released();
1273 		return true;
1274 	}
1275 
1276 	return false;
1277 }
1278 
1279 static void pager_deploy_page(struct tee_pager_pmem *pmem,
1280 			      struct tee_pager_area *area, vaddr_t page_va,
1281 			      bool clean_user_cache, bool writable)
1282 {
1283 	struct tblidx tblidx = area_va2tblidx(area, page_va);
1284 	uint32_t attr = get_area_mattr(area->flags);
1285 	struct core_mmu_table_info *ti = NULL;
1286 	uint8_t *va_alias = pmem->va_alias;
1287 	paddr_t pa = get_pmem_pa(pmem);
1288 	unsigned int idx_alias = 0;
1289 	uint32_t attr_alias = 0;
1290 	paddr_t pa_alias = 0;
1291 
1292 	/* Ensure we are allowed to write to aliased virtual page */
1293 	ti = find_table_info((vaddr_t)va_alias);
1294 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
1295 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
1296 	if (!(attr_alias & TEE_MATTR_PW)) {
1297 		attr_alias |= TEE_MATTR_PW;
1298 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
1299 		tlbi_mva_allasid((vaddr_t)va_alias);
1300 	}
1301 
1302 	asan_tag_access(va_alias, va_alias + SMALL_PAGE_SIZE);
1303 	if (fobj_load_page(pmem->fobj, pmem->fobj_pgidx, va_alias)) {
1304 		EMSG("PH 0x%" PRIxVA " failed", page_va);
1305 		panic();
1306 	}
1307 	switch (area->type) {
1308 	case PAGER_AREA_TYPE_RO:
1309 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1310 		incr_ro_hits();
1311 		/* Forbid write to aliases for read-only (maybe exec) pages */
1312 		attr_alias &= ~TEE_MATTR_PW;
1313 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
1314 		tlbi_mva_allasid((vaddr_t)va_alias);
1315 		break;
1316 	case PAGER_AREA_TYPE_RW:
1317 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1318 		if (writable && (attr & (TEE_MATTR_PW | TEE_MATTR_UW)))
1319 			pmem->flags |= PMEM_FLAG_DIRTY;
1320 		incr_rw_hits();
1321 		break;
1322 	case PAGER_AREA_TYPE_LOCK:
1323 		/* Move page to lock list */
1324 		if (tee_pager_npages <= 0)
1325 			panic("Running out of pages");
1326 		tee_pager_npages--;
1327 		set_npages();
1328 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1329 		break;
1330 	default:
1331 		panic();
1332 	}
1333 	asan_tag_no_access(va_alias, va_alias + SMALL_PAGE_SIZE);
1334 
1335 	if (!writable)
1336 		attr &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1337 
1338 	/*
1339 	 * We've updated the page using the aliased mapping and
1340 	 * some cache maintenance is now needed if it's an
1341 	 * executable page.
1342 	 *
1343 	 * Since the d-cache is a Physically-indexed,
1344 	 * physically-tagged (PIPT) cache we can clean either the
1345 	 * aliased address or the real virtual address. In this
1346 	 * case we choose the real virtual address.
1347 	 *
1348 	 * The i-cache can also be PIPT, but may be something else
1349 	 * too like VIPT. The current code requires the caches to
1350 	 * implement the IVIPT extension, that is:
1351 	 * "instruction cache maintenance is required only after
1352 	 * writing new data to a physical address that holds an
1353 	 * instruction."
1354 	 *
1355 	 * To portably invalidate the icache the page has to
1356 	 * be mapped at the final virtual address but not
1357 	 * executable.
1358 	 */
1359 	if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1360 		uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1361 				TEE_MATTR_PW | TEE_MATTR_UW;
1362 		void *va = (void *)page_va;
1363 
1364 		/* Set a temporary read-only mapping */
1365 		tblidx_set_entry(tblidx, pa, attr & ~mask);
1366 		tblidx_tlbi_entry(tblidx);
1367 
1368 		dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1369 		if (clean_user_cache)
1370 			icache_inv_user_range(va, SMALL_PAGE_SIZE);
1371 		else
1372 			icache_inv_range(va, SMALL_PAGE_SIZE);
1373 
1374 		/* Set the final mapping */
1375 		tblidx_set_entry(tblidx, pa, attr);
1376 		tblidx_tlbi_entry(tblidx);
1377 	} else {
1378 		tblidx_set_entry(tblidx, pa, attr);
1379 		/*
1380 		 * No need to flush TLB for this entry, it was
1381 		 * invalid. We should use a barrier though, to make
1382 		 * sure that the change is visible.
1383 		 */
1384 		dsb_ishst();
1385 	}
1386 	pgt_inc_used_entries(tblidx.pgt);
1387 
1388 	FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1389 }
1390 
1391 static void make_dirty_page(struct tee_pager_pmem *pmem,
1392 			    struct tee_pager_area *area, struct tblidx tblidx,
1393 			    paddr_t pa)
1394 {
1395 	assert(area->flags & (TEE_MATTR_UW | TEE_MATTR_PW));
1396 	assert(!(pmem->flags & PMEM_FLAG_DIRTY));
1397 
1398 	FMSG("Dirty %#"PRIxVA, tblidx2va(tblidx));
1399 	pmem->flags |= PMEM_FLAG_DIRTY;
1400 	tblidx_set_entry(tblidx, pa, get_area_mattr(area->flags));
1401 	tblidx_tlbi_entry(tblidx);
1402 }
1403 
1404 /*
1405  * This function takes a reference to a page (@fobj + fobj_pgidx) and makes
1406  * the corresponding IV available.
1407  *
1408  * In case the page needs to be saved the IV must be writable, and
1409  * consequently the page holding the IV is made dirty. If the page instead
1410  * only is to be verified it's enough that the page holding the IV is
1411  * read-only, so it doesn't have to be made dirty too.
1412  *
1413  * This function depends on pager_spare_pmem pointing to a free pmem when
1414  * entered. In case the page holding the needed IV isn't mapped this spare
1415  * pmem is used to map the page. If this function has used pager_spare_pmem
1416  * and assigned it to NULL it must be reassigned with a new free pmem
1417  * before this function can be called again.
1418  */
1419 static void make_iv_available(struct fobj *fobj, unsigned int fobj_pgidx,
1420 			      bool writable)
1421 {
1422 	struct tee_pager_area *area = pager_iv_area;
1423 	struct tee_pager_pmem *pmem = NULL;
1424 	struct tblidx tblidx = { };
1425 	vaddr_t page_va = 0;
1426 	uint32_t attr = 0;
1427 	paddr_t pa = 0;
1428 
1429 	page_va = fobj_get_iv_vaddr(fobj, fobj_pgidx) & ~SMALL_PAGE_MASK;
1430 	if (!IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) || !page_va) {
1431 		assert(!page_va);
1432 		return;
1433 	}
1434 
1435 	assert(area && area->type == PAGER_AREA_TYPE_RW);
1436 	assert(pager_spare_pmem);
1437 	assert(core_is_buffer_inside(page_va, 1, area->base, area->size));
1438 
1439 	tblidx = area_va2tblidx(area, page_va);
1440 	/*
1441 	 * We don't care if tee_pager_unhide_page() succeeds or not, we're
1442 	 * still checking the attributes afterwards.
1443 	 */
1444 	tee_pager_unhide_page(area, page_va);
1445 	tblidx_get_entry(tblidx, &pa, &attr);
1446 	if (!(attr & TEE_MATTR_VALID_BLOCK)) {
1447 		/*
1448 		 * We're using the spare pmem to map the IV corresponding
1449 		 * to another page.
1450 		 */
1451 		pmem = pager_spare_pmem;
1452 		pager_spare_pmem = NULL;
1453 		pmem_assign_fobj_page(pmem, area, page_va);
1454 
1455 		if (writable)
1456 			pmem->flags |= PMEM_FLAG_DIRTY;
1457 
1458 		pager_deploy_page(pmem, area, page_va,
1459 				  false /*!clean_user_cache*/, writable);
1460 	} else if (writable && !(attr & TEE_MATTR_PW)) {
1461 		pmem = pmem_find(area, page_va);
1462 		/* Note that pa is valid since TEE_MATTR_VALID_BLOCK is set */
1463 		make_dirty_page(pmem, area, tblidx, pa);
1464 	}
1465 }
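
/*
 * Sketch of the caller protocol for the spare pmem (see pager_get_page()
 * below for the real thing): make_iv_available() may consume
 * pager_spare_pmem, and when it does the caller must donate a free pmem
 * back before calling it again:
 *
 *	make_iv_available(fobj, fobj_pgidx, writable);
 *	if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) && !pager_spare_pmem) {
 *		pager_spare_pmem = free_pmem;
 *		free_pmem = NULL;
 *	}
 *
 * where free_pmem stands for whatever unmapped pmem the caller has at
 * hand, the name is only used for this illustration.
 */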
1466 
1467 static void pager_get_page(struct tee_pager_area *area, struct abort_info *ai,
1468 			   bool clean_user_cache)
1469 {
1470 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1471 	struct tblidx tblidx = area_va2tblidx(area, page_va);
1472 	struct tee_pager_pmem *pmem = NULL;
1473 	bool writable = false;
1474 	uint32_t attr = 0;
1475 
1476 	/*
1477 	 * Get a pmem to load code and data into, also make sure
1478 	 * the corresponding IV page is available.
1479 	 */
1480 	while (true) {
1481 		pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1482 		if (!pmem) {
1483 			EMSG("No pmem entries");
1484 			abort_print(ai);
1485 			panic();
1486 		}
1487 
1488 		if (pmem->fobj) {
1489 			pmem_unmap(pmem, NULL);
1490 			if (pmem_is_dirty(pmem)) {
1491 				uint8_t *va = pmem->va_alias;
1492 
1493 				make_iv_available(pmem->fobj, pmem->fobj_pgidx,
1494 						  true /*writable*/);
1495 				asan_tag_access(va, va + SMALL_PAGE_SIZE);
1496 				if (fobj_save_page(pmem->fobj, pmem->fobj_pgidx,
1497 						   pmem->va_alias))
1498 					panic("fobj_save_page");
1499 				asan_tag_no_access(va, va + SMALL_PAGE_SIZE);
1500 
1501 				pmem_clear(pmem);
1502 
1503 				/*
1504 				 * If the spare pmem was used by
1505 				 * make_iv_available() we need to replace
1506 				 * it with the just freed pmem.
1507 				 *
1508 				 * See make_iv_available() for details.
1509 				 */
1510 				if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) &&
1511 				    !pager_spare_pmem) {
1512 					TAILQ_REMOVE(&tee_pager_pmem_head,
1513 						     pmem, link);
1514 					pager_spare_pmem = pmem;
1515 					pmem = NULL;
1516 				}
1517 
1518 				/*
1519 				 * Check if the needed virtual page was
1520 				 * made available as a side effect of the
1521 				 * call to make_iv_available() above. If so
1522 				 * we're done.
1523 				 */
1524 				tblidx_get_entry(tblidx, NULL, &attr);
1525 				if (attr & TEE_MATTR_VALID_BLOCK)
1526 					return;
1527 
1528 				/*
1529 				 * The freed pmem was used to replace the
1530 				 * consumed pager_spare_pmem above. Restart
1531 				 * to find another pmem.
1532 				 */
1533 				if (!pmem)
1534 					continue;
1535 			}
1536 		}
1537 
1538 		TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1539 		pmem_clear(pmem);
1540 
1541 		pmem_assign_fobj_page(pmem, area, page_va);
1542 		make_iv_available(pmem->fobj, pmem->fobj_pgidx,
1543 				  false /*!writable*/);
1544 		if (!IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) || pager_spare_pmem)
1545 			break;
1546 
1547 		/*
1548 		 * The spare pmem was used by make_iv_available(). We need
1549 		 * to replace it with the just freed pmem. And get another
1550 		 * pmem.
1551 		 *
1552 		 * See make_iv_available() for details.
1553 		 */
1554 		pmem_clear(pmem);
1555 		pager_spare_pmem = pmem;
1556 	}
1557 
1558 	/*
1559 	 * PAGER_AREA_TYPE_LOCK pages are always writable while
1560 	 * PAGER_AREA_TYPE_RO pages are never writable.
1561 	 *
1562 	 * Pages from PAGER_AREA_TYPE_RW start read-only to be
1563 	 * able to tell when they are updated and should be tagged
1564 	 * as dirty.
1565 	 */
1566 	if (area->type == PAGER_AREA_TYPE_LOCK ||
1567 	    (area->type == PAGER_AREA_TYPE_RW && abort_is_write_fault(ai)))
1568 		writable = true;
1569 	else
1570 		writable = false;
1571 
1572 	pager_deploy_page(pmem, area, page_va, clean_user_cache, writable);
1573 }
1574 
1575 static bool pager_update_permissions(struct tee_pager_area *area,
1576 			struct abort_info *ai, bool *handled)
1577 {
1578 	struct tblidx tblidx = area_va2tblidx(area, ai->va);
1579 	struct tee_pager_pmem *pmem = NULL;
1580 	uint32_t attr = 0;
1581 	paddr_t pa = 0;
1582 
1583 	*handled = false;
1584 
1585 	tblidx_get_entry(tblidx, &pa, &attr);
1586 
1587 	/* Not mapped */
1588 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1589 		return false;
1590 
1591 	/* Not readable, should not happen */
1592 	if (abort_is_user_exception(ai)) {
1593 		if (!(attr & TEE_MATTR_UR))
1594 			return true;
1595 	} else {
1596 		if (!(attr & TEE_MATTR_PR)) {
1597 			abort_print_error(ai);
1598 			panic();
1599 		}
1600 	}
1601 
1602 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1603 	case CORE_MMU_FAULT_TRANSLATION:
1604 	case CORE_MMU_FAULT_READ_PERMISSION:
1605 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1606 			/* Check for an attempt to execute from a non-executable page */
1607 			if (abort_is_user_exception(ai)) {
1608 				if (!(attr & TEE_MATTR_UX))
1609 					return true;
1610 			} else {
1611 				if (!(attr & TEE_MATTR_PX)) {
1612 					abort_print_error(ai);
1613 					panic();
1614 				}
1615 			}
1616 		}
1617 		/* Since the page is mapped now it's OK */
1618 		break;
1619 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1620 		/* Check for an attempt to write to a read-only page */
1621 		pmem = pmem_find(area, ai->va);
1622 		if (!pmem)
1623 			panic();
1624 		if (abort_is_user_exception(ai)) {
1625 			if (!(area->flags & TEE_MATTR_UW))
1626 				return true;
1627 			if (!(attr & TEE_MATTR_UW))
1628 				make_dirty_page(pmem, area, tblidx, pa);
1629 		} else {
1630 			if (!(area->flags & TEE_MATTR_PW)) {
1631 				abort_print_error(ai);
1632 				panic();
1633 			}
1634 			if (!(attr & TEE_MATTR_PW))
1635 				make_dirty_page(pmem, area, tblidx, pa);
1636 		}
1637 		/* Since the permissions have been updated it's OK now */
1638 		break;
1639 	default:
1640 		/* Some fault we can't deal with */
1641 		if (abort_is_user_exception(ai))
1642 			return true;
1643 		abort_print_error(ai);
1644 		panic();
1645 	}
1646 	*handled = true;
1647 	return true;
1648 }
1649 
1650 #ifdef CFG_TEE_CORE_DEBUG
1651 static void stat_handle_fault(void)
1652 {
1653 	static size_t num_faults;
1654 	static size_t min_npages = SIZE_MAX;
1655 	static size_t total_min_npages = SIZE_MAX;
1656 
1657 	num_faults++;
1658 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1659 		DMSG("nfaults %zu npages %zu (min %zu)",
1660 		     num_faults, tee_pager_npages, min_npages);
1661 		min_npages = tee_pager_npages; /* reset */
1662 	}
1663 	if (tee_pager_npages < min_npages)
1664 		min_npages = tee_pager_npages;
1665 	if (tee_pager_npages < total_min_npages)
1666 		total_min_npages = tee_pager_npages;
1667 }
1668 #else
1669 static void stat_handle_fault(void)
1670 {
1671 }
1672 #endif
1673 
1674 bool tee_pager_handle_fault(struct abort_info *ai)
1675 {
1676 	struct tee_pager_area *area;
1677 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1678 	uint32_t exceptions;
1679 	bool ret;
1680 	bool clean_user_cache = false;
1681 
1682 #ifdef TEE_PAGER_DEBUG_PRINT
1683 	if (!abort_is_user_exception(ai))
1684 		abort_print(ai);
1685 #endif
1686 
1687 	/*
1688 	 * We're updating pages that can affect several active CPUs at a
1689 	 * time below. We end up here because a thread tries to access some
1690 	 * memory that isn't available. We have to be careful when making
1691 	 * that memory available as other threads may succeed in accessing
1692 	 * that address the moment after we've made it available.
1693 	 *
1694 	 * That means that we can't just map the memory and populate the
1695 	 * page, instead we use the aliased mapping to populate the page
1696 	 * and once everything is ready we map it.
1697 	 */
1698 	exceptions = pager_lock(ai);
1699 
1700 	stat_handle_fault();
1701 
1702 	/* check if the access is valid */
1703 	if (abort_is_user_exception(ai)) {
1704 		area = find_uta_area(ai->va);
1705 		clean_user_cache = true;
1706 	} else {
1707 		area = find_area(&tee_pager_area_head, ai->va);
1708 		if (!area) {
1709 			area = find_uta_area(ai->va);
1710 			clean_user_cache = true;
1711 		}
1712 	}
1713 	if (!area || !area->pgt) {
1714 		ret = false;
1715 		goto out;
1716 	}
1717 
1718 	if (tee_pager_unhide_page(area, page_va))
1719 		goto out_success;
1720 
1721 	/*
1722 	 * The page wasn't hidden, but some other core may have
1723 	 * updated the table entry before we got here or we need
1724 	 * to make a read-only page read-write (dirty).
1725 	 */
1726 	if (pager_update_permissions(area, ai, &ret)) {
1727 		/*
1728 		 * Nothing more to do with the abort. The problem
1729 		 * could already have been dealt with from another
1730 		 * core, or if ret is false the TA will be panicked.
1731 		 */
1732 		goto out;
1733 	}
1734 
1735 	pager_get_page(area, ai, clean_user_cache);
1736 
1737 out_success:
1738 	tee_pager_hide_pages();
1739 	ret = true;
1740 out:
1741 	pager_unlock(exceptions);
1742 	return ret;
1743 }
1744 
1745 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1746 {
1747 	size_t n = 0;
1748 
1749 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1750 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1751 
1752 	/* setup memory */
1753 	for (n = 0; n < npages; n++) {
1754 		struct core_mmu_table_info *ti = NULL;
1755 		struct tee_pager_pmem *pmem = NULL;
1756 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1757 		struct tblidx tblidx = { };
1758 		unsigned int pgidx = 0;
1759 		paddr_t pa = 0;
1760 		uint32_t attr = 0;
1761 
1762 		ti = find_table_info(va);
1763 		pgidx = core_mmu_va2idx(ti, va);
1764 		/*
1765 		 * Note that we can only support adding pages in the
1766 		 * valid range of this table info, currently not a problem.
1767 		 */
1768 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1769 
1770 		/* Ignore unmapped pages/blocks */
1771 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1772 			continue;
1773 
1774 		pmem = calloc(1, sizeof(struct tee_pager_pmem));
1775 		if (!pmem)
1776 			panic("out of mem");
1777 		pmem_clear(pmem);
1778 
1779 		pmem->va_alias = pager_add_alias_page(pa);
1780 
1781 		if (unmap) {
1782 			core_mmu_set_entry(ti, pgidx, 0, 0);
1783 			pgt_dec_used_entries(find_core_pgt(va));
1784 		} else {
1785 			struct tee_pager_area *area = NULL;
1786 
1787 			/*
1788 			 * The page is still mapped, let's assign the area
1789 			 * and update the protection bits accordingly.
1790 			 */
1791 			area = find_area(&tee_pager_area_head, va);
1792 			assert(area);
1793 			pmem_assign_fobj_page(pmem, area, va);
1794 			tblidx = pmem_get_area_tblidx(pmem, area);
1795 			assert(tblidx.pgt == find_core_pgt(va));
1796 			assert(pa == get_pmem_pa(pmem));
1797 			tblidx_set_entry(tblidx, pa,
1798 					 get_area_mattr(area->flags));
1799 		}
1800 
1801 		if (unmap && IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV) &&
1802 		    !pager_spare_pmem) {
1803 			pager_spare_pmem = pmem;
1804 		} else {
1805 			tee_pager_npages++;
1806 			incr_npages_all();
1807 			set_npages();
1808 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1809 		}
1810 	}
1811 
1812 	/*
1813 	 * As this is done at inits, invalidate all TLBs once instead of
1814 	 * targeting only the modified entries.
1815 	 */
1816 	tlbi_all();
1817 }
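
/*
 * An illustrative call site (donate_va and donate_npages are made-up names
 * for an already mapped range that the core no longer needs mapped
 * directly):
 *
 *	tee_pager_add_pages(donate_va, donate_npages, true);
 *
 * With unmap == true the pages are unmapped from their current location
 * and join the pager's pool of physical pages, with unmap == false the
 * pages stay mapped and are assigned to the area already covering them,
 * as in the else-branch above.
 */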
1818 
1819 #ifdef CFG_PAGED_USER_TA
1820 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1821 {
1822 	struct pgt *p = pgt;
1823 
1824 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1825 		p = SLIST_NEXT(p, link);
1826 	return p;
1827 }
1828 
1829 void tee_pager_assign_um_tables(struct user_mode_ctx *uctx)
1830 {
1831 	struct tee_pager_area *area = NULL;
1832 	struct pgt *pgt = NULL;
1833 
1834 	if (!uctx->areas)
1835 		return;
1836 
1837 	pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1838 	TAILQ_FOREACH(area, uctx->areas, link) {
1839 		if (!area->pgt)
1840 			area->pgt = find_pgt(pgt, area->base);
1841 		else
1842 			assert(area->pgt == find_pgt(pgt, area->base));
1843 		if (!area->pgt)
1844 			panic();
1845 	}
1846 }
1847 
1848 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1849 {
1850 	struct tee_pager_pmem *pmem = NULL;
1851 	struct tee_pager_area *area = NULL;
1852 	struct tee_pager_area_head *areas = NULL;
1853 	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1854 
1855 	if (!pgt->num_used_entries)
1856 		goto out;
1857 
1858 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1859 		if (pmem->fobj)
1860 			pmem_unmap(pmem, pgt);
1861 	}
1862 	assert(!pgt->num_used_entries);
1863 
1864 out:
1865 	areas = to_user_mode_ctx(pgt->ctx)->areas;
1866 	if (areas) {
1867 		TAILQ_FOREACH(area, areas, link) {
1868 			if (area->pgt == pgt)
1869 				area->pgt = NULL;
1870 		}
1871 	}
1872 
1873 	pager_unlock(exceptions);
1874 }
1875 DECLARE_KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1876 #endif /*CFG_PAGED_USER_TA*/
1877 
1878 void tee_pager_release_phys(void *addr, size_t size)
1879 {
1880 	bool unmaped = false;
1881 	vaddr_t va = (vaddr_t)addr;
1882 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1883 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1884 	struct tee_pager_area *area;
1885 	uint32_t exceptions;
1886 
1887 	if (end <= begin)
1888 		return;
1889 
1890 	exceptions = pager_lock_check_stack(128);
1891 
1892 	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
1893 		area = find_area(&tee_pager_area_head, va);
1894 		if (!area)
1895 			panic();
1896 		unmaped |= tee_pager_release_one_phys(area, va);
1897 	}
1898 
1899 	if (unmaped)
1900 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1901 
1902 	pager_unlock(exceptions);
1903 }
1904 DECLARE_KEEP_PAGER(tee_pager_release_phys);
1905 
1906 void *tee_pager_alloc(size_t size)
1907 {
1908 	tee_mm_entry_t *mm = NULL;
1909 	uint8_t *smem = NULL;
1910 	size_t num_pages = 0;
1911 	struct fobj *fobj = NULL;
1912 
1913 	if (!size)
1914 		return NULL;
1915 
1916 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1917 	if (!mm)
1918 		return NULL;
1919 
1920 	smem = (uint8_t *)tee_mm_get_smem(mm);
1921 	num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
1922 	fobj = fobj_locked_paged_alloc(num_pages);
1923 	if (!fobj) {
1924 		tee_mm_free(mm);
1925 		return NULL;
1926 	}
1927 
1928 	tee_pager_add_core_area((vaddr_t)smem, PAGER_AREA_TYPE_LOCK, fobj);
1929 	fobj_put(fobj);
1930 
1931 	asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE);
1932 
1933 	return smem;
1934 }
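
/*
 * Example use of tee_pager_alloc() (illustrative only, buf is not a real
 * symbol): the returned memory is backed by a locked-paged fobj, pages are
 * faulted in on first access and stay mapped until the backing physical
 * pages are released again with tee_pager_release_phys():
 *
 *	uint8_t *buf = tee_pager_alloc(3 * SMALL_PAGE_SIZE);
 *
 *	if (!buf)
 *		panic();
 *	... use buf ...
 *	tee_pager_release_phys(buf, 3 * SMALL_PAGE_SIZE);
 *
 * Note that the virtual range itself remains allocated from tee_mm_vcore,
 * only the physical pages are handed back to the pager.
 */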
1935 
1936 vaddr_t tee_pager_init_iv_area(struct fobj *fobj)
1937 {
1938 	tee_mm_entry_t *mm = NULL;
1939 	uint8_t *smem = NULL;
1940 
1941 	assert(!pager_iv_area);
1942 
1943 	mm = tee_mm_alloc(&tee_mm_vcore, fobj->num_pages * SMALL_PAGE_SIZE);
1944 	if (!mm)
1945 		panic();
1946 
1947 	smem = (uint8_t *)tee_mm_get_smem(mm);
1948 	tee_pager_add_core_area((vaddr_t)smem, PAGER_AREA_TYPE_RW, fobj);
1949 	fobj_put(fobj);
1950 
1951 	asan_tag_access(smem, smem + fobj->num_pages * SMALL_PAGE_SIZE);
1952 
1953 	pager_iv_area = find_area(&tee_pager_area_head, (vaddr_t)smem);
1954 	assert(pager_iv_area && pager_iv_area->fobj == fobj);
1955 
1956 	return (vaddr_t)smem;
1957 }
1958