xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 71e2b567d71ff5ea090d369bcc2b9296d129b5c7)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <io.h>
10 #include <keep.h>
11 #include <kernel/abort.h>
12 #include <kernel/asan.h>
13 #include <kernel/panic.h>
14 #include <kernel/spinlock.h>
15 #include <kernel/tee_misc.h>
16 #include <kernel/tee_ta_manager.h>
17 #include <kernel/thread.h>
18 #include <kernel/tlb_helpers.h>
19 #include <mm/core_memprot.h>
20 #include <mm/fobj.h>
21 #include <mm/tee_mm.h>
22 #include <mm/tee_pager.h>
23 #include <stdlib.h>
24 #include <sys/queue.h>
25 #include <tee_api_defines.h>
26 #include <trace.h>
27 #include <types_ext.h>
28 #include <utee_defines.h>
29 #include <util.h>
30 
31 struct tee_pager_area {
32 	struct fobj *fobj;
33 	size_t fobj_pgidx;
34 	enum tee_pager_area_type type;
35 	uint32_t flags;
36 	vaddr_t base;
37 	size_t size;
38 	struct pgt *pgt;
39 	TAILQ_ENTRY(tee_pager_area) link;
40 };
41 
42 TAILQ_HEAD(tee_pager_area_head, tee_pager_area);
43 
44 static struct tee_pager_area_head tee_pager_area_head =
45 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
46 
47 #define INVALID_PGIDX	UINT_MAX
48 
49 /*
50  * struct tee_pager_pmem - Represents a physical page used for paging.
51  *
52  * @pgidx	index of the entry in the area's page table (area->pgt).
53  * @va_alias	Virtual address where the physical page is always aliased.
54  *		Used during remapping of the page when the content needs to
55  *		be updated before it's available at the new location.
56  * @area	a pointer to the pager area
57  */
58 struct tee_pager_pmem {
59 	unsigned pgidx;
60 	void *va_alias;
61 	struct tee_pager_area *area;
62 	TAILQ_ENTRY(tee_pager_pmem) link;
63 };
64 
65 /* The list of physical pages. The first page in the list is the oldest */
66 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
67 
68 static struct tee_pager_pmem_head tee_pager_pmem_head =
69 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
70 
71 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
72 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
73 
74 /* Maximum number of pages hidden at a time */
75 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
76 
77 /* Number of registered physical pages, used when hiding pages. */
78 static size_t tee_pager_npages;
79 
80 #ifdef CFG_WITH_STATS
81 static struct tee_pager_stats pager_stats;
82 
83 static inline void incr_ro_hits(void)
84 {
85 	pager_stats.ro_hits++;
86 }
87 
88 static inline void incr_rw_hits(void)
89 {
90 	pager_stats.rw_hits++;
91 }
92 
93 static inline void incr_hidden_hits(void)
94 {
95 	pager_stats.hidden_hits++;
96 }
97 
98 static inline void incr_zi_released(void)
99 {
100 	pager_stats.zi_released++;
101 }
102 
103 static inline void incr_npages_all(void)
104 {
105 	pager_stats.npages_all++;
106 }
107 
108 static inline void set_npages(void)
109 {
110 	pager_stats.npages = tee_pager_npages;
111 }
112 
113 void tee_pager_get_stats(struct tee_pager_stats *stats)
114 {
115 	*stats = pager_stats;
116 
117 	pager_stats.hidden_hits = 0;
118 	pager_stats.ro_hits = 0;
119 	pager_stats.rw_hits = 0;
120 	pager_stats.zi_released = 0;
121 }
122 
123 #else /* CFG_WITH_STATS */
124 static inline void incr_ro_hits(void) { }
125 static inline void incr_rw_hits(void) { }
126 static inline void incr_hidden_hits(void) { }
127 static inline void incr_zi_released(void) { }
128 static inline void incr_npages_all(void) { }
129 static inline void set_npages(void) { }
130 
131 void tee_pager_get_stats(struct tee_pager_stats *stats)
132 {
133 	memset(stats, 0, sizeof(struct tee_pager_stats));
134 }
135 #endif /* CFG_WITH_STATS */
136 
137 #define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
138 #define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
139 #define TBL_SHIFT	SMALL_PAGE_SHIFT
140 
141 #define EFFECTIVE_VA_SIZE \
142 	(ROUNDUP(TEE_RAM_VA_START + TEE_RAM_VA_SIZE, \
143 		 CORE_MMU_PGDIR_SIZE) - \
144 	 ROUNDDOWN(TEE_RAM_VA_START, CORE_MMU_PGDIR_SIZE))
145 
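/*
 * One entry per CORE_MMU_PGDIR_SIZE chunk of the virtual address range
 * covered by the pager. @pgt and @tbl_info describe the same underlying
 * translation table; @pgt.tbl is assigned from @tbl_info.table in
 * tee_pager_early_init().
 */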
146 static struct pager_table {
147 	struct pgt pgt;
148 	struct core_mmu_table_info tbl_info;
149 } pager_tables[EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE];
150 
151 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
152 
153 /* Defines the range of the alias area */
154 static tee_mm_entry_t *pager_alias_area;
155 /*
156  * Physical pages are added in a stack-like fashion to the alias area.
157  * @pager_alias_next_free gives the address of the next free entry if
158  * @pager_alias_next_free is != 0.
159  */
160 static uintptr_t pager_alias_next_free;
161 
162 #ifdef CFG_TEE_CORE_DEBUG
163 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
164 
165 static uint32_t pager_lock_dldetect(const char *func, const int line,
166 				    struct abort_info *ai)
167 {
168 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
169 	unsigned int retries = 0;
170 	unsigned int reminder = 0;
171 
172 	while (!cpu_spin_trylock(&pager_spinlock)) {
173 		retries++;
174 		if (!retries) {
175 			/* wrapped, time to report */
176 			trace_printf(func, line, TRACE_ERROR, true,
177 				     "possible spinlock deadlock reminder %u",
178 				     reminder);
179 			if (reminder < UINT_MAX)
180 				reminder++;
181 			if (ai)
182 				abort_print(ai);
183 		}
184 	}
185 
186 	return exceptions;
187 }
188 #else
189 static uint32_t pager_lock(struct abort_info __unused *ai)
190 {
191 	return cpu_spin_lock_xsave(&pager_spinlock);
192 }
193 #endif
194 
195 static uint32_t pager_lock_check_stack(size_t stack_size)
196 {
197 	if (stack_size) {
198 		int8_t buf[stack_size];
199 		size_t n;
200 
201 		/*
202 		 * Make sure to touch all pages of the stack that we expect
203 		 * to use with this lock held. We need to take any page
204 		 * faults before the lock is taken or we'll deadlock
205 		 * the pager. The pages that are populated in this way will
206 		 * eventually be released at certain save transitions of
207 		 * the thread.
208 		 */
209 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
210 			io_write8((vaddr_t)buf + n, 1);
211 		io_write8((vaddr_t)buf + stack_size - 1, 1);
212 	}
213 
214 	return pager_lock(NULL);
215 }
216 
217 static void pager_unlock(uint32_t exceptions)
218 {
219 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
220 }
221 
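/*
 * Returns the virtual address where @pa is mapped by the pager, or NULL
 * if it isn't mapped. The linear mapping (same virtual as physical
 * address) is tried first, then every entry of the pager tables is
 * scanned.
 */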
222 void *tee_pager_phys_to_virt(paddr_t pa)
223 {
224 	struct core_mmu_table_info ti;
225 	unsigned idx;
226 	uint32_t a;
227 	paddr_t p;
228 	vaddr_t v;
229 	size_t n;
230 
231 	/*
232 	 * Most addresses are mapped linearly, so try that first if possible.
233 	 */
234 	if (!tee_pager_get_table_info(pa, &ti))
235 		return NULL; /* impossible pa */
236 	idx = core_mmu_va2idx(&ti, pa);
237 	core_mmu_get_entry(&ti, idx, &p, &a);
238 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
239 		return (void *)core_mmu_idx2va(&ti, idx);
240 
241 	n = 0;
242 	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
243 	while (true) {
244 		while (idx < TBL_NUM_ENTRIES) {
245 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
246 			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
247 				return NULL;
248 
249 			core_mmu_get_entry(&pager_tables[n].tbl_info,
250 					   idx, &p, &a);
251 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
252 				return (void *)v;
253 			idx++;
254 		}
255 
256 		n++;
257 		if (n >= ARRAY_SIZE(pager_tables))
258 			return NULL;
259 		idx = 0;
260 	}
261 
262 	return NULL;
263 }
264 
265 static struct pager_table *find_pager_table_may_fail(vaddr_t va)
266 {
267 	size_t n;
268 	const vaddr_t mask = CORE_MMU_PGDIR_MASK;
269 
270 	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
271 	    CORE_MMU_PGDIR_SHIFT;
272 	if (n >= ARRAY_SIZE(pager_tables))
273 		return NULL;
274 
275 	assert(va >= pager_tables[n].tbl_info.va_base &&
276 	       va <= (pager_tables[n].tbl_info.va_base | mask));
277 
278 	return pager_tables + n;
279 }
280 
281 static struct pager_table *find_pager_table(vaddr_t va)
282 {
283 	struct pager_table *pt = find_pager_table_may_fail(va);
284 
285 	assert(pt);
286 	return pt;
287 }
288 
289 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
290 {
291 	struct pager_table *pt = find_pager_table_may_fail(va);
292 
293 	if (!pt)
294 		return false;
295 
296 	*ti = pt->tbl_info;
297 	return true;
298 }
299 
300 static struct core_mmu_table_info *find_table_info(vaddr_t va)
301 {
302 	return &find_pager_table(va)->tbl_info;
303 }
304 
305 static struct pgt *find_core_pgt(vaddr_t va)
306 {
307 	return &find_pager_table(va)->pgt;
308 }
309 
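/*
 * Registers the virtual range used for aliasing physical pages and
 * clears any existing mappings within it. Must be called once before
 * pages are added with tee_pager_add_pages().
 */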
310 void tee_pager_set_alias_area(tee_mm_entry_t *mm)
311 {
312 	struct pager_table *pt;
313 	unsigned idx;
314 	vaddr_t smem = tee_mm_get_smem(mm);
315 	size_t nbytes = tee_mm_get_bytes(mm);
316 	vaddr_t v;
317 
318 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
319 
320 	assert(!pager_alias_area);
321 	pager_alias_area = mm;
322 	pager_alias_next_free = smem;
323 
324 	/* Clear all mappings in the alias area */
325 	pt = find_pager_table(smem);
326 	idx = core_mmu_va2idx(&pt->tbl_info, smem);
327 	while (pt <= (pager_tables + ARRAY_SIZE(pager_tables) - 1)) {
328 		while (idx < TBL_NUM_ENTRIES) {
329 			v = core_mmu_idx2va(&pt->tbl_info, idx);
330 			if (v >= (smem + nbytes))
331 				goto out;
332 
333 			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
334 			idx++;
335 		}
336 
337 		pt++;
338 		idx = 0;
339 	}
340 
341 out:
342 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
343 }
344 
345 static size_t tbl_usage_count(struct core_mmu_table_info *ti)
346 {
347 	size_t n;
348 	paddr_t pa;
349 	size_t usage = 0;
350 
351 	for (n = 0; n < ti->num_entries; n++) {
352 		core_mmu_get_entry(ti, n, &pa, NULL);
353 		if (pa)
354 			usage++;
355 	}
356 	return usage;
357 }
358 
359 static void area_get_entry(struct tee_pager_area *area, size_t idx,
360 			   paddr_t *pa, uint32_t *attr)
361 {
362 	assert(area->pgt);
363 	assert(idx < TBL_NUM_ENTRIES);
364 	core_mmu_get_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
365 }
366 
367 static void area_set_entry(struct tee_pager_area *area, size_t idx,
368 			   paddr_t pa, uint32_t attr)
369 {
370 	assert(area->pgt);
371 	assert(idx < TBL_NUM_ENTRIES);
372 	core_mmu_set_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
373 }
374 
375 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
376 {
377 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
378 }
379 
380 static vaddr_t area_idx2va(struct tee_pager_area *area, size_t idx)
381 {
382 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
383 }
384 
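/*
 * Looks up the translation tables covering the pager's virtual range,
 * checks that their geometry matches what the pager expects and
 * initializes pager_tables[], including the count of entries already in
 * use.
 */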
385 void tee_pager_early_init(void)
386 {
387 	size_t n;
388 
389 	/*
390 	 * Note that this depends on add_pager_vaspace() adding the vaspace
391 	 * after the end of memory.
392 	 */
393 	for (n = 0; n < ARRAY_SIZE(pager_tables); n++) {
394 		if (!core_mmu_find_table(NULL, TEE_RAM_VA_START +
395 					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
396 					 &pager_tables[n].tbl_info))
397 			panic("can't find mmu tables");
398 
399 		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
400 			panic("Unsupported page size in translation table");
401 		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
402 		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);
403 
404 		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
405 		pgt_set_used_entries(&pager_tables[n].pgt,
406 				tbl_usage_count(&pager_tables[n].tbl_info));
407 	}
408 }
409 
410 static void *pager_add_alias_page(paddr_t pa)
411 {
412 	unsigned idx;
413 	struct core_mmu_table_info *ti;
414 	/* Alias pages are mapped read-only; write permission is enabled at runtime when needed */
415 	uint32_t attr = TEE_MATTR_VALID_BLOCK |
416 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
417 			TEE_MATTR_SECURE | TEE_MATTR_PR;
418 
419 	DMSG("0x%" PRIxPA, pa);
420 
421 	ti = find_table_info(pager_alias_next_free);
422 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
423 	core_mmu_set_entry(ti, idx, pa, attr);
424 	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
425 	pager_alias_next_free += SMALL_PAGE_SIZE;
426 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
427 				      tee_mm_get_bytes(pager_alias_area)))
428 		pager_alias_next_free = 0;
429 	return (void *)core_mmu_idx2va(ti, idx);
430 }
431 
432 static void area_insert_tail(struct tee_pager_area *area)
433 {
434 	uint32_t exceptions = pager_lock_check_stack(8);
435 
436 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
437 
438 	pager_unlock(exceptions);
439 }
440 KEEP_PAGER(area_insert_tail);
441 
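/*
 * Registers a pageable core memory area backed by @fobj. The range is
 * split so that no area crosses a CORE_MMU_PGDIR_SIZE boundary, since
 * each area is tied to a single page table.
 */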
442 void tee_pager_add_core_area(vaddr_t base, enum tee_pager_area_type type,
443 			     struct fobj *fobj)
444 {
445 	struct tee_pager_area *area = NULL;
446 	uint32_t flags = 0;
447 	size_t fobj_pgidx = 0;
448 	vaddr_t b = base;
449 	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
450 	size_t s2 = 0;
451 
452 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : type %d", base, base + s, type);
453 
454 	if (base & SMALL_PAGE_MASK || !s) {
455 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, s);
456 		panic();
457 	}
458 
459 	switch (type) {
460 	case PAGER_AREA_TYPE_RO:
461 		flags = TEE_MATTR_PRX;
462 		break;
463 	case PAGER_AREA_TYPE_RW:
464 		flags = TEE_MATTR_PRW;
465 		break;
466 	case PAGER_AREA_TYPE_LOCK:
467 		flags = TEE_MATTR_PRW | TEE_MATTR_LOCKED;
468 		break;
469 	default:
470 		panic();
471 	}
472 
473 	if (!fobj)
474 		panic();
475 
476 	while (s) {
477 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
478 		area = calloc(1, sizeof(*area));
479 		if (!area)
480 			panic("alloc_area");
481 
482 		area->fobj = fobj_get(fobj);
483 		area->fobj_pgidx = fobj_pgidx;
484 		area->type = type;
485 		area->pgt = find_core_pgt(b);
486 		area->base = b;
487 		area->size = s2;
488 		area->flags = flags;
489 		area_insert_tail(area);
490 
491 		b += s2;
492 		s -= s2;
493 		fobj_pgidx += s2 / SMALL_PAGE_SIZE;
494 	}
495 }
496 
497 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
498 					vaddr_t va)
499 {
500 	struct tee_pager_area *area;
501 
502 	if (!areas)
503 		return NULL;
504 
505 	TAILQ_FOREACH(area, areas, link) {
506 		if (core_is_buffer_inside(va, 1, area->base, area->size))
507 			return area;
508 	}
509 	return NULL;
510 }
511 
512 #ifdef CFG_PAGED_USER_TA
513 static struct tee_pager_area *find_uta_area(vaddr_t va)
514 {
515 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
516 
517 	if (!is_user_ta_ctx(ctx))
518 		return NULL;
519 	return find_area(to_user_ta_ctx(ctx)->areas, va);
520 }
521 #else
522 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
523 {
524 	return NULL;
525 }
526 #endif /*CFG_PAGED_USER_TA*/
527 
528 
529 static uint32_t get_area_mattr(uint32_t area_flags)
530 {
531 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
532 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
533 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
534 
535 	return attr;
536 }
537 
538 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
539 {
540 	struct core_mmu_table_info *ti;
541 	paddr_t pa;
542 	unsigned idx;
543 
544 	ti = find_table_info((vaddr_t)pmem->va_alias);
545 	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
546 	core_mmu_get_entry(ti, idx, &pa, NULL);
547 	return pa;
548 }
549 
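/*
 * Populates the physical page aliased at @va_alias with the content
 * backing @page_va from the area's fobj. Write permission on the alias
 * mapping is enabled temporarily and removed again for read-only
 * (possibly executable) areas.
 */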
550 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
551 			void *va_alias)
552 {
553 	size_t fobj_pgidx = ((page_va - area->base) >> SMALL_PAGE_SHIFT) +
554 			    area->fobj_pgidx;
555 	struct core_mmu_table_info *ti;
556 	uint32_t attr_alias;
557 	paddr_t pa_alias;
558 	unsigned int idx_alias;
559 
560 	/* Ensure we are allowed to write to the aliased virtual page */
561 	ti = find_table_info((vaddr_t)va_alias);
562 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
563 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
564 	if (!(attr_alias & TEE_MATTR_PW)) {
565 		attr_alias |= TEE_MATTR_PW;
566 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
567 		tlbi_mva_allasid((vaddr_t)va_alias);
568 	}
569 
570 	asan_tag_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
571 	if (fobj_load_page(area->fobj, fobj_pgidx, va_alias)) {
572 		EMSG("PH 0x%" PRIxVA " failed", page_va);
573 		panic();
574 	}
575 	switch (area->type) {
576 	case PAGER_AREA_TYPE_RO:
577 		incr_ro_hits();
578 		/* Forbid writes to aliases of read-only (possibly executable) pages */
579 		attr_alias &= ~TEE_MATTR_PW;
580 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
581 		tlbi_mva_allasid((vaddr_t)va_alias);
582 		break;
583 	case PAGER_AREA_TYPE_RW:
584 		incr_rw_hits();
585 		break;
586 	case PAGER_AREA_TYPE_LOCK:
587 		break;
588 	default:
589 		panic();
590 	}
591 	asan_tag_no_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
592 }
593 
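/*
 * Writes the page content back to its fobj, but only if the page
 * belongs to an RW area and the entry indicates it may have been
 * written to (writable or hidden dirty). Otherwise the content is
 * simply dropped.
 */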
594 static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
595 {
596 	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
597 				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;
598 
599 	if (pmem->area->type == PAGER_AREA_TYPE_RW && (attr & dirty_bits)) {
600 		size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
601 		size_t fobj_pgidx = (pmem->pgidx - (offs >> SMALL_PAGE_SHIFT)) +
602 				    pmem->area->fobj_pgidx;
603 
604 		assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW));
605 		asan_tag_access(pmem->va_alias,
606 				(uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
607 		if (fobj_save_page(pmem->area->fobj, fobj_pgidx,
608 				   pmem->va_alias))
609 			panic("fobj_save_page");
610 		asan_tag_no_access(pmem->va_alias,
611 				   (uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
612 	}
613 }
614 
615 #ifdef CFG_PAGED_USER_TA
616 static void free_area(struct tee_pager_area *area)
617 {
618 	fobj_put(area->fobj);
619 	free(area);
620 }
621 
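/*
 * Allocates an rw-paged fobj for the range and splits it into areas
 * that don't cross a CORE_MMU_PGDIR_SIZE boundary. Page tables are
 * assigned later, when the context is activated.
 */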
622 static bool pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
623 			       size_t size)
624 {
625 	struct tee_pager_area *area = NULL;
626 	vaddr_t b = base;
627 	struct fobj *fobj = NULL;
628 	size_t fobj_pgidx = 0;
629 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
630 
631 	if (!utc->areas) {
632 		utc->areas = malloc(sizeof(*utc->areas));
633 		if (!utc->areas)
634 			return false;
635 		TAILQ_INIT(utc->areas);
636 	}
637 
638 	fobj = fobj_rw_paged_alloc(s / SMALL_PAGE_SIZE);
639 	if (!fobj)
640 		return false;
641 
642 	while (s) {
643 		size_t s2;
644 
645 		if (find_area(utc->areas, b)) {
646 			fobj_put(fobj);
647 			return false;
648 		}
649 
650 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
651 		area = calloc(1, sizeof(*area));
652 		if (!area) {
653 			fobj_put(fobj);
654 			return false;
655 		}
656 
657 		if (b != base)
658 			fobj_get(fobj);
659 
660 		/* Table info will be set when the context is activated. */
661 
662 		area->fobj = fobj;
663 		area->fobj_pgidx = fobj_pgidx;
664 		area->type = PAGER_AREA_TYPE_RW;
665 		area->base = b;
666 		area->size = s2;
667 		area->flags = TEE_MATTR_PRW | TEE_MATTR_URWX;
668 
669 		TAILQ_INSERT_TAIL(utc->areas, area, link);
670 		b += s2;
671 		s -= s2;
672 		fobj_pgidx += s2 / SMALL_PAGE_SIZE;
673 	}
674 
675 	return true;
676 }
677 
678 bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size)
679 {
680 	struct thread_specific_data *tsd = thread_get_tsd();
681 	struct tee_pager_area *area;
682 	struct core_mmu_table_info dir_info = { NULL };
683 
684 	if (&utc->ctx != tsd->ctx) {
685 		/*
686 		 * Changes are to a utc that isn't active. Just add the
687 		 * areas; page tables will be dealt with later.
688 		 */
689 		return pager_add_uta_area(utc, base, size);
690 	}
691 
692 	/*
693 	 * Assign page tables before adding areas to be able to tell which
694 	 * are newly added and should be removed in case of failure.
695 	 */
696 	tee_pager_assign_uta_tables(utc);
697 	if (!pager_add_uta_area(utc, base, size)) {
698 		struct tee_pager_area *next_a;
699 
700 		/* Remove all added areas */
701 		TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
702 			if (!area->pgt) {
703 				TAILQ_REMOVE(utc->areas, area, link);
704 				free_area(area);
705 			}
706 		}
707 		return false;
708 	}
709 
710 	/*
711 	 * Assign page tables to the new areas and make sure that the page
712 	 * tables are registered in the upper table.
713 	 */
714 	tee_pager_assign_uta_tables(utc);
715 	core_mmu_get_user_pgdir(&dir_info);
716 	TAILQ_FOREACH(area, utc->areas, link) {
717 		paddr_t pa;
718 		size_t idx;
719 		uint32_t attr;
720 
721 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
722 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
723 
724 		/*
725 		 * Check if the page table is already in use; if it is, it's
726 		 * already registered.
727 		 */
728 		if (area->pgt->num_used_entries) {
729 			assert(attr & TEE_MATTR_TABLE);
730 			assert(pa == virt_to_phys(area->pgt->tbl));
731 			continue;
732 		}
733 
734 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
735 		pa = virt_to_phys(area->pgt->tbl);
736 		assert(pa);
737 		/*
738 		 * Note that the update of the table entry is guaranteed to
739 		 * be atomic.
740 		 */
741 		core_mmu_set_entry(&dir_info, idx, pa, attr);
742 	}
743 
744 	return true;
745 }
746 
747 static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
748 				   struct pgt *pgt)
749 {
750 	assert(pgt);
751 	ti->table = pgt->tbl;
752 	ti->va_base = pgt->vabase;
753 	ti->level = TBL_LEVEL;
754 	ti->shift = TBL_SHIFT;
755 	ti->num_entries = TBL_NUM_ENTRIES;
756 }
757 
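/*
 * Moves an area to a new page table and base address, transferring any
 * physical pages currently mapped through the old table to the
 * corresponding entries in the new one.
 */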
758 static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
759 			   vaddr_t new_base)
760 {
761 	uint32_t exceptions = pager_lock_check_stack(64);
762 
763 	/*
764 	 * If there's no pgt assigned to the old area there are no pages to
765 	 * deal with either; just update with the new pgt and base.
766 	 */
767 	if (area->pgt) {
768 		struct core_mmu_table_info old_ti;
769 		struct core_mmu_table_info new_ti;
770 		struct tee_pager_pmem *pmem;
771 
772 		init_tbl_info_from_pgt(&old_ti, area->pgt);
773 		init_tbl_info_from_pgt(&new_ti, new_pgt);
774 
775 
776 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
777 			vaddr_t va;
778 			paddr_t pa;
779 			uint32_t attr;
780 
781 			if (pmem->area != area)
782 				continue;
783 			core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr);
784 			core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0);
785 
786 			assert(pa == get_pmem_pa(pmem));
787 			assert(attr);
788 			assert(area->pgt->num_used_entries);
789 			area->pgt->num_used_entries--;
790 
791 			va = core_mmu_idx2va(&old_ti, pmem->pgidx);
792 			va = va - area->base + new_base;
793 			pmem->pgidx = core_mmu_va2idx(&new_ti, va);
794 			core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr);
795 			new_pgt->num_used_entries++;
796 		}
797 	}
798 
799 	area->pgt = new_pgt;
800 	area->base = new_base;
801 	pager_unlock(exceptions);
802 }
803 KEEP_PAGER(transpose_area);
804 
805 void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
806 				   vaddr_t src_base,
807 				   struct user_ta_ctx *dst_utc,
808 				   vaddr_t dst_base, struct pgt **dst_pgt,
809 				   size_t size)
810 {
811 	struct tee_pager_area *area;
812 	struct tee_pager_area *next_a;
813 
814 	TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
815 		vaddr_t new_area_base;
816 		size_t new_idx;
817 
818 		if (!core_is_buffer_inside(area->base, area->size,
819 					  src_base, size))
820 			continue;
821 
822 		TAILQ_REMOVE(src_utc->areas, area, link);
823 
824 		new_area_base = dst_base + (src_base - area->base);
825 		new_idx = (new_area_base - dst_pgt[0]->vabase) /
826 			  CORE_MMU_PGDIR_SIZE;
827 		assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
828 		       dst_pgt[new_idx]->vabase);
829 		transpose_area(area, dst_pgt[new_idx], new_area_base);
830 
831 		/*
832 		 * Assert that this will not cause any conflicts in the new
833 		 * utc.  This should already be guaranteed, but a bug here
834 		 * could be tricky to find.
835 		 */
836 		assert(!find_area(dst_utc->areas, area->base));
837 		TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
838 	}
839 }
840 
841 static void rem_area(struct tee_pager_area_head *area_head,
842 		     struct tee_pager_area *area)
843 {
844 	struct tee_pager_pmem *pmem;
845 	uint32_t exceptions;
846 
847 	exceptions = pager_lock_check_stack(64);
848 
849 	TAILQ_REMOVE(area_head, area, link);
850 
851 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
852 		if (pmem->area == area) {
853 			area_set_entry(area, pmem->pgidx, 0, 0);
854 			tlbi_mva_allasid(area_idx2va(area, pmem->pgidx));
855 			pgt_dec_used_entries(area->pgt);
856 			pmem->area = NULL;
857 			pmem->pgidx = INVALID_PGIDX;
858 		}
859 	}
860 
861 	pager_unlock(exceptions);
862 	free_area(area);
863 }
864 KEEP_PAGER(rem_area);
865 
866 void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
867 			      size_t size)
868 {
869 	struct tee_pager_area *area;
870 	struct tee_pager_area *next_a;
871 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
872 
873 	TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
874 		if (core_is_buffer_inside(area->base, area->size, base, s))
875 			rem_area(utc->areas, area);
876 	}
877 }
878 
879 void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
880 {
881 	struct tee_pager_area *area;
882 
883 	if (!utc->areas)
884 		return;
885 
886 	while (true) {
887 		area = TAILQ_FIRST(utc->areas);
888 		if (!area)
889 			break;
890 		TAILQ_REMOVE(utc->areas, area, link);
891 		free_area(area);
892 	}
893 
894 	free(utc->areas);
895 }
896 
897 bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
898 				 size_t size, uint32_t flags)
899 {
900 	bool ret;
901 	vaddr_t b = base;
902 	size_t s = size;
903 	size_t s2;
904 	struct tee_pager_area *area = find_area(utc->areas, b);
905 	uint32_t exceptions;
906 	struct tee_pager_pmem *pmem;
907 	paddr_t pa;
908 	uint32_t a;
909 	uint32_t f;
910 
911 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
912 	if (f & TEE_MATTR_UW)
913 		f |= TEE_MATTR_PW;
914 	f = get_area_mattr(f);
915 
916 	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
917 
918 	while (s) {
919 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
920 		if (!area || area->base != b || area->size != s2) {
921 			ret = false;
922 			goto out;
923 		}
924 		b += s2;
925 		s -= s2;
926 
927 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
928 			if (pmem->area != area)
929 				continue;
930 			area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
931 			if (a & TEE_MATTR_VALID_BLOCK)
932 				assert(pa == get_pmem_pa(pmem));
933 			else
934 				pa = get_pmem_pa(pmem);
935 			if (a == f)
936 				continue;
937 			area_set_entry(pmem->area, pmem->pgidx, 0, 0);
938 			tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
939 			if (!(flags & TEE_MATTR_UW))
940 				tee_pager_save_page(pmem, a);
941 
942 			area_set_entry(pmem->area, pmem->pgidx, pa, f);
943 			/*
944 			 * Make sure the table update is visible before
945 			 * continuing.
946 			 */
947 			dsb_ishst();
948 
949 			if (flags & TEE_MATTR_UX) {
950 				void *va = (void *)area_idx2va(pmem->area,
951 							       pmem->pgidx);
952 
953 				cache_op_inner(DCACHE_AREA_CLEAN, va,
954 						SMALL_PAGE_SIZE);
955 				cache_op_inner(ICACHE_AREA_INVALIDATE, va,
956 						SMALL_PAGE_SIZE);
957 			}
958 		}
959 
960 		area->flags = f;
961 		area = TAILQ_NEXT(area, link);
962 	}
963 
964 	ret = true;
965 out:
966 	pager_unlock(exceptions);
967 	return ret;
968 }
969 KEEP_PAGER(tee_pager_set_uta_area_attr);
970 #endif /*CFG_PAGED_USER_TA*/
971 
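/*
 * If the page at @page_va is resident but hidden, makes it visible
 * again (read-only unless it was hidden dirty), moves the pmem entry to
 * the back of the list and returns true. Returns false if no hidden
 * page matches @page_va.
 */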
972 static bool tee_pager_unhide_page(vaddr_t page_va)
973 {
974 	struct tee_pager_pmem *pmem;
975 
976 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
977 		paddr_t pa;
978 		uint32_t attr;
979 
980 		if (pmem->pgidx == INVALID_PGIDX)
981 			continue;
982 
983 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
984 
985 		if (!(attr &
986 		     (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
987 			continue;
988 
989 		if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
990 			uint32_t a = get_area_mattr(pmem->area->flags);
991 
992 			/* page is hidden, show and move to back */
993 			if (pa != get_pmem_pa(pmem))
994 				panic("unexpected pa");
995 
996 			/*
997 			 * If it's not a dirty block, then it should be
998 			 * read only.
999 			 */
1000 			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
1001 				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1002 			else
1003 				FMSG("Unhide %#" PRIxVA, page_va);
1004 
1005 			if (page_va == 0x8000a000)
1006 				FMSG("unhide %#" PRIxVA " a %#" PRIX32,
1007 					page_va, a);
1008 			area_set_entry(pmem->area, pmem->pgidx, pa, a);
1009 			/*
1010 			 * Note that TLB invalidation isn't needed since
1011 			 * there wasn't a valid mapping before. We should
1012 			 * use a barrier though, to make sure that the
1013 			 * change is visible.
1014 			 */
1015 			dsb_ishst();
1016 
1017 			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1018 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1019 			incr_hidden_hits();
1020 			return true;
1021 		}
1022 	}
1023 
1024 	return false;
1025 }
1026 
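/*
 * Hides up to TEE_PAGER_NHIDE of the oldest mapped pages so that the
 * next access to them faults. Together with tee_pager_unhide_page()
 * this keeps recently used pages at the back of the pmem list,
 * approximating LRU eviction.
 */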
1027 static void tee_pager_hide_pages(void)
1028 {
1029 	struct tee_pager_pmem *pmem;
1030 	size_t n = 0;
1031 
1032 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1033 		paddr_t pa;
1034 		uint32_t attr;
1035 		uint32_t a;
1036 
1037 		if (n >= TEE_PAGER_NHIDE)
1038 			break;
1039 		n++;
1040 
1041 		/* we cannot hide pages when pmem->area is not defined. */
1042 		if (!pmem->area)
1043 			continue;
1044 
1045 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
1046 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1047 			continue;
1048 
1049 		assert(pa == get_pmem_pa(pmem));
1050 		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)) {
1051 			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
1052 			FMSG("Hide %#" PRIxVA,
1053 			     area_idx2va(pmem->area, pmem->pgidx));
1054 		} else
1055 			a = TEE_MATTR_HIDDEN_BLOCK;
1056 
1057 		area_set_entry(pmem->area, pmem->pgidx, pa, a);
1058 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1059 	}
1060 }
1061 
1062 /*
1063  * Find the mapped pmem, unmap it and move it back to the pageable pmem list.
1064  * Return false if the page was not mapped, and true if it was.
1065  */
1066 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1067 				       vaddr_t page_va)
1068 {
1069 	struct tee_pager_pmem *pmem;
1070 	unsigned pgidx;
1071 	paddr_t pa;
1072 	uint32_t attr;
1073 
1074 	pgidx = area_va2idx(area, page_va);
1075 	area_get_entry(area, pgidx, &pa, &attr);
1076 
1077 	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
1078 
1079 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1080 		if (pmem->area != area || pmem->pgidx != pgidx)
1081 			continue;
1082 
1083 		assert(pa == get_pmem_pa(pmem));
1084 		area_set_entry(area, pgidx, 0, 0);
1085 		pgt_dec_used_entries(area->pgt);
1086 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1087 		pmem->area = NULL;
1088 		pmem->pgidx = INVALID_PGIDX;
1089 		tee_pager_npages++;
1090 		set_npages();
1091 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1092 		incr_zi_released();
1093 		return true;
1094 	}
1095 
1096 	return false;
1097 }
1098 
1099 /* Finds the oldest page and unmaps it from its old virtual address */
1100 static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
1101 {
1102 	struct tee_pager_pmem *pmem;
1103 
1104 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1105 	if (!pmem) {
1106 		EMSG("No pmem entries");
1107 		return NULL;
1108 	}
1109 	if (pmem->pgidx != INVALID_PGIDX) {
1110 		uint32_t a;
1111 
1112 		assert(pmem->area && pmem->area->pgt);
1113 		area_get_entry(pmem->area, pmem->pgidx, NULL, &a);
1114 		area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1115 		pgt_dec_used_entries(pmem->area->pgt);
1116 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1117 		tee_pager_save_page(pmem, a);
1118 	}
1119 
1120 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1121 	pmem->pgidx = INVALID_PGIDX;
1122 	pmem->area = NULL;
1123 	if (area->type == PAGER_AREA_TYPE_LOCK) {
1124 		/* Move page to lock list */
1125 		if (tee_pager_npages <= 0)
1126 			panic("running out of page");
1127 		tee_pager_npages--;
1128 		set_npages();
1129 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1130 	} else {
1131 		/* move page to back */
1132 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1133 	}
1134 
1135 	return pmem;
1136 }
1137 
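/*
 * Handles an abort on a page that is already mapped. Returns false if
 * the entry is invalid so the page has to be paged in, true otherwise.
 * *handled tells whether the fault was resolved here, for instance by
 * upgrading a clean read-only page to writable (dirty).
 */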
1138 static bool pager_update_permissions(struct tee_pager_area *area,
1139 			struct abort_info *ai, bool *handled)
1140 {
1141 	unsigned int pgidx = area_va2idx(area, ai->va);
1142 	uint32_t attr;
1143 	paddr_t pa;
1144 
1145 	*handled = false;
1146 
1147 	area_get_entry(area, pgidx, &pa, &attr);
1148 
1149 	/* Not mapped */
1150 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1151 		return false;
1152 
1153 	/* Not readable, should not happen */
1154 	if (abort_is_user_exception(ai)) {
1155 		if (!(attr & TEE_MATTR_UR))
1156 			return true;
1157 	} else {
1158 		if (!(attr & TEE_MATTR_PR)) {
1159 			abort_print_error(ai);
1160 			panic();
1161 		}
1162 	}
1163 
1164 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1165 	case CORE_MMU_FAULT_TRANSLATION:
1166 	case CORE_MMU_FAULT_READ_PERMISSION:
1167 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1168 			/* Check if attempting to execute from a non-executable page */
1169 			if (abort_is_user_exception(ai)) {
1170 				if (!(attr & TEE_MATTR_UX))
1171 					return true;
1172 			} else {
1173 				if (!(attr & TEE_MATTR_PX)) {
1174 					abort_print_error(ai);
1175 					panic();
1176 				}
1177 			}
1178 		}
1179 		/* Since the page is mapped now it's OK */
1180 		break;
1181 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1182 		/* Check if attempting to write to a read-only page */
1183 		if (abort_is_user_exception(ai)) {
1184 			if (!(area->flags & TEE_MATTR_UW))
1185 				return true;
1186 			if (!(attr & TEE_MATTR_UW)) {
1187 				FMSG("Dirty %p",
1188 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1189 				area_set_entry(area, pgidx, pa,
1190 					       get_area_mattr(area->flags));
1191 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1192 			}
1193 
1194 		} else {
1195 			if (!(area->flags & TEE_MATTR_PW)) {
1196 				abort_print_error(ai);
1197 				panic();
1198 			}
1199 			if (!(attr & TEE_MATTR_PW)) {
1200 				FMSG("Dirty %p",
1201 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1202 				area_set_entry(area, pgidx, pa,
1203 					       get_area_mattr(area->flags));
1204 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1205 			}
1206 		}
1207 		/* Since the permissions have been updated it's OK now */
1208 		break;
1209 	default:
1210 		/* Some fault we can't deal with */
1211 		if (abort_is_user_exception(ai))
1212 			return true;
1213 		abort_print_error(ai);
1214 		panic();
1215 	}
1216 	*handled = true;
1217 	return true;
1218 }
1219 
1220 #ifdef CFG_TEE_CORE_DEBUG
1221 static void stat_handle_fault(void)
1222 {
1223 	static size_t num_faults;
1224 	static size_t min_npages = SIZE_MAX;
1225 	static size_t total_min_npages = SIZE_MAX;
1226 
1227 	num_faults++;
1228 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1229 		DMSG("nfaults %zu npages %zu (min %zu)",
1230 		     num_faults, tee_pager_npages, min_npages);
1231 		min_npages = tee_pager_npages; /* reset */
1232 	}
1233 	if (tee_pager_npages < min_npages)
1234 		min_npages = tee_pager_npages;
1235 	if (tee_pager_npages < total_min_npages)
1236 		total_min_npages = tee_pager_npages;
1237 }
1238 #else
1239 static void stat_handle_fault(void)
1240 {
1241 }
1242 #endif
1243 
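/*
 * Main abort handler for pageable memory. Looks up the faulting area,
 * then either unhides the page, updates its permissions or pages it in
 * via the alias mapping. Returns true if the fault was handled, false
 * if the address isn't managed by the pager or the access isn't
 * permitted.
 */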
1244 bool tee_pager_handle_fault(struct abort_info *ai)
1245 {
1246 	struct tee_pager_area *area;
1247 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1248 	uint32_t exceptions;
1249 	bool ret;
1250 
1251 #ifdef TEE_PAGER_DEBUG_PRINT
1252 	abort_print(ai);
1253 #endif
1254 
1255 	/*
1256 	 * Below we're updating pages in a way that can affect several active
1257 	 * CPUs at a time. We end up here because a thread tried to access some
1258 	 * memory that isn't available. We have to be careful when making
1259 	 * that memory available as other threads may succeed in accessing
1260 	 * that address the moment after we've made it available.
1261 	 *
1262 	 * That means that we can't just map the memory and populate the
1263 	 * page, instead we use the aliased mapping to populate the page
1264 	 * and once everything is ready we map it.
1265 	 */
1266 	exceptions = pager_lock(ai);
1267 
1268 	stat_handle_fault();
1269 
1270 	/* check if the access is valid */
1271 	if (abort_is_user_exception(ai)) {
1272 		area = find_uta_area(ai->va);
1273 
1274 	} else {
1275 		area = find_area(&tee_pager_area_head, ai->va);
1276 		if (!area)
1277 			area = find_uta_area(ai->va);
1278 	}
1279 	if (!area || !area->pgt) {
1280 		ret = false;
1281 		goto out;
1282 	}
1283 
1284 	if (!tee_pager_unhide_page(page_va)) {
1285 		struct tee_pager_pmem *pmem = NULL;
1286 		uint32_t attr;
1287 		paddr_t pa;
1288 
1289 		/*
1290 		 * The page wasn't hidden, but some other core may have
1291 		 * updated the table entry before we got here or we need
1292 		 * to make a read-only page read-write (dirty).
1293 		 */
1294 		if (pager_update_permissions(area, ai, &ret)) {
1295 			/*
1296 			 * Nothing more to do with the abort. The problem
1297 			 * could already have been dealt with from another
1298 			 * core, or if ret is false the TA will be panicked.
1299 			 */
1300 			goto out;
1301 		}
1302 
1303 		pmem = tee_pager_get_page(area);
1304 		if (!pmem) {
1305 			abort_print(ai);
1306 			panic();
1307 		}
1308 
1309 		/* load page code & data */
1310 		tee_pager_load_page(area, page_va, pmem->va_alias);
1311 
1312 
1313 		pmem->area = area;
1314 		pmem->pgidx = area_va2idx(area, ai->va);
1315 		attr = get_area_mattr(area->flags) &
1316 			~(TEE_MATTR_PW | TEE_MATTR_UW);
1317 		pa = get_pmem_pa(pmem);
1318 
1319 		/*
1320 		 * We've updated the page using the aliased mapping and
1321 		 * some cache maintenance is now needed if it's an
1322 		 * executable page.
1323 		 *
1324 		 * Since the d-cache is a Physically-indexed,
1325 		 * physically-tagged (PIPT) cache we can clean either the
1326 		 * aliased address or the real virtual address. In this
1327 		 * case we choose the real virtual address.
1328 		 *
1329 		 * The i-cache can also be PIPT, but may be something else
1330 		 * too like VIPT. The current code requires the caches to
1331 		 * implement the IVIPT extension, that is:
1332 		 * "instruction cache maintenance is required only after
1333 		 * writing new data to a physical address that holds an
1334 		 * instruction."
1335 		 *
1336 		 * To portably invalidate the icache the page has to
1337 		 * be mapped at the final virtual address but not
1338 		 * executable.
1339 		 */
1340 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1341 			uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1342 					TEE_MATTR_PW | TEE_MATTR_UW;
1343 
1344 			/* Set a temporary read-only mapping */
1345 			area_set_entry(pmem->area, pmem->pgidx, pa,
1346 				       attr & ~mask);
1347 			tlbi_mva_allasid(page_va);
1348 
1349 			/*
1350 			 * Doing these operations to LoUIS (Level of
1351 			 * unification, Inner Shareable) would be enough
1352 			 */
1353 			cache_op_inner(DCACHE_AREA_CLEAN, (void *)page_va,
1354 				       SMALL_PAGE_SIZE);
1355 			cache_op_inner(ICACHE_AREA_INVALIDATE, (void *)page_va,
1356 				       SMALL_PAGE_SIZE);
1357 
1358 			/* Set the final mapping */
1359 			area_set_entry(area, pmem->pgidx, pa, attr);
1360 			tlbi_mva_allasid(page_va);
1361 		} else {
1362 			area_set_entry(area, pmem->pgidx, pa, attr);
1363 			/*
1364 			 * No need to flush TLB for this entry, it was
1365 			 * invalid. We should use a barrier though, to make
1366 			 * sure that the change is visible.
1367 			 */
1368 			dsb_ishst();
1369 		}
1370 		pgt_inc_used_entries(area->pgt);
1371 
1372 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1373 
1374 	}
1375 
1376 	tee_pager_hide_pages();
1377 	ret = true;
1378 out:
1379 	pager_unlock(exceptions);
1380 	return ret;
1381 }
1382 
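/*
 * Donates the physical pages currently mapping [vaddr, vaddr + npages *
 * SMALL_PAGE_SIZE) to the pager. With @unmap set the pages are unmapped
 * and immediately become available for paging, otherwise they remain
 * mapped and are assigned to their pager area. Entries that are already
 * unmapped are skipped.
 */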
1383 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1384 {
1385 	size_t n;
1386 
1387 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1388 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1389 
1390 	/* setup memory */
1391 	for (n = 0; n < npages; n++) {
1392 		struct core_mmu_table_info *ti;
1393 		struct tee_pager_pmem *pmem;
1394 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1395 		unsigned int pgidx;
1396 		paddr_t pa;
1397 		uint32_t attr;
1398 
1399 		ti = find_table_info(va);
1400 		pgidx = core_mmu_va2idx(ti, va);
1401 		/*
1402 		 * Note that we can only support adding pages in the
1403 		 * valid range of this table info; currently that's not a problem.
1404 		 */
1405 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1406 
1407 		/* Ignore unmapped pages/blocks */
1408 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1409 			continue;
1410 
1411 		pmem = malloc(sizeof(struct tee_pager_pmem));
1412 		if (!pmem)
1413 			panic("out of mem");
1414 
1415 		pmem->va_alias = pager_add_alias_page(pa);
1416 
1417 		if (unmap) {
1418 			pmem->area = NULL;
1419 			pmem->pgidx = INVALID_PGIDX;
1420 			core_mmu_set_entry(ti, pgidx, 0, 0);
1421 			pgt_dec_used_entries(find_core_pgt(va));
1422 		} else {
1423 			/*
1424 			 * The page is still mapped, let's assign the area
1425 			 * and update the protection bits accordingly.
1426 			 */
1427 			pmem->area = find_area(&tee_pager_area_head, va);
1428 			assert(pmem->area->pgt == find_core_pgt(va));
1429 			pmem->pgidx = pgidx;
1430 			assert(pa == get_pmem_pa(pmem));
1431 			area_set_entry(pmem->area, pgidx, pa,
1432 				       get_area_mattr(pmem->area->flags));
1433 		}
1434 
1435 		tee_pager_npages++;
1436 		incr_npages_all();
1437 		set_npages();
1438 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1439 	}
1440 
1441 	/*
1442 	 * As this is done at init, invalidate all TLBs once instead of
1443 	 * targeting only the modified entries.
1444 	 */
1445 	tlbi_all();
1446 }
1447 
1448 #ifdef CFG_PAGED_USER_TA
1449 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1450 {
1451 	struct pgt *p = pgt;
1452 
1453 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1454 		p = SLIST_NEXT(p, link);
1455 	return p;
1456 }
1457 
1458 void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
1459 {
1460 	struct tee_pager_area *area;
1461 	struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1462 
1463 	TAILQ_FOREACH(area, utc->areas, link) {
1464 		if (!area->pgt)
1465 			area->pgt = find_pgt(pgt, area->base);
1466 		else
1467 			assert(area->pgt == find_pgt(pgt, area->base));
1468 		if (!area->pgt)
1469 			panic();
1470 	}
1471 }
1472 
1473 static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
1474 {
1475 	uint32_t attr;
1476 
1477 	assert(pmem->area && pmem->area->pgt);
1478 
1479 	area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
1480 	area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1481 	tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1482 	tee_pager_save_page(pmem, attr);
1483 	assert(pmem->area->pgt->num_used_entries);
1484 	pmem->area->pgt->num_used_entries--;
1485 	pmem->pgidx = INVALID_PGIDX;
1486 	pmem->area = NULL;
1487 }
1488 
1489 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1490 {
1491 	struct tee_pager_pmem *pmem;
1492 	struct tee_pager_area *area;
1493 	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1494 
1495 	if (!pgt->num_used_entries)
1496 		goto out;
1497 
1498 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1499 		if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
1500 			continue;
1501 		if (pmem->area->pgt == pgt)
1502 			pager_save_and_release_entry(pmem);
1503 	}
1504 	assert(!pgt->num_used_entries);
1505 
1506 out:
1507 	if (is_user_ta_ctx(pgt->ctx)) {
1508 		TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
1509 			if (area->pgt == pgt)
1510 				area->pgt = NULL;
1511 		}
1512 	}
1513 
1514 	pager_unlock(exceptions);
1515 }
1516 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1517 #endif /*CFG_PAGED_USER_TA*/
1518 
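/*
 * Returns the physical pages backing locked pages fully contained in
 * [addr, addr + size) to the pageable pool. The virtual range stays
 * registered with the pager but is left unmapped until accessed again.
 */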
1519 void tee_pager_release_phys(void *addr, size_t size)
1520 {
1521 	bool unmapped = false;
1522 	vaddr_t va = (vaddr_t)addr;
1523 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1524 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1525 	struct tee_pager_area *area;
1526 	uint32_t exceptions;
1527 
1528 	if (end <= begin)
1529 		return;
1530 
1531 	exceptions = pager_lock_check_stack(128);
1532 
1533 	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
1534 		area = find_area(&tee_pager_area_head, va);
1535 		if (!area)
1536 			panic();
1537 		unmapped |= tee_pager_release_one_phys(area, va);
1538 	}
1539 
1540 	if (unmapped)
1541 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1542 
1543 	pager_unlock(exceptions);
1544 }
1545 KEEP_PAGER(tee_pager_release_phys);
1546 
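/*
 * Allocates lock-on-demand pager memory from the tee_mm_vcore virtual
 * address space, backed by a locked-paged fobj. Returns NULL when @size
 * is 0 or the allocation fails.
 *
 * Example (hypothetical caller):
 *	void *buf = tee_pager_alloc(4 * SMALL_PAGE_SIZE);
 *	if (!buf)
 *		panic();
 */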
1547 void *tee_pager_alloc(size_t size)
1548 {
1549 	tee_mm_entry_t *mm = NULL;
1550 	uint8_t *smem = NULL;
1551 	size_t num_pages = 0;
1552 	struct fobj *fobj = NULL;
1553 
1554 	if (!size)
1555 		return NULL;
1556 
1557 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1558 	if (!mm)
1559 		return NULL;
1560 
1561 	smem = (uint8_t *)tee_mm_get_smem(mm);
1562 	num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
1563 	fobj = fobj_locked_paged_alloc(num_pages);
1564 	if (!fobj) {
1565 		tee_mm_free(mm);
1566 		return NULL;
1567 	}
1568 
1569 	tee_pager_add_core_area((vaddr_t)smem, PAGER_AREA_TYPE_LOCK, fobj);
1570 	fobj_put(fobj);
1571 
1572 	asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE);
1573 
1574 	return smem;
1575 }
1576