xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 53a68c384e7f0b46aa9182259afe45aea4a4748b)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <io.h>
10 #include <keep.h>
11 #include <kernel/abort.h>
12 #include <kernel/asan.h>
13 #include <kernel/panic.h>
14 #include <kernel/spinlock.h>
15 #include <kernel/tee_misc.h>
16 #include <kernel/tee_ta_manager.h>
17 #include <kernel/thread.h>
18 #include <kernel/tlb_helpers.h>
19 #include <mm/core_memprot.h>
20 #include <mm/fobj.h>
21 #include <mm/tee_mm.h>
22 #include <mm/tee_pager.h>
23 #include <stdlib.h>
24 #include <sys/queue.h>
25 #include <tee_api_defines.h>
26 #include <trace.h>
27 #include <types_ext.h>
28 #include <utee_defines.h>
29 #include <util.h>
30 
31 struct tee_pager_area {
32 	struct fobj *fobj;
33 	size_t fobj_pgidx;
34 	enum tee_pager_area_type type;
35 	uint32_t flags;
36 	vaddr_t base;
37 	size_t size;
38 	struct pgt *pgt;
39 	TAILQ_ENTRY(tee_pager_area) link;
40 };
41 
42 TAILQ_HEAD(tee_pager_area_head, tee_pager_area);
43 
44 static struct tee_pager_area_head tee_pager_area_head =
45 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
46 
47 #define INVALID_PGIDX		UINT_MAX
48 #define PMEM_FLAG_DIRTY		BIT(0)
49 #define PMEM_FLAG_HIDDEN	BIT(1)
50 
51 /*
52  * struct tee_pager_pmem - Represents a physical page used for paging.
53  *
54  * @flags	flags defined by PMEM_FLAG_* above
55  * @pgidx	an index of the entry in the area's page table (area->pgt)
56  * @va_alias	Virtual address where the physical page is always aliased.
57  *		Used during remapping of the page when the content needs to
58  *		be updated before it's available at the new location.
59  * @area	a pointer to the pager area
60  */
61 struct tee_pager_pmem {
62 	unsigned int flags;
63 	unsigned int pgidx;
64 	void *va_alias;
65 	struct tee_pager_area *area;
66 	TAILQ_ENTRY(tee_pager_pmem) link;
67 };
68 
69 /* The list of physical pages. The first page in the list is the oldest */
70 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
71 
72 static struct tee_pager_pmem_head tee_pager_pmem_head =
73 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
74 
75 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
76 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
77 
78 /* Maximum number of pages to hide at a time */
79 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
80 
81 /* Number of registered physical pages, used when hiding pages. */
82 static size_t tee_pager_npages;
83 
84 #ifdef CFG_WITH_STATS
85 static struct tee_pager_stats pager_stats;
86 
87 static inline void incr_ro_hits(void)
88 {
89 	pager_stats.ro_hits++;
90 }
91 
92 static inline void incr_rw_hits(void)
93 {
94 	pager_stats.rw_hits++;
95 }
96 
97 static inline void incr_hidden_hits(void)
98 {
99 	pager_stats.hidden_hits++;
100 }
101 
102 static inline void incr_zi_released(void)
103 {
104 	pager_stats.zi_released++;
105 }
106 
107 static inline void incr_npages_all(void)
108 {
109 	pager_stats.npages_all++;
110 }
111 
112 static inline void set_npages(void)
113 {
114 	pager_stats.npages = tee_pager_npages;
115 }
116 
117 void tee_pager_get_stats(struct tee_pager_stats *stats)
118 {
119 	*stats = pager_stats;
120 
121 	pager_stats.hidden_hits = 0;
122 	pager_stats.ro_hits = 0;
123 	pager_stats.rw_hits = 0;
124 	pager_stats.zi_released = 0;
125 }
126 
127 #else /* CFG_WITH_STATS */
128 static inline void incr_ro_hits(void) { }
129 static inline void incr_rw_hits(void) { }
130 static inline void incr_hidden_hits(void) { }
131 static inline void incr_zi_released(void) { }
132 static inline void incr_npages_all(void) { }
133 static inline void set_npages(void) { }
134 
135 void tee_pager_get_stats(struct tee_pager_stats *stats)
136 {
137 	memset(stats, 0, sizeof(struct tee_pager_stats));
138 }
139 #endif /* CFG_WITH_STATS */
140 
141 #define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
142 #define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
143 #define TBL_SHIFT	SMALL_PAGE_SHIFT
144 
145 #define EFFECTIVE_VA_SIZE \
146 	(ROUNDUP(TEE_RAM_VA_START + TEE_RAM_VA_SIZE, \
147 		 CORE_MMU_PGDIR_SIZE) - \
148 	 ROUNDDOWN(TEE_RAM_VA_START, CORE_MMU_PGDIR_SIZE))
149 
150 static struct pager_table {
151 	struct pgt pgt;
152 	struct core_mmu_table_info tbl_info;
153 } pager_tables[EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE];
154 
155 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
156 
157 /* Defines the range of the alias area */
158 static tee_mm_entry_t *pager_alias_area;
159 /*
160  * Physical pages are added to the alias area in a stack-like fashion.
161  * @pager_alias_next_free gives the address of the next free entry,
162  * or 0 when the alias area is full.
163  */
164 static uintptr_t pager_alias_next_free;
165 
166 #ifdef CFG_TEE_CORE_DEBUG
167 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
168 
169 static uint32_t pager_lock_dldetect(const char *func, const int line,
170 				    struct abort_info *ai)
171 {
172 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
173 	unsigned int retries = 0;
174 	unsigned int reminder = 0;
175 
176 	while (!cpu_spin_trylock(&pager_spinlock)) {
177 		retries++;
178 		if (!retries) {
179 			/* wrapped, time to report */
180 			trace_printf(func, line, TRACE_ERROR, true,
181 				     "possible spinlock deadlock reminder %u",
182 				     reminder);
183 			if (reminder < UINT_MAX)
184 				reminder++;
185 			if (ai)
186 				abort_print(ai);
187 		}
188 	}
189 
190 	return exceptions;
191 }
192 #else
193 static uint32_t pager_lock(struct abort_info __unused *ai)
194 {
195 	return cpu_spin_lock_xsave(&pager_spinlock);
196 }
197 #endif
198 
199 static uint32_t pager_lock_check_stack(size_t stack_size)
200 {
201 	if (stack_size) {
202 		int8_t buf[stack_size];
203 		size_t n;
204 
205 		/*
206 		 * Make sure to touch all pages of the stack that we expect
207 		 * to use with this lock held. Any page faults have to be
208 		 * taken before the lock is acquired or we'll deadlock the
209 		 * pager. The pages that are populated this way will
210 		 * eventually be released at certain save transitions of
211 		 * the thread.
212 		 */
213 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
214 			io_write8((vaddr_t)buf + n, 1);
215 		io_write8((vaddr_t)buf + stack_size - 1, 1);
216 	}
217 
218 	return pager_lock(NULL);
219 }
220 
221 static void pager_unlock(uint32_t exceptions)
222 {
223 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
224 }
225 
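/*
 * Translates a physical address to the virtual address it is currently
 * mapped at. The linear mapping (VA equal to PA) is tried first, then
 * the pager translation tables are scanned. Returns NULL if @pa isn't
 * mapped.
 */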
226 void *tee_pager_phys_to_virt(paddr_t pa)
227 {
228 	struct core_mmu_table_info ti;
229 	unsigned idx;
230 	uint32_t a;
231 	paddr_t p;
232 	vaddr_t v;
233 	size_t n;
234 
235 	/*
236 	 * Most addresses are mapped linearly, try that first if possible.
237 	 */
238 	if (!tee_pager_get_table_info(pa, &ti))
239 		return NULL; /* impossible pa */
240 	idx = core_mmu_va2idx(&ti, pa);
241 	core_mmu_get_entry(&ti, idx, &p, &a);
242 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
243 		return (void *)core_mmu_idx2va(&ti, idx);
244 
245 	n = 0;
246 	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
247 	while (true) {
248 		while (idx < TBL_NUM_ENTRIES) {
249 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
250 			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
251 				return NULL;
252 
253 			core_mmu_get_entry(&pager_tables[n].tbl_info,
254 					   idx, &p, &a);
255 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
256 				return (void *)v;
257 			idx++;
258 		}
259 
260 		n++;
261 		if (n >= ARRAY_SIZE(pager_tables))
262 			return NULL;
263 		idx = 0;
264 	}
265 
266 	return NULL;
267 }
268 
269 static bool pmem_is_hidden(struct tee_pager_pmem *pmem)
270 {
271 	return pmem->flags & PMEM_FLAG_HIDDEN;
272 }
273 
274 static bool pmem_is_dirty(struct tee_pager_pmem *pmem)
275 {
276 	return pmem->flags & PMEM_FLAG_DIRTY;
277 }
278 
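/*
 * Returns the pager_table covering @va, or NULL if @va is outside the
 * range covered by the pager tables.
 */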
279 static struct pager_table *find_pager_table_may_fail(vaddr_t va)
280 {
281 	size_t n;
282 	const vaddr_t mask = CORE_MMU_PGDIR_MASK;
283 
284 	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
285 	    CORE_MMU_PGDIR_SHIFT;
286 	if (n >= ARRAY_SIZE(pager_tables))
287 		return NULL;
288 
289 	assert(va >= pager_tables[n].tbl_info.va_base &&
290 	       va <= (pager_tables[n].tbl_info.va_base | mask));
291 
292 	return pager_tables + n;
293 }
294 
295 static struct pager_table *find_pager_table(vaddr_t va)
296 {
297 	struct pager_table *pt = find_pager_table_may_fail(va);
298 
299 	assert(pt);
300 	return pt;
301 }
302 
303 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
304 {
305 	struct pager_table *pt = find_pager_table_may_fail(va);
306 
307 	if (!pt)
308 		return false;
309 
310 	*ti = pt->tbl_info;
311 	return true;
312 }
313 
314 static struct core_mmu_table_info *find_table_info(vaddr_t va)
315 {
316 	return &find_pager_table(va)->tbl_info;
317 }
318 
319 static struct pgt *find_core_pgt(vaddr_t va)
320 {
321 	return &find_pager_table(va)->pgt;
322 }
323 
324 void tee_pager_set_alias_area(tee_mm_entry_t *mm)
325 {
326 	struct pager_table *pt;
327 	unsigned idx;
328 	vaddr_t smem = tee_mm_get_smem(mm);
329 	size_t nbytes = tee_mm_get_bytes(mm);
330 	vaddr_t v;
331 
332 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
333 
334 	assert(!pager_alias_area);
335 	pager_alias_area = mm;
336 	pager_alias_next_free = smem;
337 
338 	/* Clear all mappings in the alias area */
339 	pt = find_pager_table(smem);
340 	idx = core_mmu_va2idx(&pt->tbl_info, smem);
341 	while (pt <= (pager_tables + ARRAY_SIZE(pager_tables) - 1)) {
342 		while (idx < TBL_NUM_ENTRIES) {
343 			v = core_mmu_idx2va(&pt->tbl_info, idx);
344 			if (v >= (smem + nbytes))
345 				goto out;
346 
347 			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
348 			idx++;
349 		}
350 
351 		pt++;
352 		idx = 0;
353 	}
354 
355 out:
356 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
357 }
358 
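/* Returns the number of used (non-empty) entries in the translation table */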
359 static size_t tbl_usage_count(struct core_mmu_table_info *ti)
360 {
361 	size_t n;
362 	paddr_t pa;
363 	size_t usage = 0;
364 
365 	for (n = 0; n < ti->num_entries; n++) {
366 		core_mmu_get_entry(ti, n, &pa, NULL);
367 		if (pa)
368 			usage++;
369 	}
370 	return usage;
371 }
372 
373 static void area_get_entry(struct tee_pager_area *area, size_t idx,
374 			   paddr_t *pa, uint32_t *attr)
375 {
376 	assert(area->pgt);
377 	assert(idx < TBL_NUM_ENTRIES);
378 	core_mmu_get_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
379 }
380 
381 static void area_set_entry(struct tee_pager_area *area, size_t idx,
382 			   paddr_t pa, uint32_t attr)
383 {
384 	assert(area->pgt);
385 	assert(idx < TBL_NUM_ENTRIES);
386 	core_mmu_set_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
387 }
388 
389 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
390 {
391 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
392 }
393 
394 static vaddr_t area_idx2va(struct tee_pager_area *area, size_t idx)
395 {
396 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
397 }
398 
399 void tee_pager_early_init(void)
400 {
401 	size_t n;
402 
403 	/*
404 	 * Note that this depends on add_pager_vaspace() adding vaspace
405 	 * after end of memory.
406 	 */
407 	for (n = 0; n < ARRAY_SIZE(pager_tables); n++) {
408 		if (!core_mmu_find_table(NULL, TEE_RAM_VA_START +
409 					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
410 					 &pager_tables[n].tbl_info))
411 			panic("can't find mmu tables");
412 
413 		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
414 			panic("Unsupported page size in translation table");
415 		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
416 		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);
417 
418 		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
419 		pgt_set_used_entries(&pager_tables[n].pgt,
420 				tbl_usage_count(&pager_tables[n].tbl_info));
421 	}
422 }
423 
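/*
 * Maps the physical page @pa at the next free entry in the alias area
 * and returns the aliased virtual address used when paging content in
 * and out.
 */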
424 static void *pager_add_alias_page(paddr_t pa)
425 {
426 	unsigned idx;
427 	struct core_mmu_table_info *ti;
428 	/* Alias pages mapped read-only, write access is enabled when needed */
429 	uint32_t attr = TEE_MATTR_VALID_BLOCK |
430 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
431 			TEE_MATTR_SECURE | TEE_MATTR_PR;
432 
433 	DMSG("0x%" PRIxPA, pa);
434 
435 	ti = find_table_info(pager_alias_next_free);
436 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
437 	core_mmu_set_entry(ti, idx, pa, attr);
438 	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
439 	pager_alias_next_free += SMALL_PAGE_SIZE;
440 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
441 				      tee_mm_get_bytes(pager_alias_area)))
442 		pager_alias_next_free = 0;
443 	return (void *)core_mmu_idx2va(ti, idx);
444 }
445 
446 static void area_insert_tail(struct tee_pager_area *area)
447 {
448 	uint32_t exceptions = pager_lock_check_stack(8);
449 
450 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
451 
452 	pager_unlock(exceptions);
453 }
454 KEEP_PAGER(area_insert_tail);
455 
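/*
 * Registers a core virtual address range backed by @fobj with the
 * pager. The range is split into one tee_pager_area per translation
 * table (CORE_MMU_PGDIR_SIZE) it spans.
 */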
456 void tee_pager_add_core_area(vaddr_t base, enum tee_pager_area_type type,
457 			     struct fobj *fobj)
458 {
459 	struct tee_pager_area *area = NULL;
460 	uint32_t flags = 0;
461 	size_t fobj_pgidx = 0;
462 	vaddr_t b = base;
463 	size_t s = 0;
464 	size_t s2 = 0;

	if (!fobj)
		panic();
	s = fobj->num_pages * SMALL_PAGE_SIZE;
465 
466 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : type %d", base, base + s, type);
467 
468 	if (base & SMALL_PAGE_MASK || !s) {
469 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, s);
470 		panic();
471 	}
472 
473 	switch (type) {
474 	case PAGER_AREA_TYPE_RO:
475 		flags = TEE_MATTR_PRX;
476 		break;
477 	case PAGER_AREA_TYPE_RW:
478 		flags = TEE_MATTR_PRW;
479 		break;
480 	case PAGER_AREA_TYPE_LOCK:
481 		flags = TEE_MATTR_PRW | TEE_MATTR_LOCKED;
482 		break;
483 	default:
484 		panic();
485 	}
486 
490 	while (s) {
491 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
492 		area = calloc(1, sizeof(*area));
493 		if (!area)
494 			panic("alloc_area");
495 
496 		area->fobj = fobj_get(fobj);
497 		area->fobj_pgidx = fobj_pgidx;
498 		area->type = type;
499 		area->pgt = find_core_pgt(b);
500 		area->base = b;
501 		area->size = s2;
502 		area->flags = flags;
503 		area_insert_tail(area);
504 
505 		b += s2;
506 		s -= s2;
507 		fobj_pgidx += s2 / SMALL_PAGE_SIZE;
508 	}
509 }
510 
511 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
512 					vaddr_t va)
513 {
514 	struct tee_pager_area *area;
515 
516 	if (!areas)
517 		return NULL;
518 
519 	TAILQ_FOREACH(area, areas, link) {
520 		if (core_is_buffer_inside(va, 1, area->base, area->size))
521 			return area;
522 	}
523 	return NULL;
524 }
525 
526 #ifdef CFG_PAGED_USER_TA
527 static struct tee_pager_area *find_uta_area(vaddr_t va)
528 {
529 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
530 
531 	if (!is_user_ta_ctx(ctx))
532 		return NULL;
533 	return find_area(to_user_ta_ctx(ctx)->areas, va);
534 }
535 #else
536 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
537 {
538 	return NULL;
539 }
540 #endif /*CFG_PAGED_USER_TA*/
541 
543 static uint32_t get_area_mattr(uint32_t area_flags)
544 {
545 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
546 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
547 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
548 
549 	return attr;
550 }
551 
552 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
553 {
554 	struct core_mmu_table_info *ti;
555 	paddr_t pa;
556 	unsigned idx;
557 
558 	ti = find_table_info((vaddr_t)pmem->va_alias);
559 	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
560 	core_mmu_get_entry(ti, idx, &pa, NULL);
561 	return pa;
562 }
563 
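/*
 * Populates the physical page aliased at @va_alias with the content of
 * the fobj page backing @page_va in @area. The alias mapping is made
 * writable while the content is loaded and, for read-only areas, made
 * read-only again afterwards.
 */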
564 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
565 			void *va_alias)
566 {
567 	size_t fobj_pgidx = ((page_va - area->base) >> SMALL_PAGE_SHIFT) +
568 			    area->fobj_pgidx;
569 	struct core_mmu_table_info *ti;
570 	uint32_t attr_alias;
571 	paddr_t pa_alias;
572 	unsigned int idx_alias;
573 
574 	/* Ensure we are allowed to write to the aliased virtual page */
575 	ti = find_table_info((vaddr_t)va_alias);
576 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
577 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
578 	if (!(attr_alias & TEE_MATTR_PW)) {
579 		attr_alias |= TEE_MATTR_PW;
580 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
581 		tlbi_mva_allasid((vaddr_t)va_alias);
582 	}
583 
584 	asan_tag_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
585 	if (fobj_load_page(area->fobj, fobj_pgidx, va_alias)) {
586 		EMSG("PH 0x%" PRIxVA " failed", page_va);
587 		panic();
588 	}
589 	switch (area->type) {
590 	case PAGER_AREA_TYPE_RO:
591 		incr_ro_hits();
592 		/* Forbid write to aliases for read-only (maybe exec) pages */
593 		attr_alias &= ~TEE_MATTR_PW;
594 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
595 		tlbi_mva_allasid((vaddr_t)va_alias);
596 		break;
597 	case PAGER_AREA_TYPE_RW:
598 		incr_rw_hits();
599 		break;
600 	case PAGER_AREA_TYPE_LOCK:
601 		break;
602 	default:
603 		panic();
604 	}
605 	asan_tag_no_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
606 }
607 
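/*
 * Writes the content of a dirty page back to its backing fobj via the
 * alias mapping. Pages that aren't dirty are left as is.
 */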
608 static void tee_pager_save_page(struct tee_pager_pmem *pmem)
609 {
610 	if (pmem_is_dirty(pmem)) {
611 		size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
612 		size_t fobj_pgidx = (pmem->pgidx - (offs >> SMALL_PAGE_SHIFT)) +
613 				    pmem->area->fobj_pgidx;
614 
615 		asan_tag_access(pmem->va_alias,
616 				(uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
617 		if (fobj_save_page(pmem->area->fobj, fobj_pgidx,
618 				   pmem->va_alias))
619 			panic("fobj_save_page");
620 		asan_tag_no_access(pmem->va_alias,
621 				   (uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
622 	}
623 }
624 
625 #ifdef CFG_PAGED_USER_TA
626 static void free_area(struct tee_pager_area *area)
627 {
628 	fobj_put(area->fobj);
629 	free(area);
630 }
631 
632 static TEE_Result pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
633 				     struct fobj *fobj)
634 {
635 	struct tee_pager_area *area;
636 	vaddr_t b = base;
637 	size_t fobj_pgidx = 0;
638 	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
639 
640 	if (!utc->areas) {
641 		utc->areas = malloc(sizeof(*utc->areas));
642 		if (!utc->areas)
643 			return TEE_ERROR_OUT_OF_MEMORY;
644 		TAILQ_INIT(utc->areas);
645 	}
646 
647 	while (s) {
648 		size_t s2;
649 
650 		if (find_area(utc->areas, b))
651 			return TEE_ERROR_BAD_PARAMETERS;
652 
653 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
654 		area = calloc(1, sizeof(*area));
655 		if (!area)
656 			return TEE_ERROR_OUT_OF_MEMORY;
657 
658 		/* Table info will be set when the context is activated. */
659 		area->fobj = fobj_get(fobj);
660 		area->fobj_pgidx = fobj_pgidx;
661 		area->type = PAGER_AREA_TYPE_RW;
662 		area->base = b;
663 		area->size = s2;
664 		area->flags = TEE_MATTR_PRW | TEE_MATTR_URWX;
665 
666 		TAILQ_INSERT_TAIL(utc->areas, area, link);
667 		b += s2;
668 		s -= s2;
669 		fobj_pgidx += s2 / SMALL_PAGE_SIZE;
670 	}
671 
672 	return TEE_SUCCESS;
673 }
674 
675 TEE_Result tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
676 			    struct fobj *fobj)
677 {
678 	TEE_Result res = TEE_SUCCESS;
679 	struct thread_specific_data *tsd = thread_get_tsd();
680 	struct tee_pager_area *area = NULL;
681 	struct core_mmu_table_info dir_info = { NULL };
682 
683 	if (&utc->ctx != tsd->ctx) {
684 		/*
685 		 * Changes are to a utc that isn't active. Just add the
686 		 * areas, page tables will be dealt with later.
687 		 */
688 		return pager_add_uta_area(utc, base, fobj);
689 	}
690 
691 	/*
692 	 * Assign page tables before adding areas to be able to tell which
693 	 * are newly added and should be removed in case of failure.
694 	 */
695 	tee_pager_assign_uta_tables(utc);
696 	res = pager_add_uta_area(utc, base, fobj);
697 	if (res) {
698 		struct tee_pager_area *next_a;
699 
700 		/* Remove all added areas */
701 		TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
702 			if (!area->pgt) {
703 				TAILQ_REMOVE(utc->areas, area, link);
704 				free_area(area);
705 			}
706 		}
707 		return res;
708 	}
709 
710 	/*
711 	 * Assign page tables to the new areas and make sure that the page
712 	 * tables are registered in the upper table.
713 	 */
714 	tee_pager_assign_uta_tables(utc);
715 	core_mmu_get_user_pgdir(&dir_info);
716 	TAILQ_FOREACH(area, utc->areas, link) {
717 		paddr_t pa;
718 		size_t idx;
719 		uint32_t attr;
720 
721 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
722 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
723 
724 		/*
725 		 * Check if the page table is already used, if it is, it's
726 		 * already registered.
727 		 */
728 		if (area->pgt->num_used_entries) {
729 			assert(attr & TEE_MATTR_TABLE);
730 			assert(pa == virt_to_phys(area->pgt->tbl));
731 			continue;
732 		}
733 
734 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
735 		pa = virt_to_phys(area->pgt->tbl);
736 		assert(pa);
737 		/*
738 		 * Note that the update of the table entry is guaranteed to
739 		 * be atomic.
740 		 */
741 		core_mmu_set_entry(&dir_info, idx, pa, attr);
742 	}
743 
744 	return TEE_SUCCESS;
745 }
746 
747 static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
748 				   struct pgt *pgt)
749 {
750 	assert(pgt);
751 	ti->table = pgt->tbl;
752 	ti->va_base = pgt->vabase;
753 	ti->level = TBL_LEVEL;
754 	ti->shift = TBL_SHIFT;
755 	ti->num_entries = TBL_NUM_ENTRIES;
756 }
757 
758 static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
759 			   vaddr_t new_base)
760 {
761 	uint32_t exceptions = pager_lock_check_stack(64);
762 
763 	/*
764 	 * If there's no pgt assigned to the old area there are no pages to
765 	 * deal with either, just update with a new pgt and base.
766 	 */
767 	if (area->pgt) {
768 		struct core_mmu_table_info old_ti;
769 		struct core_mmu_table_info new_ti;
770 		struct tee_pager_pmem *pmem;
771 
772 		init_tbl_info_from_pgt(&old_ti, area->pgt);
773 		init_tbl_info_from_pgt(&new_ti, new_pgt);
774 
776 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
777 			vaddr_t va;
778 			paddr_t pa;
779 			uint32_t attr;
780 
781 			if (pmem->area != area)
782 				continue;
783 			core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr);
784 			core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0);
785 
786 			assert(pa == get_pmem_pa(pmem));
787 			assert(attr);
788 			assert(area->pgt->num_used_entries);
789 			area->pgt->num_used_entries--;
790 
791 			va = core_mmu_idx2va(&old_ti, pmem->pgidx);
792 			va = va - area->base + new_base;
793 			pmem->pgidx = core_mmu_va2idx(&new_ti, va);
794 			core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr);
795 			new_pgt->num_used_entries++;
796 		}
797 	}
798 
799 	area->pgt = new_pgt;
800 	area->base = new_base;
801 	pager_unlock(exceptions);
802 }
803 KEEP_PAGER(transpose_area);
804 
805 void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
806 				   vaddr_t src_base,
807 				   struct user_ta_ctx *dst_utc,
808 				   vaddr_t dst_base, struct pgt **dst_pgt,
809 				   size_t size)
810 {
811 	struct tee_pager_area *area;
812 	struct tee_pager_area *next_a;
813 
814 	TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
815 		vaddr_t new_area_base;
816 		size_t new_idx;
817 
818 		if (!core_is_buffer_inside(area->base, area->size,
819 					  src_base, size))
820 			continue;
821 
822 		TAILQ_REMOVE(src_utc->areas, area, link);
823 
824 		new_area_base = dst_base + (src_base - area->base);
825 		new_idx = (new_area_base - dst_pgt[0]->vabase) /
826 			  CORE_MMU_PGDIR_SIZE;
827 		assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
828 		       dst_pgt[new_idx]->vabase);
829 		transpose_area(area, dst_pgt[new_idx], new_area_base);
830 
831 		/*
832 		 * Assert that this will not cause any conflicts in the new
833 		 * utc.  This should already be guaranteed, but a bug here
834 		 * could be tricky to find.
835 		 */
836 		assert(!find_area(dst_utc->areas, area->base));
837 		TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
838 	}
839 }
840 
841 static void rem_area(struct tee_pager_area_head *area_head,
842 		     struct tee_pager_area *area)
843 {
844 	struct tee_pager_pmem *pmem;
845 	uint32_t exceptions;
846 
847 	exceptions = pager_lock_check_stack(64);
848 
849 	TAILQ_REMOVE(area_head, area, link);
850 
851 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
852 		if (pmem->area == area) {
853 			area_set_entry(area, pmem->pgidx, 0, 0);
854 			tlbi_mva_allasid(area_idx2va(area, pmem->pgidx));
855 			pgt_dec_used_entries(area->pgt);
856 			pmem->area = NULL;
857 			pmem->pgidx = INVALID_PGIDX;
858 		}
859 	}
860 
861 	pager_unlock(exceptions);
862 	free_area(area);
863 }
864 KEEP_PAGER(rem_area);
865 
866 void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
867 			      size_t size)
868 {
869 	struct tee_pager_area *area;
870 	struct tee_pager_area *next_a;
871 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
872 
873 	TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
874 		if (core_is_buffer_inside(area->base, area->size, base, s))
875 			rem_area(utc->areas, area);
876 	}
877 }
878 
879 void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
880 {
881 	struct tee_pager_area *area;
882 
883 	if (!utc->areas)
884 		return;
885 
886 	while (true) {
887 		area = TAILQ_FIRST(utc->areas);
888 		if (!area)
889 			break;
890 		TAILQ_REMOVE(utc->areas, area, link);
891 		free_area(area);
892 	}
893 
894 	free(utc->areas);
895 }
896 
897 bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
898 				 size_t size, uint32_t flags)
899 {
900 	bool ret = false;
901 	vaddr_t b = base;
902 	size_t s = size;
903 	size_t s2 = 0;
904 	struct tee_pager_area *area = find_area(utc->areas, b);
905 	uint32_t exceptions = 0;
906 	struct tee_pager_pmem *pmem = NULL;
907 	paddr_t pa = 0;
908 	uint32_t a = 0;
909 	uint32_t f = 0;
910 	uint32_t f2 = 0;
911 
912 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
913 	if (f & TEE_MATTR_UW)
914 		f |= TEE_MATTR_PW;
915 	f = get_area_mattr(f);
916 
917 	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
918 
919 	while (s) {
920 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
921 		if (!area || area->base != b || area->size != s2) {
922 			ret = false;
923 			goto out;
924 		}
925 		b += s2;
926 		s -= s2;
927 
928 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
929 			if (pmem->area != area)
930 				continue;
931 			area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
932 			assert(pa == get_pmem_pa(pmem));
933 			if (a == f)
934 				continue;
935 			area_set_entry(pmem->area, pmem->pgidx, 0, 0);
936 			tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
937 
938 			pmem->flags &= ~PMEM_FLAG_HIDDEN;
939 			if (pmem_is_dirty(pmem))
940 				f2 = f;
941 			else
942 				f2 = f & ~(TEE_MATTR_UW | TEE_MATTR_PW);
943 			area_set_entry(pmem->area, pmem->pgidx, pa, f2);
944 			/*
945 			 * Make sure the table update is visible before
946 			 * continuing.
947 			 */
948 			dsb_ishst();
949 
950 			if (flags & TEE_MATTR_UX) {
951 				void *va = (void *)area_idx2va(pmem->area,
952 							       pmem->pgidx);
953 
954 				cache_op_inner(DCACHE_AREA_CLEAN, va,
955 						SMALL_PAGE_SIZE);
956 				cache_op_inner(ICACHE_AREA_INVALIDATE, va,
957 						SMALL_PAGE_SIZE);
958 			}
959 		}
960 
961 		area->flags = f;
962 		area = TAILQ_NEXT(area, link);
963 	}
964 
965 	ret = true;
966 out:
967 	pager_unlock(exceptions);
968 	return ret;
969 }
970 KEEP_PAGER(tee_pager_set_uta_area_attr);
971 #endif /*CFG_PAGED_USER_TA*/
972 
973 static struct tee_pager_pmem *pmem_find(struct tee_pager_area *area,
974 					unsigned int pgidx)
975 {
976 	struct tee_pager_pmem *pmem = NULL;
977 
978 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
979 		if (pmem->area == area && pmem->pgidx == pgidx)
980 			return pmem;
981 
982 	return NULL;
983 }
984 
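/*
 * If the page at @page_va is resident but hidden, restores its mapping
 * (read-only unless the page is dirty), moves it to the back of the
 * pmem list and returns true. Returns false otherwise.
 */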
985 static bool tee_pager_unhide_page(vaddr_t page_va)
986 {
987 	struct tee_pager_pmem *pmem;
988 
989 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
990 		if (pmem->pgidx == INVALID_PGIDX)
991 			continue;
992 
993 		if (!pmem_is_hidden(pmem))
994 			continue;
995 
996 		if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
997 			paddr_t pa = 0;
998 			uint32_t a = get_area_mattr(pmem->area->flags);
999 
1000 			/* page is hidden, show and move to back */
1001 
1002 			/*
1003 			 * If it's not a dirty block, then it should be
1004 			 * read only.
1005 			 */
1006 			if (!pmem_is_dirty(pmem))
1007 				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1008 
1009 			pmem->flags &= ~PMEM_FLAG_HIDDEN;
1010 			area_get_entry(pmem->area, pmem->pgidx, &pa, NULL);
1011 			assert(pa == get_pmem_pa(pmem));
1012 			area_set_entry(pmem->area, pmem->pgidx, pa, a);
1013 			/*
1014 			 * Note that TLB invalidation isn't needed since
1015 			 * there wasn't a valid mapping before. We should
1016 			 * use a barrier though, to make sure that the
1017 			 * change is visible.
1018 			 */
1019 			dsb_ishst();
1020 
1021 			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1022 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1023 			incr_hidden_hits();
1024 			return true;
1025 		}
1026 	}
1027 
1028 	return false;
1029 }
1030 
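/*
 * Removes the mappings of up to TEE_PAGER_NHIDE of the oldest resident
 * pages and marks them as hidden. A later access to a hidden page is
 * restored by tee_pager_unhide_page() without reloading the content.
 */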
1031 static void tee_pager_hide_pages(void)
1032 {
1033 	struct tee_pager_pmem *pmem;
1034 	size_t n = 0;
1035 	paddr_t pa = 0;
1036 
1037 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1038 		if (n >= TEE_PAGER_NHIDE)
1039 			break;
1040 		n++;
1041 
1042 		/* we cannot hide pages when pmem->area is not defined. */
1043 		if (!pmem->area)
1044 			continue;
1045 
1046 		if (pmem_is_hidden(pmem))
1047 			continue;
1048 
1049 		pmem->flags |= PMEM_FLAG_HIDDEN;
1050 		area_get_entry(pmem->area, pmem->pgidx, &pa, NULL);
1051 		assert(pa == get_pmem_pa(pmem));
1052 		area_set_entry(pmem->area, pmem->pgidx, pa, 0);
1053 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1054 	}
1055 }
1056 
1057 /*
1058  * Find mapped pmem, unmap it and move it back to the pageable pmem list.
1059  * Return false if page was not mapped, and true if page was mapped.
1060  */
1061 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1062 				       vaddr_t page_va)
1063 {
1064 	struct tee_pager_pmem *pmem;
1065 	unsigned pgidx;
1066 	paddr_t pa;
1067 	uint32_t attr;
1068 
1069 	pgidx = area_va2idx(area, page_va);
1070 	area_get_entry(area, pgidx, &pa, &attr);
1071 
1072 	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
1073 
1074 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1075 		if (pmem->area != area || pmem->pgidx != pgidx)
1076 			continue;
1077 
1078 		assert(pa == get_pmem_pa(pmem));
1079 		area_set_entry(area, pgidx, 0, 0);
1080 		pgt_dec_used_entries(area->pgt);
1081 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1082 		pmem->area = NULL;
1083 		pmem->pgidx = INVALID_PGIDX;
1084 		tee_pager_npages++;
1085 		set_npages();
1086 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1087 		incr_zi_released();
1088 		return true;
1089 	}
1090 
1091 	return false;
1092 }
1093 
1094 /* Finds the oldest page and unmaps it from its old virtual address */
1095 static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
1096 {
1097 	struct tee_pager_pmem *pmem;
1098 
1099 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1100 	if (!pmem) {
1101 		EMSG("No pmem entries");
1102 		return NULL;
1103 	}
1104 	if (pmem->pgidx != INVALID_PGIDX) {
1105 		assert(pmem->area && pmem->area->pgt);
1106 		area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1107 		pgt_dec_used_entries(pmem->area->pgt);
1108 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1109 		tee_pager_save_page(pmem);
1110 	}
1111 
1112 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1113 	pmem->pgidx = INVALID_PGIDX;
1114 	pmem->area = NULL;
1115 	pmem->flags = 0;
1116 	if (area->type == PAGER_AREA_TYPE_LOCK) {
1117 		/* Move page to lock list */
1118 		if (tee_pager_npages <= 0)
1119 			panic("running out of page");
1120 		tee_pager_npages--;
1121 		set_npages();
1122 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1123 	} else {
1124 		/* move page to back */
1125 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1126 	}
1127 
1128 	return pmem;
1129 }
1130 
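/*
 * Handles aborts on pages that already are mapped, for instance
 * permission faults or races where another core mapped the page first.
 * Returns false if the page isn't mapped at all (it has to be paged in),
 * true otherwise. *handled is set to true if the access can simply be
 * retried, and left false for user TA accesses that should be reported
 * as an error.
 */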
1131 static bool pager_update_permissions(struct tee_pager_area *area,
1132 			struct abort_info *ai, bool *handled)
1133 {
1134 	unsigned int pgidx = area_va2idx(area, ai->va);
1135 	struct tee_pager_pmem *pmem = NULL;
1136 	uint32_t attr = 0;
1137 	paddr_t pa = 0;
1138 
1139 	*handled = false;
1140 
1141 	area_get_entry(area, pgidx, &pa, &attr);
1142 
1143 	/* Not mapped */
1144 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1145 		return false;
1146 
1147 	/* Not readable, should not happen */
1148 	if (abort_is_user_exception(ai)) {
1149 		if (!(attr & TEE_MATTR_UR))
1150 			return true;
1151 	} else {
1152 		if (!(attr & TEE_MATTR_PR)) {
1153 			abort_print_error(ai);
1154 			panic();
1155 		}
1156 	}
1157 
1158 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1159 	case CORE_MMU_FAULT_TRANSLATION:
1160 	case CORE_MMU_FAULT_READ_PERMISSION:
1161 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1162 			/* Check if attempting to execute from a NOX page */
1163 			if (abort_is_user_exception(ai)) {
1164 				if (!(attr & TEE_MATTR_UX))
1165 					return true;
1166 			} else {
1167 				if (!(attr & TEE_MATTR_PX)) {
1168 					abort_print_error(ai);
1169 					panic();
1170 				}
1171 			}
1172 		}
1173 		/* Since the page is mapped now it's OK */
1174 		break;
1175 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1176 		/* Check if attempting to write to an RO page */
1177 		pmem = pmem_find(area, pgidx);
1178 		if (!pmem)
1179 			panic();
1180 		if (abort_is_user_exception(ai)) {
1181 			if (!(area->flags & TEE_MATTR_UW))
1182 				return true;
1183 			if (!(attr & TEE_MATTR_UW)) {
1184 				FMSG("Dirty %p",
1185 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1186 				pmem->flags |= PMEM_FLAG_DIRTY;
1187 				area_set_entry(area, pgidx, pa,
1188 					       get_area_mattr(area->flags));
1189 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1190 			}
1191 
1192 		} else {
1193 			if (!(area->flags & TEE_MATTR_PW)) {
1194 				abort_print_error(ai);
1195 				panic();
1196 			}
1197 			if (!(attr & TEE_MATTR_PW)) {
1198 				FMSG("Dirty %p",
1199 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1200 				pmem->flags |= PMEM_FLAG_DIRTY;
1201 				area_set_entry(area, pgidx, pa,
1202 					       get_area_mattr(area->flags));
1203 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1204 			}
1205 		}
1206 		/* Since permissions have been updated now it's OK */
1207 		break;
1208 	default:
1209 		/* Some fault we can't deal with */
1210 		if (abort_is_user_exception(ai))
1211 			return true;
1212 		abort_print_error(ai);
1213 		panic();
1214 	}
1215 	*handled = true;
1216 	return true;
1217 }
1218 
1219 #ifdef CFG_TEE_CORE_DEBUG
1220 static void stat_handle_fault(void)
1221 {
1222 	static size_t num_faults;
1223 	static size_t min_npages = SIZE_MAX;
1224 	static size_t total_min_npages = SIZE_MAX;
1225 
1226 	num_faults++;
1227 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1228 		DMSG("nfaults %zu npages %zu (min %zu)",
1229 		     num_faults, tee_pager_npages, min_npages);
1230 		min_npages = tee_pager_npages; /* reset */
1231 	}
1232 	if (tee_pager_npages < min_npages)
1233 		min_npages = tee_pager_npages;
1234 	if (tee_pager_npages < total_min_npages)
1235 		total_min_npages = tee_pager_npages;
1236 }
1237 #else
1238 static void stat_handle_fault(void)
1239 {
1240 }
1241 #endif
1242 
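/*
 * Handles a translation or permission fault inside a paged region.
 * With the pager lock held it either unhides a hidden page, updates the
 * permissions of an already mapped page, or picks a physical page,
 * loads the content via the alias mapping and maps it at the faulting
 * address. Returns false if the faulting address isn't handled by the
 * pager.
 */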
1243 bool tee_pager_handle_fault(struct abort_info *ai)
1244 {
1245 	struct tee_pager_area *area;
1246 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1247 	uint32_t exceptions;
1248 	bool ret;
1249 
1250 #ifdef TEE_PAGER_DEBUG_PRINT
1251 	abort_print(ai);
1252 #endif
1253 
1254 	/*
1255 	 * We're updating pages that can affect several active CPUs at a
1256 	 * time below. We end up here because a thread tries to access some
1257 	 * memory that isn't available. We have to be careful when making
1258 	 * that memory available as other threads may succeed in accessing
1259 	 * that address the moment after we've made it available.
1260 	 *
1261 	 * That means that we can't just map the memory and populate the
1262 	 * page, instead we use the aliased mapping to populate the page
1263 	 * and once everything is ready we map it.
1264 	 */
1265 	exceptions = pager_lock(ai);
1266 
1267 	stat_handle_fault();
1268 
1269 	/* check if the access is valid */
1270 	if (abort_is_user_exception(ai)) {
1271 		area = find_uta_area(ai->va);
1272 
1273 	} else {
1274 		area = find_area(&tee_pager_area_head, ai->va);
1275 		if (!area)
1276 			area = find_uta_area(ai->va);
1277 	}
1278 	if (!area || !area->pgt) {
1279 		ret = false;
1280 		goto out;
1281 	}
1282 
1283 	if (!tee_pager_unhide_page(page_va)) {
1284 		struct tee_pager_pmem *pmem = NULL;
1285 		uint32_t attr;
1286 		paddr_t pa;
1287 
1288 		/*
1289 		 * The page wasn't hidden, but some other core may have
1290 		 * updated the table entry before we got here or we need
1291 		 * to make a read-only page read-write (dirty).
1292 		 */
1293 		if (pager_update_permissions(area, ai, &ret)) {
1294 			/*
1295 			 * Nothing more to do with the abort. The problem
1296 			 * could already have been dealt with by another
1297 			 * core, or if ret is false the TA will be panicked.
1298 			 */
1299 			goto out;
1300 		}
1301 
1302 		pmem = tee_pager_get_page(area);
1303 		if (!pmem) {
1304 			abort_print(ai);
1305 			panic();
1306 		}
1307 
1308 		/* load page code & data */
1309 		tee_pager_load_page(area, page_va, pmem->va_alias);
1310 
1312 		pmem->area = area;
1313 		pmem->pgidx = area_va2idx(area, ai->va);
1314 		attr = get_area_mattr(area->flags);
1315 		/*
1316 		 * Pages from PAGER_AREA_TYPE_RW start read-only to be
1317 		 * able to tell when they are updated and should be tagged
1318 		 * as dirty.
1319 		 */
1320 		if (area->type == PAGER_AREA_TYPE_RW)
1321 			attr &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1322 		pa = get_pmem_pa(pmem);
1323 
1324 		/*
1325 		 * We've updated the page using the aliased mapping and
1326 		 * some cache maintenance is now needed if it's an
1327 		 * executable page.
1328 		 *
1329 		 * Since the d-cache is a Physically-indexed,
1330 		 * physically-tagged (PIPT) cache we can clean either the
1331 		 * aliased address or the real virtual address. In this
1332 		 * case we choose the real virtual address.
1333 		 *
1334 		 * The i-cache can also be PIPT, but may be something else
1335 		 * too like VIPT. The current code requires the caches to
1336 		 * implement the IVIPT extension, that is:
1337 		 * "instruction cache maintenance is required only after
1338 		 * writing new data to a physical address that holds an
1339 		 * instruction."
1340 		 *
1341 		 * To portably invalidate the icache the page has to
1342 		 * be mapped at the final virtual address but not
1343 		 * executable.
1344 		 */
1345 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1346 			uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1347 					TEE_MATTR_PW | TEE_MATTR_UW;
1348 
1349 			/* Set a temporary read-only mapping */
1350 			area_set_entry(pmem->area, pmem->pgidx, pa,
1351 				       attr & ~mask);
1352 			tlbi_mva_allasid(page_va);
1353 
1354 			/*
1355 			 * Doing these operations to LoUIS (Level of
1356 			 * unification, Inner Shareable) would be enough
1357 			 */
1358 			cache_op_inner(DCACHE_AREA_CLEAN, (void *)page_va,
1359 				       SMALL_PAGE_SIZE);
1360 			cache_op_inner(ICACHE_AREA_INVALIDATE, (void *)page_va,
1361 				       SMALL_PAGE_SIZE);
1362 
1363 			/* Set the final mapping */
1364 			area_set_entry(area, pmem->pgidx, pa, attr);
1365 			tlbi_mva_allasid(page_va);
1366 		} else {
1367 			area_set_entry(area, pmem->pgidx, pa, attr);
1368 			/*
1369 			 * No need to flush TLB for this entry, it was
1370 			 * invalid. We should use a barrier though, to make
1371 			 * sure that the change is visible.
1372 			 */
1373 			dsb_ishst();
1374 		}
1375 		pgt_inc_used_entries(area->pgt);
1376 
1377 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1378 
1379 	}
1380 
1381 	tee_pager_hide_pages();
1382 	ret = true;
1383 out:
1384 	pager_unlock(exceptions);
1385 	return ret;
1386 }
1387 
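/*
 * Adds @npages physical pages, currently mapped at @vaddr, to the pool
 * of pages used for paging. If @unmap is true the pages are unmapped
 * from @vaddr, otherwise they stay mapped and are assigned to the pager
 * area covering them.
 */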
1388 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1389 {
1390 	size_t n;
1391 
1392 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1393 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1394 
1395 	/* setup memory */
1396 	for (n = 0; n < npages; n++) {
1397 		struct core_mmu_table_info *ti;
1398 		struct tee_pager_pmem *pmem;
1399 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1400 		unsigned int pgidx;
1401 		paddr_t pa;
1402 		uint32_t attr;
1403 
1404 		ti = find_table_info(va);
1405 		pgidx = core_mmu_va2idx(ti, va);
1406 		/*
1407 		 * Note that we can only support adding pages in the
1408 		 * valid range of this table info, currently not a problem.
1409 		 */
1410 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1411 
1412 		/* Ignore unmapped pages/blocks */
1413 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1414 			continue;
1415 
1416 		pmem = calloc(1, sizeof(struct tee_pager_pmem));
1417 		if (!pmem)
1418 			panic("out of mem");
1419 
1420 		pmem->va_alias = pager_add_alias_page(pa);
1421 
1422 		if (unmap) {
1423 			pmem->area = NULL;
1424 			pmem->pgidx = INVALID_PGIDX;
1425 			core_mmu_set_entry(ti, pgidx, 0, 0);
1426 			pgt_dec_used_entries(find_core_pgt(va));
1427 		} else {
1428 			/*
1429 			 * The page is still mapped, let's assign the area
1430 			 * and update the protection bits accordingly.
1431 			 */
1432 			pmem->area = find_area(&tee_pager_area_head, va);
1433 			assert(pmem->area->pgt == find_core_pgt(va));
1434 			pmem->pgidx = pgidx;
1435 			assert(pa == get_pmem_pa(pmem));
1436 			area_set_entry(pmem->area, pgidx, pa,
1437 				       get_area_mattr(pmem->area->flags));
1438 		}
1439 
1440 		tee_pager_npages++;
1441 		incr_npages_all();
1442 		set_npages();
1443 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1444 	}
1445 
1446 	/*
1447 	 * As this is done during init, invalidate all TLBs once instead of
1448 	 * targeting only the modified entries.
1449 	 */
1450 	tlbi_all();
1451 }
1452 
1453 #ifdef CFG_PAGED_USER_TA
1454 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1455 {
1456 	struct pgt *p = pgt;
1457 
1458 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1459 		p = SLIST_NEXT(p, link);
1460 	return p;
1461 }
1462 
1463 void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
1464 {
1465 	struct tee_pager_area *area;
1466 	struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1467 
1468 	TAILQ_FOREACH(area, utc->areas, link) {
1469 		if (!area->pgt)
1470 			area->pgt = find_pgt(pgt, area->base);
1471 		else
1472 			assert(area->pgt == find_pgt(pgt, area->base));
1473 		if (!area->pgt)
1474 			panic();
1475 	}
1476 }
1477 
1478 static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
1479 {
1480 	assert(pmem->area && pmem->area->pgt);
1481 
1482 	area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1483 	tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1484 	tee_pager_save_page(pmem);
1485 	assert(pmem->area->pgt->num_used_entries);
1486 	pmem->area->pgt->num_used_entries--;
1487 	pmem->pgidx = INVALID_PGIDX;
1488 	pmem->area = NULL;
1489 	pmem->flags = 0;
1490 }
1491 
1492 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1493 {
1494 	struct tee_pager_pmem *pmem;
1495 	struct tee_pager_area *area;
1496 	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1497 
1498 	if (!pgt->num_used_entries)
1499 		goto out;
1500 
1501 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1502 		if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
1503 			continue;
1504 		if (pmem->area->pgt == pgt)
1505 			pager_save_and_release_entry(pmem);
1506 	}
1507 	assert(!pgt->num_used_entries);
1508 
1509 out:
1510 	if (is_user_ta_ctx(pgt->ctx)) {
1511 		TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
1512 			if (area->pgt == pgt)
1513 				area->pgt = NULL;
1514 		}
1515 	}
1516 
1517 	pager_unlock(exceptions);
1518 }
1519 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1520 #endif /*CFG_PAGED_USER_TA*/
1521 
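/*
 * Releases the physical pages backing the whole pages of a locked range
 * previously populated by the pager and returns them to the pool of
 * pageable pages.
 */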
1522 void tee_pager_release_phys(void *addr, size_t size)
1523 {
1524 	bool unmapped = false;
1525 	vaddr_t va = (vaddr_t)addr;
1526 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1527 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1528 	struct tee_pager_area *area;
1529 	uint32_t exceptions;
1530 
1531 	if (end <= begin)
1532 		return;
1533 
1534 	exceptions = pager_lock_check_stack(128);
1535 
1536 	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
1537 		area = find_area(&tee_pager_area_head, va);
1538 		if (!area)
1539 			panic();
1540 		unmapped |= tee_pager_release_one_phys(area, va);
1541 	}
1542 
1543 	if (unmapped)
1544 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1545 
1546 	pager_unlock(exceptions);
1547 }
1548 KEEP_PAGER(tee_pager_release_phys);
1549 
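/*
 * Allocates @size bytes (rounded up to whole pages) of locked, paged
 * core memory from the tee_mm_vcore pool. Physical pages are only
 * consumed once the memory is accessed and can be returned with
 * tee_pager_release_phys().
 */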
1550 void *tee_pager_alloc(size_t size)
1551 {
1552 	tee_mm_entry_t *mm = NULL;
1553 	uint8_t *smem = NULL;
1554 	size_t num_pages = 0;
1555 	struct fobj *fobj = NULL;
1556 
1557 	if (!size)
1558 		return NULL;
1559 
1560 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1561 	if (!mm)
1562 		return NULL;
1563 
1564 	smem = (uint8_t *)tee_mm_get_smem(mm);
1565 	num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
1566 	fobj = fobj_locked_paged_alloc(num_pages);
1567 	if (!fobj) {
1568 		tee_mm_free(mm);
1569 		return NULL;
1570 	}
1571 
1572 	tee_pager_add_core_area((vaddr_t)smem, PAGER_AREA_TYPE_LOCK, fobj);
1573 	fobj_put(fobj);
1574 
1575 	asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE);
1576 
1577 	return smem;
1578 }
1579