xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision e595a5f0a2c5d3ce527db8d0fdad02476d9d7743)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <io.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/asan.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/tlb_helpers.h>
#include <mm/core_memprot.h>
#include <mm/fobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <tee_api_defines.h>
#include <trace.h>
#include <types_ext.h>
#include <utee_defines.h>
#include <util.h>

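/*
 * struct tee_pager_area - Represents a paged virtual address range.
 *
 * @fobj	backing file object providing and storing page content
 * @fobj_pgidx	index in @fobj of the first page of this area
 * @type	PAGER_AREA_TYPE_{RO,RW,LOCK} as assigned when the area is added
 * @flags	TEE_MATTR_* attributes used when mapping pages of the area
 * @base	virtual start address, an area never spans a pgdir boundary
 * @size	size in bytes, a multiple of SMALL_PAGE_SIZE
 * @pgt		the page table holding this area's entries
 */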
struct tee_pager_area {
	struct fobj *fobj;
	size_t fobj_pgidx;
	enum tee_pager_area_type type;
	uint32_t flags;
	vaddr_t base;
	size_t size;
	struct pgt *pgt;
	TAILQ_ENTRY(tee_pager_area) link;
};

TAILQ_HEAD(tee_pager_area_head, tee_pager_area);

static struct tee_pager_area_head tee_pager_area_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);

#define INVALID_PGIDX		UINT_MAX
#define PMEM_FLAG_DIRTY		BIT(0)
#define PMEM_FLAG_HIDDEN	BIT(1)

/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @flags	flags defined by PMEM_FLAG_* above
 * @pgidx	index of the page's entry in the translation table of @area
 * @va_alias	Virtual address where the physical page always is aliased.
 *		Used during remapping of the page when the content needs to
 *		be updated before it's available at the new location.
 * @area	a pointer to the pager area
 */
struct tee_pager_pmem {
	unsigned int flags;
	unsigned int pgidx;
	void *va_alias;
	struct tee_pager_area *area;
	TAILQ_ENTRY(tee_pager_pmem) link;
};

/* The list of physical pages. The first page in the list is the oldest */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

static struct tee_pager_pmem_head tee_pager_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);

/* Maximum number of pages to hide in one go, see tee_pager_hide_pages() */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used when hiding pages. */
static size_t tee_pager_npages;

#ifdef CFG_WITH_STATS
static struct tee_pager_stats pager_stats;

static inline void incr_ro_hits(void)
{
	pager_stats.ro_hits++;
}

static inline void incr_rw_hits(void)
{
	pager_stats.rw_hits++;
}

static inline void incr_hidden_hits(void)
{
	pager_stats.hidden_hits++;
}

static inline void incr_zi_released(void)
{
	pager_stats.zi_released++;
}

static inline void incr_npages_all(void)
{
	pager_stats.npages_all++;
}

static inline void set_npages(void)
{
	pager_stats.npages = tee_pager_npages;
}

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	*stats = pager_stats;

	pager_stats.hidden_hits = 0;
	pager_stats.ro_hits = 0;
	pager_stats.rw_hits = 0;
	pager_stats.zi_released = 0;
}

#else /* CFG_WITH_STATS */
static inline void incr_ro_hits(void) { }
static inline void incr_rw_hits(void) { }
static inline void incr_hidden_hits(void) { }
static inline void incr_zi_released(void) { }
static inline void incr_npages_all(void) { }
static inline void set_npages(void) { }

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	memset(stats, 0, sizeof(struct tee_pager_stats));
}
#endif /* CFG_WITH_STATS */

#define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
#define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
#define TBL_SHIFT	SMALL_PAGE_SHIFT

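/*
 * EFFECTIVE_VA_SIZE is the TEE core virtual address range rounded out to
 * pgdir granularity, so that pager_tables[] below holds exactly one entry
 * per translation table needed to cover it.
 */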
#define EFFECTIVE_VA_SIZE \
	(ROUNDUP(TEE_RAM_VA_START + TEE_RAM_VA_SIZE, \
		 CORE_MMU_PGDIR_SIZE) - \
	 ROUNDDOWN(TEE_RAM_VA_START, CORE_MMU_PGDIR_SIZE))

static struct pager_table {
	struct pgt pgt;
	struct core_mmu_table_info tbl_info;
} pager_tables[EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE];

static unsigned pager_spinlock = SPINLOCK_UNLOCK;

/* Defines the range of the alias area */
static tee_mm_entry_t *pager_alias_area;
/*
 * Physical pages are added in a stack like fashion to the alias area,
 * @pager_alias_next_free gives the address of the next free entry as
 * long as it is non-zero.
 */
static uintptr_t pager_alias_next_free;

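/*
 * With CFG_TEE_CORE_DEBUG the pager lock doubles as a crude deadlock
 * detector: when the spinlock can't be taken after a full wrap of the
 * retry counter, the waiting caller and any pending abort are printed.
 */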
#ifdef CFG_TEE_CORE_DEBUG
#define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)

static uint32_t pager_lock_dldetect(const char *func, const int line,
				    struct abort_info *ai)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	unsigned int retries = 0;
	unsigned int reminder = 0;

	while (!cpu_spin_trylock(&pager_spinlock)) {
		retries++;
		if (!retries) {
			/* wrapped, time to report */
			trace_printf(func, line, TRACE_ERROR, true,
				     "possible spinlock deadlock reminder %u",
				     reminder);
			if (reminder < UINT_MAX)
				reminder++;
			if (ai)
				abort_print(ai);
		}
	}

	return exceptions;
}
#else
static uint32_t pager_lock(struct abort_info __unused *ai)
{
	return cpu_spin_lock_xsave(&pager_spinlock);
}
#endif

static uint32_t pager_lock_check_stack(size_t stack_size)
{
	if (stack_size) {
		int8_t buf[stack_size];
		size_t n;

		/*
		 * Make sure to touch all pages of the stack that we expect
		 * to use with this lock held. We need to take any page
		 * faults before the lock is taken or we'll deadlock the
		 * pager. The pages that are populated in this way will
		 * eventually be released at certain save transitions of
		 * the thread.
		 */
		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
			io_write8((vaddr_t)buf + n, 1);
		io_write8((vaddr_t)buf + stack_size - 1, 1);
	}

	return pager_lock(NULL);
}

static void pager_unlock(uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
}

void *tee_pager_phys_to_virt(paddr_t pa)
{
	struct core_mmu_table_info ti;
	unsigned idx;
	uint32_t a;
	paddr_t p;
	vaddr_t v;
	size_t n;

	/*
	 * Most addresses are mapped linearly, try that first if possible.
	 */
	if (!tee_pager_get_table_info(pa, &ti))
		return NULL; /* impossible pa */
	idx = core_mmu_va2idx(&ti, pa);
	core_mmu_get_entry(&ti, idx, &p, &a);
	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
		return (void *)core_mmu_idx2va(&ti, idx);

	n = 0;
	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
	while (true) {
		while (idx < TBL_NUM_ENTRIES) {
			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
				return NULL;

			core_mmu_get_entry(&pager_tables[n].tbl_info,
					   idx, &p, &a);
			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
				return (void *)v;
			idx++;
		}

		n++;
		if (n >= ARRAY_SIZE(pager_tables))
			return NULL;
		idx = 0;
	}

	return NULL;
}

static bool pmem_is_hidden(struct tee_pager_pmem *pmem)
{
	return pmem->flags & PMEM_FLAG_HIDDEN;
}

static bool pmem_is_dirty(struct tee_pager_pmem *pmem)
{
	return pmem->flags & PMEM_FLAG_DIRTY;
}

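/*
 * Maps a virtual address to the pager_table covering it. The tables span
 * a contiguous VA range starting at pager_tables[0].tbl_info.va_base, so
 * the index is simply the pgdir-aligned offset shifted down.
 */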
static struct pager_table *find_pager_table_may_fail(vaddr_t va)
{
	size_t n;
	const vaddr_t mask = CORE_MMU_PGDIR_MASK;

	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
	    CORE_MMU_PGDIR_SHIFT;
	if (n >= ARRAY_SIZE(pager_tables))
		return NULL;

	assert(va >= pager_tables[n].tbl_info.va_base &&
	       va <= (pager_tables[n].tbl_info.va_base | mask));

	return pager_tables + n;
}

static struct pager_table *find_pager_table(vaddr_t va)
{
	struct pager_table *pt = find_pager_table_may_fail(va);

	assert(pt);
	return pt;
}

bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
{
	struct pager_table *pt = find_pager_table_may_fail(va);

	if (!pt)
		return false;

	*ti = pt->tbl_info;
	return true;
}

static struct core_mmu_table_info *find_table_info(vaddr_t va)
{
	return &find_pager_table(va)->tbl_info;
}

static struct pgt *find_core_pgt(vaddr_t va)
{
	return &find_pager_table(va)->pgt;
}

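/*
 * Registers the alias area and clears any previous mappings in it.
 * Physical pages handed to the pager are later mapped here, one page at
 * a time, by pager_add_alias_page().
 */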
void tee_pager_set_alias_area(tee_mm_entry_t *mm)
{
	struct pager_table *pt;
	unsigned idx;
	vaddr_t smem = tee_mm_get_smem(mm);
	size_t nbytes = tee_mm_get_bytes(mm);
	vaddr_t v;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);

	assert(!pager_alias_area);
	pager_alias_area = mm;
	pager_alias_next_free = smem;

	/* Clear all mappings in the alias area */
	pt = find_pager_table(smem);
	idx = core_mmu_va2idx(&pt->tbl_info, smem);
	while (pt <= (pager_tables + ARRAY_SIZE(pager_tables) - 1)) {
		while (idx < TBL_NUM_ENTRIES) {
			v = core_mmu_idx2va(&pt->tbl_info, idx);
			if (v >= (smem + nbytes))
				goto out;

			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
			idx++;
		}

		pt++;
		idx = 0;
	}

out:
	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
}

static size_t tbl_usage_count(struct core_mmu_table_info *ti)
{
	size_t n;
	paddr_t pa;
	size_t usage = 0;

	for (n = 0; n < ti->num_entries; n++) {
		core_mmu_get_entry(ti, n, &pa, NULL);
		if (pa)
			usage++;
	}
	return usage;
}

static void area_get_entry(struct tee_pager_area *area, size_t idx,
			   paddr_t *pa, uint32_t *attr)
{
	assert(area->pgt);
	assert(idx < TBL_NUM_ENTRIES);
	core_mmu_get_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
}

static void area_set_entry(struct tee_pager_area *area, size_t idx,
			   paddr_t pa, uint32_t attr)
{
	assert(area->pgt);
	assert(idx < TBL_NUM_ENTRIES);
	core_mmu_set_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
}

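/*
 * Area indexes are relative to the pgdir-aligned base of the area's
 * translation table, i.e. they index straight into area->pgt->tbl.
 */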
static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
{
	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
}

static vaddr_t area_idx2va(struct tee_pager_area *area, size_t idx)
{
	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
}

void tee_pager_early_init(void)
{
	size_t n;

	/*
	 * Note that this depends on add_pager_vaspace() adding vaspace
	 * after end of memory.
	 */
	for (n = 0; n < ARRAY_SIZE(pager_tables); n++) {
		if (!core_mmu_find_table(NULL, TEE_RAM_VA_START +
					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
					 &pager_tables[n].tbl_info))
			panic("can't find mmu tables");

		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
			panic("Unsupported page size in translation table");
		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);

		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
		pgt_set_used_entries(&pager_tables[n].pgt,
				tbl_usage_count(&pager_tables[n].tbl_info));
	}
}

static void *pager_add_alias_page(paddr_t pa)
{
	unsigned idx;
	struct core_mmu_table_info *ti;
	/*
	 * Alias pages are mapped read-only, write permission is toggled
	 * at runtime when a page needs to be populated.
	 */
	uint32_t attr = TEE_MATTR_VALID_BLOCK |
			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
			TEE_MATTR_SECURE | TEE_MATTR_PR;

	DMSG("0x%" PRIxPA, pa);

	ti = find_table_info(pager_alias_next_free);
	idx = core_mmu_va2idx(ti, pager_alias_next_free);
	core_mmu_set_entry(ti, idx, pa, attr);
	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
	pager_alias_next_free += SMALL_PAGE_SIZE;
	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
				      tee_mm_get_bytes(pager_alias_area)))
		pager_alias_next_free = 0;
	return (void *)core_mmu_idx2va(ti, idx);
}

static void area_insert_tail(struct tee_pager_area *area)
{
	uint32_t exceptions = pager_lock_check_stack(8);

	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);

	pager_unlock(exceptions);
}
KEEP_PAGER(area_insert_tail);

void tee_pager_add_core_area(vaddr_t base, enum tee_pager_area_type type,
			     struct fobj *fobj)
{
	struct tee_pager_area *area = NULL;
	uint32_t flags = 0;
	size_t fobj_pgidx = 0;
	vaddr_t b = base;
	size_t s = 0;
	size_t s2 = 0;

	/* Check fobj before it's dereferenced for the size below */
	if (!fobj)
		panic();
	s = fobj->num_pages * SMALL_PAGE_SIZE;

	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : type %d", base, base + s, type);

	if (base & SMALL_PAGE_MASK || !s) {
		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, s);
		panic();
	}

	switch (type) {
	case PAGER_AREA_TYPE_RO:
		flags = TEE_MATTR_PRX;
		break;
	case PAGER_AREA_TYPE_RW:
		flags = TEE_MATTR_PRW;
		break;
	case PAGER_AREA_TYPE_LOCK:
		flags = TEE_MATTR_PRW | TEE_MATTR_LOCKED;
		break;
	default:
		panic();
	}

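	/*
	 * Split the range so that no area crosses a pgdir (translation
	 * table) boundary: each area is served by exactly one page table,
	 * looked up with find_core_pgt() below.
	 */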
	while (s) {
		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
		area = calloc(1, sizeof(*area));
		if (!area)
			panic("alloc_area");

		area->fobj = fobj_get(fobj);
		area->fobj_pgidx = fobj_pgidx;
		area->type = type;
		area->pgt = find_core_pgt(b);
		area->base = b;
		area->size = s2;
		area->flags = flags;
		area_insert_tail(area);

		b += s2;
		s -= s2;
		fobj_pgidx += s2 / SMALL_PAGE_SIZE;
	}
}

static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
					vaddr_t va)
{
	struct tee_pager_area *area;

	if (!areas)
		return NULL;

	TAILQ_FOREACH(area, areas, link) {
		if (core_is_buffer_inside(va, 1, area->base, area->size))
			return area;
	}
	return NULL;
}

#ifdef CFG_PAGED_USER_TA
static struct tee_pager_area *find_uta_area(vaddr_t va)
{
	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;

	if (!is_user_ta_ctx(ctx))
		return NULL;
	return find_area(to_user_ta_ctx(ctx)->areas, va);
}
#else
static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
{
	return NULL;
}
#endif /*CFG_PAGED_USER_TA*/

static uint32_t get_area_mattr(uint32_t area_flags)
{
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));

	return attr;
}

static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
{
	struct core_mmu_table_info *ti;
	paddr_t pa;
	unsigned idx;

	ti = find_table_info((vaddr_t)pmem->va_alias);
	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
	core_mmu_get_entry(ti, idx, &pa, NULL);
	return pa;
}

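/*
 * Populates the physical page aliased at @va_alias with the content of
 * the page at @page_va of @area. The alias mapping is temporarily made
 * writable while the backing fobj fills the page; for read-only areas
 * the write permission is revoked again before returning.
 */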
static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
			void *va_alias)
{
	size_t fobj_pgidx = ((page_va - area->base) >> SMALL_PAGE_SHIFT) +
			    area->fobj_pgidx;
	struct core_mmu_table_info *ti;
	uint32_t attr_alias;
	paddr_t pa_alias;
	unsigned int idx_alias;

	/* Ensure we are allowed to write to the aliased virtual page */
	ti = find_table_info((vaddr_t)va_alias);
	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
	if (!(attr_alias & TEE_MATTR_PW)) {
		attr_alias |= TEE_MATTR_PW;
		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
		tlbi_mva_allasid((vaddr_t)va_alias);
	}

	asan_tag_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
	if (fobj_load_page(area->fobj, fobj_pgidx, va_alias)) {
		EMSG("PH 0x%" PRIxVA " failed", page_va);
		panic();
	}
	switch (area->type) {
	case PAGER_AREA_TYPE_RO:
		incr_ro_hits();
		/* Forbid write to aliases for read-only (maybe exec) pages */
		attr_alias &= ~TEE_MATTR_PW;
		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
		tlbi_mva_allasid((vaddr_t)va_alias);
		break;
	case PAGER_AREA_TYPE_RW:
		incr_rw_hits();
		break;
	case PAGER_AREA_TYPE_LOCK:
		break;
	default:
		panic();
	}
	asan_tag_no_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
}

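/*
 * Writes a dirty page back to its backing fobj before the physical page
 * is reused. Clean pages need no write-back: the backing store already
 * holds their current content.
 */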
static void tee_pager_save_page(struct tee_pager_pmem *pmem)
{
	if (pmem_is_dirty(pmem)) {
		size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
		size_t fobj_pgidx = (pmem->pgidx - (offs >> SMALL_PAGE_SHIFT)) +
				    pmem->area->fobj_pgidx;

		asan_tag_access(pmem->va_alias,
				(uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
		if (fobj_save_page(pmem->area->fobj, fobj_pgidx,
				   pmem->va_alias))
			panic("fobj_save_page");
		asan_tag_no_access(pmem->va_alias,
				   (uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
	}
}

#ifdef CFG_PAGED_USER_TA
static void free_area(struct tee_pager_area *area)
{
	fobj_put(area->fobj);
	free(area);
}

static TEE_Result pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
				     struct fobj *fobj)
{
	struct tee_pager_area *area;
	vaddr_t b = base;
	size_t fobj_pgidx = 0;
	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;

	if (!utc->areas) {
		utc->areas = malloc(sizeof(*utc->areas));
		if (!utc->areas)
			return TEE_ERROR_OUT_OF_MEMORY;
		TAILQ_INIT(utc->areas);
	}

	while (s) {
		size_t s2;

		if (find_area(utc->areas, b))
			return TEE_ERROR_BAD_PARAMETERS;

		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
		area = calloc(1, sizeof(*area));
		if (!area)
			return TEE_ERROR_OUT_OF_MEMORY;

		/* Table info will be set when the context is activated. */
		area->fobj = fobj_get(fobj);
		area->fobj_pgidx = fobj_pgidx;
		area->type = PAGER_AREA_TYPE_RW;
		area->base = b;
		area->size = s2;
		area->flags = TEE_MATTR_PRW | TEE_MATTR_URWX;

		TAILQ_INSERT_TAIL(utc->areas, area, link);
		b += s2;
		s -= s2;
		fobj_pgidx += s2 / SMALL_PAGE_SIZE;
	}

	return TEE_SUCCESS;
}

TEE_Result tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
			    struct fobj *fobj)
{
	TEE_Result res = TEE_SUCCESS;
	struct thread_specific_data *tsd = thread_get_tsd();
	struct tee_pager_area *area = NULL;
	struct core_mmu_table_info dir_info = { NULL };

	if (&utc->ctx != tsd->ctx) {
		/*
		 * Changes are to a utc that isn't active. Just add the
		 * areas, page tables will be dealt with later.
		 */
		return pager_add_uta_area(utc, base, fobj);
	}

	/*
	 * Assign page tables before adding areas to be able to tell which
	 * are newly added and should be removed in case of failure.
	 */
	tee_pager_assign_uta_tables(utc);
	res = pager_add_uta_area(utc, base, fobj);
	if (res) {
		struct tee_pager_area *next_a;

		/* Remove all added areas */
		TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
			if (!area->pgt) {
				TAILQ_REMOVE(utc->areas, area, link);
				free_area(area);
			}
		}
		return res;
	}

	/*
	 * Assign page tables to the new areas and make sure that the page
	 * tables are registered in the upper table.
	 */
	tee_pager_assign_uta_tables(utc);
	core_mmu_get_user_pgdir(&dir_info);
	TAILQ_FOREACH(area, utc->areas, link) {
		paddr_t pa;
		size_t idx;
		uint32_t attr;

		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
		core_mmu_get_entry(&dir_info, idx, &pa, &attr);

		/*
		 * Check if the page table is already in use, if so it's
		 * already registered.
		 */
		if (area->pgt->num_used_entries) {
			assert(attr & TEE_MATTR_TABLE);
			assert(pa == virt_to_phys(area->pgt->tbl));
			continue;
		}

		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
		pa = virt_to_phys(area->pgt->tbl);
		assert(pa);
		/*
		 * Note that the update of the table entry is guaranteed to
		 * be atomic.
		 */
		core_mmu_set_entry(&dir_info, idx, pa, attr);
	}

	return TEE_SUCCESS;
}

static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
				   struct pgt *pgt)
{
	assert(pgt);
	ti->table = pgt->tbl;
	ti->va_base = pgt->vabase;
	ti->level = TBL_LEVEL;
	ti->shift = TBL_SHIFT;
	ti->num_entries = TBL_NUM_ENTRIES;
}

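/*
 * Moves an area, together with any physical pages currently mapping it,
 * from its old page table to @new_pgt at @new_base. The pager lock is
 * held across the update since entries of two live tables change.
 */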
static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
			   vaddr_t new_base)
{
	uint32_t exceptions = pager_lock_check_stack(64);

	/*
	 * If there's no pgt assigned to the old area there's no pages to
	 * deal with either, just update with a new pgt and base.
	 */
	if (area->pgt) {
		struct core_mmu_table_info old_ti;
		struct core_mmu_table_info new_ti;
		struct tee_pager_pmem *pmem;

		init_tbl_info_from_pgt(&old_ti, area->pgt);
		init_tbl_info_from_pgt(&new_ti, new_pgt);

		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
			vaddr_t va;
			paddr_t pa;
			uint32_t attr;

			if (pmem->area != area)
				continue;
			core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr);
			core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0);

			assert(pa == get_pmem_pa(pmem));
			assert(attr);
			assert(area->pgt->num_used_entries);
			area->pgt->num_used_entries--;

			va = core_mmu_idx2va(&old_ti, pmem->pgidx);
			va = va - area->base + new_base;
			pmem->pgidx = core_mmu_va2idx(&new_ti, va);
			core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr);
			new_pgt->num_used_entries++;
		}
	}

	area->pgt = new_pgt;
	area->base = new_base;
	pager_unlock(exceptions);
}
KEEP_PAGER(transpose_area);

void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
				   vaddr_t src_base,
				   struct user_ta_ctx *dst_utc,
				   vaddr_t dst_base, struct pgt **dst_pgt,
				   size_t size)
{
	struct tee_pager_area *area;
	struct tee_pager_area *next_a;

	TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
		vaddr_t new_area_base;
		size_t new_idx;

		if (!core_is_buffer_inside(area->base, area->size,
					  src_base, size))
			continue;

		TAILQ_REMOVE(src_utc->areas, area, link);

		/* Preserve the area's offset within the moved region */
		new_area_base = dst_base + (area->base - src_base);
		new_idx = (new_area_base - dst_pgt[0]->vabase) /
			  CORE_MMU_PGDIR_SIZE;
		assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
		       dst_pgt[new_idx]->vabase);
		transpose_area(area, dst_pgt[new_idx], new_area_base);

		/*
		 * Assert that this will not cause any conflicts in the new
		 * utc.  This should already be guaranteed, but a bug here
		 * could be tricky to find.
		 */
		assert(!find_area(dst_utc->areas, area->base));
		TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
	}
}

static void rem_area(struct tee_pager_area_head *area_head,
		     struct tee_pager_area *area)
{
	struct tee_pager_pmem *pmem;
	uint32_t exceptions;

	exceptions = pager_lock_check_stack(64);

	TAILQ_REMOVE(area_head, area, link);

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (pmem->area == area) {
			area_set_entry(area, pmem->pgidx, 0, 0);
			tlbi_mva_allasid(area_idx2va(area, pmem->pgidx));
			pgt_dec_used_entries(area->pgt);
			pmem->area = NULL;
			pmem->pgidx = INVALID_PGIDX;
		}
	}

	pager_unlock(exceptions);
	free_area(area);
}
KEEP_PAGER(rem_area);

void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
			      size_t size)
{
	struct tee_pager_area *area;
	struct tee_pager_area *next_a;
	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);

	TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
		if (core_is_buffer_inside(area->base, area->size, base, s))
			rem_area(utc->areas, area);
	}
}

void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
{
	struct tee_pager_area *area;

	if (!utc->areas)
		return;

	while (true) {
		area = TAILQ_FIRST(utc->areas);
		if (!area)
			break;
		TAILQ_REMOVE(utc->areas, area, link);
		free_area(area);
	}

	free(utc->areas);
}

bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
				 size_t size, uint32_t flags)
{
	bool ret = false;
	vaddr_t b = base;
	size_t s = size;
	size_t s2 = 0;
	struct tee_pager_area *area = find_area(utc->areas, b);
	uint32_t exceptions = 0;
	struct tee_pager_pmem *pmem = NULL;
	paddr_t pa = 0;
	uint32_t a = 0;
	uint32_t f = 0;
	uint32_t f2 = 0;

	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
	if (f & TEE_MATTR_UW)
		f |= TEE_MATTR_PW;
	f = get_area_mattr(f);

	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);

	while (s) {
		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
		if (!area || area->base != b || area->size != s2) {
			ret = false;
			goto out;
		}
		b += s2;
		s -= s2;

		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
			if (pmem->area != area)
				continue;
			area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
			assert(pa == get_pmem_pa(pmem));
			if (a == f)
				continue;
			area_set_entry(pmem->area, pmem->pgidx, 0, 0);
			tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));

			pmem->flags &= ~PMEM_FLAG_HIDDEN;
			if (pmem_is_dirty(pmem))
				f2 = f;
			else
				f2 = f & ~(TEE_MATTR_UW | TEE_MATTR_PW);
			area_set_entry(pmem->area, pmem->pgidx, pa, f2);
			/*
			 * Make sure the table update is visible before
			 * continuing.
			 */
			dsb_ishst();

			if (flags & TEE_MATTR_UX) {
				void *va = (void *)area_idx2va(pmem->area,
							       pmem->pgidx);

				cache_op_inner(DCACHE_AREA_CLEAN, va,
						SMALL_PAGE_SIZE);
				cache_op_inner(ICACHE_AREA_INVALIDATE, va,
						SMALL_PAGE_SIZE);
			}
		}

		area->flags = f;
		area = TAILQ_NEXT(area, link);
	}

	ret = true;
out:
	pager_unlock(exceptions);
	return ret;
}
KEEP_PAGER(tee_pager_set_uta_area_attr);
#endif /*CFG_PAGED_USER_TA*/

static struct tee_pager_pmem *pmem_find(struct tee_pager_area *area,
					unsigned int pgidx)
{
	struct tee_pager_pmem *pmem = NULL;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
		if (pmem->area == area && pmem->pgidx == pgidx)
			return pmem;

	return NULL;
}

static bool tee_pager_unhide_page(struct tee_pager_area *area,
				  unsigned int pgidx)
{
	struct tee_pager_pmem *pmem = pmem_find(area, pgidx);
	uint32_t a = get_area_mattr(area->flags);
	paddr_t pa = 0;

	if (!pmem || pmem->pgidx == INVALID_PGIDX || !pmem_is_hidden(pmem))
		return false;

	/* page is hidden, show and move to back */

	/* If it's not a dirty page, then it should be read only. */
	if (!pmem_is_dirty(pmem))
		a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);

	pmem->flags &= ~PMEM_FLAG_HIDDEN;
	area_get_entry(pmem->area, pmem->pgidx, &pa, NULL);
	assert(pa == get_pmem_pa(pmem));
	area_set_entry(pmem->area, pmem->pgidx, pa, a);
	/*
	 * Note that TLB invalidation isn't needed since
	 * there wasn't a valid mapping before. We should
	 * use a barrier though, to make sure that the
	 * change is visible.
	 */
	dsb_ishst();

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	incr_hidden_hits();
	return true;
}

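/*
 * Unmaps ("hides") up to TEE_PAGER_NHIDE of the oldest mapped pages so
 * that the next access to one of them faults. Such faults feed
 * tee_pager_unhide_page() with recent-usage information, which keeps the
 * pmem list roughly LRU ordered.
 */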
static void tee_pager_hide_pages(void)
{
	struct tee_pager_pmem *pmem;
	size_t n = 0;
	paddr_t pa = 0;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (n >= TEE_PAGER_NHIDE)
			break;
		n++;

		/* we cannot hide pages when pmem->area is not defined. */
		if (!pmem->area)
			continue;

		if (pmem_is_hidden(pmem))
			continue;

		pmem->flags |= PMEM_FLAG_HIDDEN;
		area_get_entry(pmem->area, pmem->pgidx, &pa, NULL);
		assert(pa == get_pmem_pa(pmem));
		area_set_entry(pmem->area, pmem->pgidx, pa, 0);
		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
	}
}

/*
 * Find mapped pmem, hide and move to pageable pmem.
 * Returns false if the page was not mapped, true if it was.
 */
static bool tee_pager_release_one_phys(struct tee_pager_area *area,
				       vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;
	unsigned pgidx;
	paddr_t pa;
	uint32_t attr;

	pgidx = area_va2idx(area, page_va);
	area_get_entry(area, pgidx, &pa, &attr);

	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);

	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
		if (pmem->area != area || pmem->pgidx != pgidx)
			continue;

		assert(pa == get_pmem_pa(pmem));
		area_set_entry(area, pgidx, 0, 0);
		pgt_dec_used_entries(area->pgt);
		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
		pmem->area = NULL;
		pmem->pgidx = INVALID_PGIDX;
		tee_pager_npages++;
		set_npages();
		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
		incr_zi_released();
		return true;
	}

	return false;
}

/* Finds the oldest page and unmaps it from its old virtual address */
static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
{
	struct tee_pager_pmem *pmem;

	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
	if (!pmem) {
		EMSG("No pmem entries");
		return NULL;
	}
	if (pmem->pgidx != INVALID_PGIDX) {
		assert(pmem->area && pmem->area->pgt);
		area_set_entry(pmem->area, pmem->pgidx, 0, 0);
		pgt_dec_used_entries(pmem->area->pgt);
		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
		tee_pager_save_page(pmem);
	}

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	pmem->pgidx = INVALID_PGIDX;
	pmem->area = NULL;
	pmem->flags = 0;
	if (area->type == PAGER_AREA_TYPE_LOCK) {
		/* Move page to lock list */
		if (tee_pager_npages <= 0)
			panic("running out of pages");
		tee_pager_npages--;
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
	} else {
		/* move page to back */
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	return pmem;
}

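/*
 * Returns true when the abort needs no further paging action: the entry
 * was already valid (e.g. fixed up by another core), write permission was
 * granted and the page marked dirty, or the access is a genuine user TA
 * fault. *handled is set when the fault was resolved; if it is left false
 * while true is returned the faulting TA is to be terminated.
 */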
static bool pager_update_permissions(struct tee_pager_area *area,
			struct abort_info *ai, bool *handled)
{
	unsigned int pgidx = area_va2idx(area, ai->va);
	struct tee_pager_pmem *pmem = NULL;
	uint32_t attr = 0;
	paddr_t pa = 0;

	*handled = false;

	area_get_entry(area, pgidx, &pa, &attr);

	/* Not mapped */
	if (!(attr & TEE_MATTR_VALID_BLOCK))
		return false;

	/* Not readable, should not happen */
	if (abort_is_user_exception(ai)) {
		if (!(attr & TEE_MATTR_UR))
			return true;
	} else {
		if (!(attr & TEE_MATTR_PR)) {
			abort_print_error(ai);
			panic();
		}
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
			/* Check attempting to execute from a non-exec page */
			if (abort_is_user_exception(ai)) {
				if (!(attr & TEE_MATTR_UX))
					return true;
			} else {
				if (!(attr & TEE_MATTR_PX)) {
					abort_print_error(ai);
					panic();
				}
			}
		}
		/* Since the page is mapped now it's OK */
		break;
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		/* Check attempting to write to an RO page */
		pmem = pmem_find(area, pgidx);
		if (!pmem)
			panic();
		if (abort_is_user_exception(ai)) {
			if (!(area->flags & TEE_MATTR_UW))
				return true;
			if (!(attr & TEE_MATTR_UW)) {
				FMSG("Dirty %p",
				     (void *)(ai->va & ~SMALL_PAGE_MASK));
				pmem->flags |= PMEM_FLAG_DIRTY;
				area_set_entry(area, pgidx, pa,
					       get_area_mattr(area->flags));
				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
			}
		} else {
			if (!(area->flags & TEE_MATTR_PW)) {
				abort_print_error(ai);
				panic();
			}
			if (!(attr & TEE_MATTR_PW)) {
				FMSG("Dirty %p",
				     (void *)(ai->va & ~SMALL_PAGE_MASK));
				pmem->flags |= PMEM_FLAG_DIRTY;
				area_set_entry(area, pgidx, pa,
					       get_area_mattr(area->flags));
				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
			}
		}
		/* Since permissions have been updated now it's OK */
		break;
	default:
		/* Some fault we can't deal with */
		if (abort_is_user_exception(ai))
			return true;
		abort_print_error(ai);
		panic();
	}
	*handled = true;
	return true;
}

#ifdef CFG_TEE_CORE_DEBUG
static void stat_handle_fault(void)
{
	static size_t num_faults;
	static size_t min_npages = SIZE_MAX;
	static size_t total_min_npages = SIZE_MAX;

	num_faults++;
	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
		DMSG("nfaults %zu npages %zu (min %zu)",
		     num_faults, tee_pager_npages, min_npages);
		min_npages = tee_pager_npages; /* reset */
	}
	if (tee_pager_npages < min_npages)
		min_npages = tee_pager_npages;
	if (tee_pager_npages < total_min_npages)
		total_min_npages = tee_pager_npages;
}
#else
static void stat_handle_fault(void)
{
}
#endif

bool tee_pager_handle_fault(struct abort_info *ai)
{
	struct tee_pager_area *area;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
	uint32_t exceptions;
	bool ret;

#ifdef TEE_PAGER_DEBUG_PRINT
	abort_print(ai);
#endif

	/*
	 * We're updating pages that can affect several active CPUs at a
	 * time below. We end up here because a thread tries to access some
	 * memory that isn't available. We have to be careful when making
	 * that memory available as other threads may succeed in accessing
	 * that address the moment after we've made it available.
	 *
	 * That means that we can't just map the memory and populate the
	 * page, instead we use the aliased mapping to populate the page
	 * and once everything is ready we map it.
	 */
	exceptions = pager_lock(ai);

	stat_handle_fault();

	/* check if the access is valid */
	if (abort_is_user_exception(ai)) {
		area = find_uta_area(ai->va);
	} else {
		area = find_area(&tee_pager_area_head, ai->va);
		if (!area)
			area = find_uta_area(ai->va);
	}
	if (!area || !area->pgt) {
		ret = false;
		goto out;
	}

	if (!tee_pager_unhide_page(area, area_va2idx(area, page_va))) {
		struct tee_pager_pmem *pmem = NULL;
		uint32_t attr;
		paddr_t pa;

		/*
		 * The page wasn't hidden, but some other core may have
		 * updated the table entry before we got here or we need
		 * to make a read-only page read-write (dirty).
		 */
		if (pager_update_permissions(area, ai, &ret)) {
			/*
			 * Nothing more to do with the abort. The problem
			 * could already have been dealt with from another
			 * core or if ret is false the TA will be paniced.
			 */
			goto out;
		}

		pmem = tee_pager_get_page(area);
		if (!pmem) {
			abort_print(ai);
			panic();
		}

		/* load page code & data */
		tee_pager_load_page(area, page_va, pmem->va_alias);

		pmem->area = area;
		pmem->pgidx = area_va2idx(area, ai->va);
		attr = get_area_mattr(area->flags);
		/*
		 * Pages from PAGER_AREA_TYPE_RW start read-only to be
		 * able to tell when they are updated and should be tagged
		 * as dirty.
		 */
		if (area->type == PAGER_AREA_TYPE_RW)
			attr &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
		pa = get_pmem_pa(pmem);

		/*
		 * We've updated the page using the aliased mapping and
		 * some cache maintenance is now needed if it's an
		 * executable page.
		 *
		 * Since the d-cache is a Physically-indexed,
		 * physically-tagged (PIPT) cache we can clean either the
		 * aliased address or the real virtual address. In this
		 * case we choose the real virtual address.
		 *
		 * The i-cache can also be PIPT, but may be something else
		 * too like VIPT. The current code requires the caches to
		 * implement the IVIPT extension, that is:
		 * "instruction cache maintenance is required only after
		 * writing new data to a physical address that holds an
		 * instruction."
		 *
		 * To portably invalidate the icache the page has to
		 * be mapped at the final virtual address but not
		 * executable.
		 */
		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
			uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
					TEE_MATTR_PW | TEE_MATTR_UW;

			/* Set a temporary read-only mapping */
			area_set_entry(pmem->area, pmem->pgidx, pa,
				       attr & ~mask);
			tlbi_mva_allasid(page_va);

			/*
			 * Doing these operations to LoUIS (Level of
			 * unification, Inner Shareable) would be enough
			 */
			cache_op_inner(DCACHE_AREA_CLEAN, (void *)page_va,
				       SMALL_PAGE_SIZE);
			cache_op_inner(ICACHE_AREA_INVALIDATE, (void *)page_va,
				       SMALL_PAGE_SIZE);

			/* Set the final mapping */
			area_set_entry(area, pmem->pgidx, pa, attr);
			tlbi_mva_allasid(page_va);
		} else {
			area_set_entry(area, pmem->pgidx, pa, attr);
			/*
			 * No need to flush TLB for this entry, it was
			 * invalid. We should use a barrier though, to make
			 * sure that the change is visible.
			 */
			dsb_ishst();
		}
		pgt_inc_used_entries(area->pgt);

		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
	}

	tee_pager_hide_pages();
	ret = true;
out:
	pager_unlock(exceptions);
	return ret;
}

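/*
 * Feeds the physical pages backing [vaddr, vaddr + npages) to the pager.
 * With @unmap the pages are unmapped and go straight to the pool of free
 * pages, otherwise they stay mapped and keep backing the pager area that
 * covers them.
 */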
void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
	size_t n;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

	/* setup memory */
	for (n = 0; n < npages; n++) {
		struct core_mmu_table_info *ti;
		struct tee_pager_pmem *pmem;
		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
		unsigned int pgidx;
		paddr_t pa;
		uint32_t attr;

		ti = find_table_info(va);
		pgidx = core_mmu_va2idx(ti, va);
		/*
		 * Note that we can only support adding pages in the
		 * valid range of this table info, currently not a problem.
		 */
		core_mmu_get_entry(ti, pgidx, &pa, &attr);

		/* Ignore unmapped pages/blocks */
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		pmem = calloc(1, sizeof(struct tee_pager_pmem));
		if (!pmem)
			panic("out of mem");

		pmem->va_alias = pager_add_alias_page(pa);

		if (unmap) {
			pmem->area = NULL;
			pmem->pgidx = INVALID_PGIDX;
			core_mmu_set_entry(ti, pgidx, 0, 0);
			pgt_dec_used_entries(find_core_pgt(va));
		} else {
			/*
			 * The page is still mapped, let's assign the area
			 * and update the protection bits accordingly.
			 */
			pmem->area = find_area(&tee_pager_area_head, va);
			assert(pmem->area->pgt == find_core_pgt(va));
			pmem->pgidx = pgidx;
			assert(pa == get_pmem_pa(pmem));
			area_set_entry(pmem->area, pgidx, pa,
				       get_area_mattr(pmem->area->flags));
		}

		tee_pager_npages++;
		incr_npages_all();
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	/*
	 * As this is done at inits, invalidate all TLBs once instead of
	 * targeting only the modified entries.
	 */
	tlbi_all();
}

#ifdef CFG_PAGED_USER_TA
static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
{
	struct pgt *p = pgt;

	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
		p = SLIST_NEXT(p, link);
	return p;
}

void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
{
	struct tee_pager_area *area;
	struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);

	TAILQ_FOREACH(area, utc->areas, link) {
		if (!area->pgt)
			area->pgt = find_pgt(pgt, area->base);
		else
			assert(area->pgt == find_pgt(pgt, area->base));
		if (!area->pgt)
			panic();
	}
}

static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
{
	assert(pmem->area && pmem->area->pgt);

	area_set_entry(pmem->area, pmem->pgidx, 0, 0);
	tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
	tee_pager_save_page(pmem);
	assert(pmem->area->pgt->num_used_entries);
	pmem->area->pgt->num_used_entries--;
	pmem->pgidx = INVALID_PGIDX;
	pmem->area = NULL;
	pmem->flags = 0;
}

void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
{
	struct tee_pager_pmem *pmem;
	struct tee_pager_area *area;
	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);

	if (!pgt->num_used_entries)
		goto out;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
			continue;
		if (pmem->area->pgt == pgt)
			pager_save_and_release_entry(pmem);
	}
	assert(!pgt->num_used_entries);

out:
	if (is_user_ta_ctx(pgt->ctx)) {
		TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
			if (area->pgt == pgt)
				area->pgt = NULL;
		}
	}

	pager_unlock(exceptions);
}
KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
#endif /*CFG_PAGED_USER_TA*/

void tee_pager_release_phys(void *addr, size_t size)
{
	bool unmapped = false;
	vaddr_t va = (vaddr_t)addr;
	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
	struct tee_pager_area *area;
	uint32_t exceptions;

	if (end <= begin)
		return;

	exceptions = pager_lock_check_stack(128);

	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
		area = find_area(&tee_pager_area_head, va);
		if (!area)
			panic();
		unmapped |= tee_pager_release_one_phys(area, va);
	}

	if (unmapped)
		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);

	pager_unlock(exceptions);
}
KEEP_PAGER(tee_pager_release_phys);

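/*
 * Allocates locked, pager-backed core memory. The pages are populated on
 * demand and, being PAGER_AREA_TYPE_LOCK, are never paged out once
 * populated; the physical pages can be handed back with
 * tee_pager_release_phys(). A minimal usage sketch:
 *
 *	void *p = tee_pager_alloc(2 * SMALL_PAGE_SIZE);
 *
 *	if (!p)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 */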
void *tee_pager_alloc(size_t size)
{
	tee_mm_entry_t *mm = NULL;
	uint8_t *smem = NULL;
	size_t num_pages = 0;
	struct fobj *fobj = NULL;

	if (!size)
		return NULL;

	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
	if (!mm)
		return NULL;

	smem = (uint8_t *)tee_mm_get_smem(mm);
	num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
	fobj = fobj_locked_paged_alloc(num_pages);
	if (!fobj) {
		tee_mm_free(mm);
		return NULL;
	}

	tee_pager_add_core_area((vaddr_t)smem, PAGER_AREA_TYPE_LOCK, fobj);
	fobj_put(fobj);

	asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE);

	return smem;
}