xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 7513149ec5777eeb171b2705e6117667dea4fc54)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <io.h>
10 #include <keep.h>
11 #include <kernel/abort.h>
12 #include <kernel/asan.h>
13 #include <kernel/panic.h>
14 #include <kernel/spinlock.h>
15 #include <kernel/tee_misc.h>
16 #include <kernel/tee_ta_manager.h>
17 #include <kernel/thread.h>
18 #include <kernel/tlb_helpers.h>
19 #include <mm/core_memprot.h>
20 #include <mm/fobj.h>
21 #include <mm/tee_mm.h>
22 #include <mm/tee_pager.h>
23 #include <stdlib.h>
24 #include <sys/queue.h>
25 #include <tee_api_defines.h>
26 #include <trace.h>
27 #include <types_ext.h>
28 #include <utee_defines.h>
29 #include <util.h>
30 
31 enum area_type {
32 	AREA_TYPE_RO,
33 	AREA_TYPE_RW,
34 	AREA_TYPE_LOCK,
35 };
36 
37 struct tee_pager_area {
38 	struct fobj *fobj;
39 	enum area_type type;
40 	uint32_t flags;
41 	vaddr_t base;
42 	size_t size;
43 	struct pgt *pgt;
44 	TAILQ_ENTRY(tee_pager_area) link;
45 };
46 
47 TAILQ_HEAD(tee_pager_area_head, tee_pager_area);
48 
49 static struct tee_pager_area_head tee_pager_area_head =
50 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
51 
52 #define INVALID_PGIDX	UINT_MAX
53 
54 /*
55  * struct tee_pager_pmem - Represents a physical page used for paging.
56  *
 57  * @pgidx	index of the page's entry in the area's translation table
 58  * @va_alias	Virtual address where the physical page is always aliased.
 59  *		Used during remapping of the page when the content needs to
 60  *		be updated before it's available at the new location.
61  * @area	a pointer to the pager area
62  */
63 struct tee_pager_pmem {
64 	unsigned pgidx;
65 	void *va_alias;
66 	struct tee_pager_area *area;
67 	TAILQ_ENTRY(tee_pager_pmem) link;
68 };
69 
70 /* The list of physical pages. The first page in the list is the oldest */
71 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
72 
73 static struct tee_pager_pmem_head tee_pager_pmem_head =
74 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
75 
76 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
77 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
78 
79 /* Maximum number of pages to hide at a time */
80 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
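/*
 * Worked example (illustrative numbers only): with 24 registered
 * physical pages, TEE_PAGER_NHIDE evaluates to 24 / 3 = 8, so at most
 * the 8 oldest mapped pages are hidden on each call to
 * tee_pager_hide_pages().
 */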
81 
82 /* Number of registered physical pages, used when hiding pages. */
83 static size_t tee_pager_npages;
84 
85 #ifdef CFG_WITH_STATS
86 static struct tee_pager_stats pager_stats;
87 
88 static inline void incr_ro_hits(void)
89 {
90 	pager_stats.ro_hits++;
91 }
92 
93 static inline void incr_rw_hits(void)
94 {
95 	pager_stats.rw_hits++;
96 }
97 
98 static inline void incr_hidden_hits(void)
99 {
100 	pager_stats.hidden_hits++;
101 }
102 
103 static inline void incr_zi_released(void)
104 {
105 	pager_stats.zi_released++;
106 }
107 
108 static inline void incr_npages_all(void)
109 {
110 	pager_stats.npages_all++;
111 }
112 
113 static inline void set_npages(void)
114 {
115 	pager_stats.npages = tee_pager_npages;
116 }
117 
118 void tee_pager_get_stats(struct tee_pager_stats *stats)
119 {
120 	*stats = pager_stats;
121 
122 	pager_stats.hidden_hits = 0;
123 	pager_stats.ro_hits = 0;
124 	pager_stats.rw_hits = 0;
125 	pager_stats.zi_released = 0;
126 }
127 
128 #else /* CFG_WITH_STATS */
129 static inline void incr_ro_hits(void) { }
130 static inline void incr_rw_hits(void) { }
131 static inline void incr_hidden_hits(void) { }
132 static inline void incr_zi_released(void) { }
133 static inline void incr_npages_all(void) { }
134 static inline void set_npages(void) { }
135 
136 void tee_pager_get_stats(struct tee_pager_stats *stats)
137 {
138 	memset(stats, 0, sizeof(struct tee_pager_stats));
139 }
140 #endif /* CFG_WITH_STATS */
141 
142 #define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
143 #define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
144 #define TBL_SHIFT	SMALL_PAGE_SHIFT
145 
146 #define EFFECTIVE_VA_SIZE \
147 	(ROUNDUP(TEE_RAM_VA_START + TEE_RAM_VA_SIZE, \
148 		 CORE_MMU_PGDIR_SIZE) - \
149 	 ROUNDDOWN(TEE_RAM_VA_START, CORE_MMU_PGDIR_SIZE))
150 
151 static struct pager_table {
152 	struct pgt pgt;
153 	struct core_mmu_table_info tbl_info;
154 } pager_tables[EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE];
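/*
 * Sizing sketch (editorial example, values for illustration only):
 * assuming a 2 MiB CORE_MMU_PGDIR_SIZE (as with LPAE and 4 KiB small
 * pages), a 3 MiB TEE RAM VA window that starts 1 MiB into a page
 * directory spans two directories, so EFFECTIVE_VA_SIZE rounds out to
 * 4 MiB and pager_tables gets two entries, one translation table per
 * 2 MiB of covered VA space.
 */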
155 
156 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
157 
158 /* Defines the range of the alias area */
159 static tee_mm_entry_t *pager_alias_area;
160 /*
161  * Physical pages are added in a stack-like fashion to the alias area.
162  * @pager_alias_next_free gives the address of the next free entry if
163  * it is non-zero.
164  */
165 static uintptr_t pager_alias_next_free;
166 
167 #ifdef CFG_TEE_CORE_DEBUG
168 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
169 
170 static uint32_t pager_lock_dldetect(const char *func, const int line,
171 				    struct abort_info *ai)
172 {
173 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
174 	unsigned int retries = 0;
175 	unsigned int reminder = 0;
176 
177 	while (!cpu_spin_trylock(&pager_spinlock)) {
178 		retries++;
179 		if (!retries) {
180 			/* wrapped, time to report */
181 			trace_printf(func, line, TRACE_ERROR, true,
182 				     "possible spinlock deadlock reminder %u",
183 				     reminder);
184 			if (reminder < UINT_MAX)
185 				reminder++;
186 			if (ai)
187 				abort_print(ai);
188 		}
189 	}
190 
191 	return exceptions;
192 }
193 #else
194 static uint32_t pager_lock(struct abort_info __unused *ai)
195 {
196 	return cpu_spin_lock_xsave(&pager_spinlock);
197 }
198 #endif
199 
200 static uint32_t pager_lock_check_stack(size_t stack_size)
201 {
202 	if (stack_size) {
203 		int8_t buf[stack_size];
204 		size_t n;
205 
206 		/*
207 		 * Make sure to touch all pages of the stack that we expect
208 		 * to use with this lock held. We need to take any
209 		 * page faults before the lock is taken or we'll deadlock
210 		 * the pager. The pages that are populated in this way will
211 		 * eventually be released at certain save transitions of
212 		 * the thread.
213 		 */
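		/*
		 * Editorial note: io_write8() performs a volatile store, so
		 * the compiler cannot optimize away the touches of the
		 * otherwise unused VLA; one byte per SMALL_PAGE_SIZE is
		 * enough to fault in every page of the buffer.
		 */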
214 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
215 			io_write8((vaddr_t)buf + n, 1);
216 		io_write8((vaddr_t)buf + stack_size - 1, 1);
217 	}
218 
219 	return pager_lock(NULL);
220 }
221 
222 static void pager_unlock(uint32_t exceptions)
223 {
224 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
225 }
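/*
 * Editorial note on the locking discipline: updates of paged translation
 * table entries below are done with pager_spinlock held and exceptions
 * masked, so a fault taken on another core either sees the old entry or
 * the fully updated one. Functions that manipulate pager state while the
 * lock is held are marked with KEEP_PAGER() so that their own code is
 * never paged out.
 */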
226 
227 void *tee_pager_phys_to_virt(paddr_t pa)
228 {
229 	struct core_mmu_table_info ti;
230 	unsigned idx;
231 	uint32_t a;
232 	paddr_t p;
233 	vaddr_t v;
234 	size_t n;
235 
236 	/*
237 	 * Most addresses are mapped linearly, try that first if possible.
238 	 */
239 	if (!tee_pager_get_table_info(pa, &ti))
240 		return NULL; /* impossible pa */
241 	idx = core_mmu_va2idx(&ti, pa);
242 	core_mmu_get_entry(&ti, idx, &p, &a);
243 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
244 		return (void *)core_mmu_idx2va(&ti, idx);
245 
246 	n = 0;
247 	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
248 	while (true) {
249 		while (idx < TBL_NUM_ENTRIES) {
250 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
251 			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
252 				return NULL;
253 
254 			core_mmu_get_entry(&pager_tables[n].tbl_info,
255 					   idx, &p, &a);
256 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
257 				return (void *)v;
258 			idx++;
259 		}
260 
261 		n++;
262 		if (n >= ARRAY_SIZE(pager_tables))
263 			return NULL;
264 		idx = 0;
265 	}
266 
267 	return NULL;
268 }
269 
270 static struct pager_table *find_pager_table_may_fail(vaddr_t va)
271 {
272 	size_t n;
273 	const vaddr_t mask = CORE_MMU_PGDIR_MASK;
274 
275 	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
276 	    CORE_MMU_PGDIR_SHIFT;
277 	if (n >= ARRAY_SIZE(pager_tables))
278 		return NULL;
279 
280 	assert(va >= pager_tables[n].tbl_info.va_base &&
281 	       va <= (pager_tables[n].tbl_info.va_base | mask));
282 
283 	return pager_tables + n;
284 }
285 
286 static struct pager_table *find_pager_table(vaddr_t va)
287 {
288 	struct pager_table *pt = find_pager_table_may_fail(va);
289 
290 	assert(pt);
291 	return pt;
292 }
293 
294 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
295 {
296 	struct pager_table *pt = find_pager_table_may_fail(va);
297 
298 	if (!pt)
299 		return false;
300 
301 	*ti = pt->tbl_info;
302 	return true;
303 }
304 
305 static struct core_mmu_table_info *find_table_info(vaddr_t va)
306 {
307 	return &find_pager_table(va)->tbl_info;
308 }
309 
310 static struct pgt *find_core_pgt(vaddr_t va)
311 {
312 	return &find_pager_table(va)->pgt;
313 }
314 
315 void tee_pager_set_alias_area(tee_mm_entry_t *mm)
316 {
317 	struct pager_table *pt;
318 	unsigned idx;
319 	vaddr_t smem = tee_mm_get_smem(mm);
320 	size_t nbytes = tee_mm_get_bytes(mm);
321 	vaddr_t v;
322 
323 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
324 
325 	assert(!pager_alias_area);
326 	pager_alias_area = mm;
327 	pager_alias_next_free = smem;
328 
329 	/* Clear all mappings in the alias area */
330 	pt = find_pager_table(smem);
331 	idx = core_mmu_va2idx(&pt->tbl_info, smem);
332 	while (pt <= (pager_tables + ARRAY_SIZE(pager_tables) - 1)) {
333 		while (idx < TBL_NUM_ENTRIES) {
334 			v = core_mmu_idx2va(&pt->tbl_info, idx);
335 			if (v >= (smem + nbytes))
336 				goto out;
337 
338 			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
339 			idx++;
340 		}
341 
342 		pt++;
343 		idx = 0;
344 	}
345 
346 out:
347 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
348 }
349 
350 static size_t tbl_usage_count(struct core_mmu_table_info *ti)
351 {
352 	size_t n;
353 	paddr_t pa;
354 	size_t usage = 0;
355 
356 	for (n = 0; n < ti->num_entries; n++) {
357 		core_mmu_get_entry(ti, n, &pa, NULL);
358 		if (pa)
359 			usage++;
360 	}
361 	return usage;
362 }
363 
364 static void area_get_entry(struct tee_pager_area *area, size_t idx,
365 			   paddr_t *pa, uint32_t *attr)
366 {
367 	assert(area->pgt);
368 	assert(idx < TBL_NUM_ENTRIES);
369 	core_mmu_get_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
370 }
371 
372 static void area_set_entry(struct tee_pager_area *area, size_t idx,
373 			   paddr_t pa, uint32_t attr)
374 {
375 	assert(area->pgt);
376 	assert(idx < TBL_NUM_ENTRIES);
377 	core_mmu_set_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
378 }
379 
380 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
381 {
382 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
383 }
384 
385 static vaddr_t area_idx2va(struct tee_pager_area *area, size_t idx)
386 {
387 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
388 }
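/*
 * Index <-> VA sketch (illustrative values): with 4 KiB small pages and
 * an area whose base lies 0x10000 bytes into its 2 MiB page directory,
 * area_va2idx(area, area->base) is 0x10000 >> 12 = 16, i.e. the index is
 * relative to the start of the page directory, not to area->base.
 */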
389 
390 void tee_pager_early_init(void)
391 {
392 	size_t n;
393 
394 	/*
395 	 * Note that this depends on add_pager_vaspace() adding vaspace
396 	 * after end of memory.
397 	 */
398 	for (n = 0; n < ARRAY_SIZE(pager_tables); n++) {
399 		if (!core_mmu_find_table(NULL, TEE_RAM_VA_START +
400 					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
401 					 &pager_tables[n].tbl_info))
402 			panic("can't find mmu tables");
403 
404 		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
405 			panic("Unsupported page size in translation table");
406 		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
407 		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);
408 
409 		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
410 		pgt_set_used_entries(&pager_tables[n].pgt,
411 				tbl_usage_count(&pager_tables[n].tbl_info));
412 	}
413 }
414 
415 static void *pager_add_alias_page(paddr_t pa)
416 {
417 	unsigned idx;
418 	struct core_mmu_table_info *ti;
419 	/* Alias pages are mapped read-only; write access is enabled temporarily when a page is loaded */
420 	uint32_t attr = TEE_MATTR_VALID_BLOCK |
421 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
422 			TEE_MATTR_SECURE | TEE_MATTR_PR;
423 
424 	DMSG("0x%" PRIxPA, pa);
425 
426 	ti = find_table_info(pager_alias_next_free);
427 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
428 	core_mmu_set_entry(ti, idx, pa, attr);
429 	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
430 	pager_alias_next_free += SMALL_PAGE_SIZE;
431 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
432 				      tee_mm_get_bytes(pager_alias_area)))
433 		pager_alias_next_free = 0;
434 	return (void *)core_mmu_idx2va(ti, idx);
435 }
436 
437 static struct tee_pager_area *alloc_area(struct pgt *pgt,
438 					 vaddr_t base, size_t size,
439 					 uint32_t flags, void *store,
440 					 void *hashes)
441 {
442 	struct tee_pager_area *area = calloc(1, sizeof(*area));
443 	size_t num_pages = size / SMALL_PAGE_SIZE;
444 
445 	if (!area)
446 		return NULL;
447 
448 	if (flags & (TEE_MATTR_PW | TEE_MATTR_UW)) {
449 		if (flags & TEE_MATTR_LOCKED) {
450 			area->type = AREA_TYPE_LOCK;
451 			area->fobj = fobj_locked_paged_alloc(num_pages);
452 			goto out;
453 		}
454 		area->fobj = fobj_rw_paged_alloc(num_pages);
455 		area->type = AREA_TYPE_RW;
456 	} else {
457 		area->fobj = fobj_ro_paged_alloc(num_pages, hashes, store);
458 		area->type = AREA_TYPE_RO;
459 	}
460 
461 out:
462 	if (!area->fobj) {
463 		free(area);
464 		return NULL;
465 	}
466 
467 	area->pgt = pgt;
468 	area->base = base;
469 	area->size = size;
470 	area->flags = flags;
471 	return area;
472 }
473 
474 static void area_insert_tail(struct tee_pager_area *area)
475 {
476 	uint32_t exceptions = pager_lock_check_stack(8);
477 
478 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
479 
480 	pager_unlock(exceptions);
481 }
482 KEEP_PAGER(area_insert_tail);
483 
484 void tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
485 			     void *store, void *hashes)
486 {
487 	struct tee_pager_area *area;
488 	vaddr_t b = base;
489 	size_t s = size;
490 	size_t s2;
491 
492 
493 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
494 		base, base + size, flags, store, hashes);
495 
496 	if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) {
497 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);
498 		panic();
499 	}
500 
501 	if (!(flags & TEE_MATTR_PW) && (!store || !hashes))
502 		panic("non-write pages must provide store and hashes");
503 
504 	if ((flags & TEE_MATTR_PW) && (store || hashes))
505 		panic("write pages cannot provide store or hashes");
506 
507 	while (s) {
508 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
509 		area = alloc_area(find_core_pgt(b), b, s2, flags,
510 				  (uint8_t *)store + b - base,
511 				  (uint8_t *)hashes + (b - base) /
512 							SMALL_PAGE_SIZE *
513 							TEE_SHA256_HASH_SIZE);
514 		if (!area)
515 			panic("alloc_area");
516 		area_insert_tail(area);
517 		b += s2;
518 		s -= s2;
519 	}
520 }
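/*
 * Usage sketch (hypothetical caller, names for illustration only): a
 * read-only paged area provides its backing store plus one SHA-256 hash
 * per 4 KiB page, e.g.
 *
 *	tee_pager_add_core_area(va, npages * SMALL_PAGE_SIZE,
 *				TEE_MATTR_PR | TEE_MATTR_PX,
 *				stored_pages, page_hashes);
 *
 * while a writable area passes NULL for both store and hashes, as
 * tee_pager_alloc() below does for its locked allocation.
 */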
521 
522 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
523 					vaddr_t va)
524 {
525 	struct tee_pager_area *area;
526 
527 	if (!areas)
528 		return NULL;
529 
530 	TAILQ_FOREACH(area, areas, link) {
531 		if (core_is_buffer_inside(va, 1, area->base, area->size))
532 			return area;
533 	}
534 	return NULL;
535 }
536 
537 #ifdef CFG_PAGED_USER_TA
538 static struct tee_pager_area *find_uta_area(vaddr_t va)
539 {
540 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
541 
542 	if (!is_user_ta_ctx(ctx))
543 		return NULL;
544 	return find_area(to_user_ta_ctx(ctx)->areas, va);
545 }
546 #else
547 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
548 {
549 	return NULL;
550 }
551 #endif /*CFG_PAGED_USER_TA*/
552 
553 
554 static uint32_t get_area_mattr(uint32_t area_flags)
555 {
556 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
557 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
558 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
559 
560 	return attr;
561 }
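/*
 * For example, an area created with TEE_MATTR_PRW is mapped with
 * TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE | cached memory attributes |
 * TEE_MATTR_PRW; flags outside TEE_MATTR_PRWX/TEE_MATTR_URWX, such as
 * TEE_MATTR_LOCKED, are never propagated to the page table entry.
 */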
562 
563 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
564 {
565 	struct core_mmu_table_info *ti;
566 	paddr_t pa;
567 	unsigned idx;
568 
569 	ti = find_table_info((vaddr_t)pmem->va_alias);
570 	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
571 	core_mmu_get_entry(ti, idx, &pa, NULL);
572 	return pa;
573 }
574 
575 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
576 			void *va_alias)
577 {
578 	size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
579 	struct core_mmu_table_info *ti;
580 	uint32_t attr_alias;
581 	paddr_t pa_alias;
582 	unsigned int idx_alias;
583 
584 	/* Ensure we are allowed to write to the aliased virtual page */
585 	ti = find_table_info((vaddr_t)va_alias);
586 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
587 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
588 	if (!(attr_alias & TEE_MATTR_PW)) {
589 		attr_alias |= TEE_MATTR_PW;
590 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
591 		tlbi_mva_allasid((vaddr_t)va_alias);
592 	}
593 
594 	asan_tag_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
595 	if (fobj_load_page(area->fobj, idx, va_alias)) {
596 		EMSG("PH 0x%" PRIxVA " failed", page_va);
597 		panic();
598 	}
599 	switch (area->type) {
600 	case AREA_TYPE_RO:
601 		incr_ro_hits();
602 		/* Forbid write to aliases for read-only (maybe exec) pages */
603 		attr_alias &= ~TEE_MATTR_PW;
604 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
605 		tlbi_mva_allasid((vaddr_t)va_alias);
606 		break;
607 	case AREA_TYPE_RW:
608 		incr_rw_hits();
609 		break;
610 	case AREA_TYPE_LOCK:
611 		break;
612 	default:
613 		panic();
614 	}
615 	asan_tag_no_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
616 }
617 
618 static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
619 {
620 	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
621 				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;
622 
623 	if (pmem->area->type == AREA_TYPE_RW && (attr & dirty_bits)) {
624 		size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
625 		size_t idx = pmem->pgidx - (offs >> SMALL_PAGE_SHIFT);
626 
627 		assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW));
628 		asan_tag_access(pmem->va_alias,
629 				(uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
630 		if (fobj_save_page(pmem->area->fobj, idx, pmem->va_alias))
631 			panic("fobj_save_page");
632 		asan_tag_no_access(pmem->va_alias,
633 				   (uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
634 	}
635 }
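/*
 * Note that pages which were never made writable are skipped above: a
 * page in an AREA_TYPE_RW area is initially mapped without PW/UW and
 * only gains them on the first write fault (see
 * pager_update_permissions()), so only pages that may actually have been
 * modified are written back to the fobj.
 */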
636 
637 #ifdef CFG_PAGED_USER_TA
638 static void free_area(struct tee_pager_area *area)
639 {
640 	fobj_put(area->fobj);
641 	free(area);
642 }
643 
644 static bool pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
645 			       size_t size)
646 {
647 	struct tee_pager_area *area;
648 	uint32_t flags;
649 	vaddr_t b = base;
650 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
651 
652 	if (!utc->areas) {
653 		utc->areas = malloc(sizeof(*utc->areas));
654 		if (!utc->areas)
655 			return false;
656 		TAILQ_INIT(utc->areas);
657 	}
658 
659 	flags = TEE_MATTR_PRW | TEE_MATTR_URWX;
660 
661 	while (s) {
662 		size_t s2;
663 
664 		if (find_area(utc->areas, b))
665 			return false;
666 
667 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
668 
669 		/* Table info will be set when the context is activated. */
670 		area = alloc_area(NULL, b, s2, flags, NULL, NULL);
671 		if (!area)
672 			return false;
673 		TAILQ_INSERT_TAIL(utc->areas, area, link);
674 		b += s2;
675 		s -= s2;
676 	}
677 
678 	return true;
679 }
680 
681 bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size)
682 {
683 	struct thread_specific_data *tsd = thread_get_tsd();
684 	struct tee_pager_area *area;
685 	struct core_mmu_table_info dir_info = { NULL };
686 
687 	if (&utc->ctx != tsd->ctx) {
688 		/*
689 		 * Changes are to a utc that isn't active. Just add the
690 		 * areas; page tables will be dealt with later.
691 		 */
692 		return pager_add_uta_area(utc, base, size);
693 	}
694 
695 	/*
696 	 * Assign page tables before adding areas to be able to tell which
697 	 * are newly added and should be removed in case of failure.
698 	 */
699 	tee_pager_assign_uta_tables(utc);
700 	if (!pager_add_uta_area(utc, base, size)) {
701 		struct tee_pager_area *next_a;
702 
703 		/* Remove all added areas */
704 		TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
705 			if (!area->pgt) {
706 				TAILQ_REMOVE(utc->areas, area, link);
707 				free_area(area);
708 			}
709 		}
710 		return false;
711 	}
712 
713 	/*
714 	 * Assign page tables to the new areas and make sure that the page
715 	 * tables are registered in the upper table.
716 	 */
717 	tee_pager_assign_uta_tables(utc);
718 	core_mmu_get_user_pgdir(&dir_info);
719 	TAILQ_FOREACH(area, utc->areas, link) {
720 		paddr_t pa;
721 		size_t idx;
722 		uint32_t attr;
723 
724 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
725 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
726 
727 		/*
728 		 * Check if the page table is already in use; if it is, it's
729 		 * already registered.
730 		 */
731 		if (area->pgt->num_used_entries) {
732 			assert(attr & TEE_MATTR_TABLE);
733 			assert(pa == virt_to_phys(area->pgt->tbl));
734 			continue;
735 		}
736 
737 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
738 		pa = virt_to_phys(area->pgt->tbl);
739 		assert(pa);
740 		/*
741 		 * Note that the update of the table entry is guaranteed to
742 		 * be atomic.
743 		 */
744 		core_mmu_set_entry(&dir_info, idx, pa, attr);
745 	}
746 
747 	return true;
748 }
749 
750 static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
751 				   struct pgt *pgt)
752 {
753 	assert(pgt);
754 	ti->table = pgt->tbl;
755 	ti->va_base = pgt->vabase;
756 	ti->level = TBL_LEVEL;
757 	ti->shift = TBL_SHIFT;
758 	ti->num_entries = TBL_NUM_ENTRIES;
759 }
760 
761 static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
762 			   vaddr_t new_base)
763 {
764 	uint32_t exceptions = pager_lock_check_stack(64);
765 
766 	/*
767 	 * If there's no pgt assigned to the old area there are no pages to
768 	 * deal with either; just update with a new pgt and base.
769 	 */
770 	if (area->pgt) {
771 		struct core_mmu_table_info old_ti;
772 		struct core_mmu_table_info new_ti;
773 		struct tee_pager_pmem *pmem;
774 
775 		init_tbl_info_from_pgt(&old_ti, area->pgt);
776 		init_tbl_info_from_pgt(&new_ti, new_pgt);
777 
778 
779 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
780 			vaddr_t va;
781 			paddr_t pa;
782 			uint32_t attr;
783 
784 			if (pmem->area != area)
785 				continue;
786 			core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr);
787 			core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0);
788 
789 			assert(pa == get_pmem_pa(pmem));
790 			assert(attr);
791 			assert(area->pgt->num_used_entries);
792 			area->pgt->num_used_entries--;
793 
794 			va = core_mmu_idx2va(&old_ti, pmem->pgidx);
795 			va = va - area->base + new_base;
796 			pmem->pgidx = core_mmu_va2idx(&new_ti, va);
797 			core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr);
798 			new_pgt->num_used_entries++;
799 		}
800 	}
801 
802 	area->pgt = new_pgt;
803 	area->base = new_base;
804 	pager_unlock(exceptions);
805 }
806 KEEP_PAGER(transpose_area);
807 
808 void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
809 				   vaddr_t src_base,
810 				   struct user_ta_ctx *dst_utc,
811 				   vaddr_t dst_base, struct pgt **dst_pgt,
812 				   size_t size)
813 {
814 	struct tee_pager_area *area;
815 	struct tee_pager_area *next_a;
816 
817 	TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
818 		vaddr_t new_area_base;
819 		size_t new_idx;
820 
821 		if (!core_is_buffer_inside(area->base, area->size,
822 					  src_base, size))
823 			continue;
824 
825 		TAILQ_REMOVE(src_utc->areas, area, link);
826 
827 		new_area_base = dst_base + (src_base - area->base);
828 		new_idx = (new_area_base - dst_pgt[0]->vabase) /
829 			  CORE_MMU_PGDIR_SIZE;
830 		assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
831 		       dst_pgt[new_idx]->vabase);
832 		transpose_area(area, dst_pgt[new_idx], new_area_base);
833 
834 		/*
835 		 * Assert that this will not cause any conflicts in the new
836 		 * utc.  This should already be guaranteed, but a bug here
837 		 * could be tricky to find.
838 		 */
839 		assert(!find_area(dst_utc->areas, area->base));
840 		TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
841 	}
842 }
843 
844 static void rem_area(struct tee_pager_area_head *area_head,
845 		     struct tee_pager_area *area)
846 {
847 	struct tee_pager_pmem *pmem;
848 	uint32_t exceptions;
849 
850 	exceptions = pager_lock_check_stack(64);
851 
852 	TAILQ_REMOVE(area_head, area, link);
853 
854 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
855 		if (pmem->area == area) {
856 			area_set_entry(area, pmem->pgidx, 0, 0);
857 			tlbi_mva_allasid(area_idx2va(area, pmem->pgidx));
858 			pgt_dec_used_entries(area->pgt);
859 			pmem->area = NULL;
860 			pmem->pgidx = INVALID_PGIDX;
861 		}
862 	}
863 
864 	pager_unlock(exceptions);
865 	free_area(area);
866 }
867 KEEP_PAGER(rem_area);
868 
869 void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
870 			      size_t size)
871 {
872 	struct tee_pager_area *area;
873 	struct tee_pager_area *next_a;
874 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
875 
876 	TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
877 		if (core_is_buffer_inside(area->base, area->size, base, s))
878 			rem_area(utc->areas, area);
879 	}
880 }
881 
882 void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
883 {
884 	struct tee_pager_area *area;
885 
886 	if (!utc->areas)
887 		return;
888 
889 	while (true) {
890 		area = TAILQ_FIRST(utc->areas);
891 		if (!area)
892 			break;
893 		TAILQ_REMOVE(utc->areas, area, link);
894 		free_area(area);
895 	}
896 
897 	free(utc->areas);
898 }
899 
900 bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
901 				 size_t size, uint32_t flags)
902 {
903 	bool ret;
904 	vaddr_t b = base;
905 	size_t s = size;
906 	size_t s2;
907 	struct tee_pager_area *area = find_area(utc->areas, b);
908 	uint32_t exceptions;
909 	struct tee_pager_pmem *pmem;
910 	paddr_t pa;
911 	uint32_t a;
912 	uint32_t f;
913 
914 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
915 	if (f & TEE_MATTR_UW)
916 		f |= TEE_MATTR_PW;
917 	f = get_area_mattr(f);
918 
919 	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
920 
921 	while (s) {
922 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
923 		if (!area || area->base != b || area->size != s2) {
924 			ret = false;
925 			goto out;
926 		}
927 		b += s2;
928 		s -= s2;
929 
930 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
931 			if (pmem->area != area)
932 				continue;
933 			area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
934 			if (a & TEE_MATTR_VALID_BLOCK)
935 				assert(pa == get_pmem_pa(pmem));
936 			else
937 				pa = get_pmem_pa(pmem);
938 			if (a == f)
939 				continue;
940 			area_set_entry(pmem->area, pmem->pgidx, 0, 0);
941 			tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
942 			if (!(flags & TEE_MATTR_UW))
943 				tee_pager_save_page(pmem, a);
944 
945 			area_set_entry(pmem->area, pmem->pgidx, pa, f);
946 			/*
947 			 * Make sure the table update is visible before
948 			 * continuing.
949 			 */
950 			dsb_ishst();
951 
952 			if (flags & TEE_MATTR_UX) {
953 				void *va = (void *)area_idx2va(pmem->area,
954 							       pmem->pgidx);
955 
956 				cache_op_inner(DCACHE_AREA_CLEAN, va,
957 						SMALL_PAGE_SIZE);
958 				cache_op_inner(ICACHE_AREA_INVALIDATE, va,
959 						SMALL_PAGE_SIZE);
960 			}
961 		}
962 
963 		area->flags = f;
964 		area = TAILQ_NEXT(area, link);
965 	}
966 
967 	ret = true;
968 out:
969 	pager_unlock(exceptions);
970 	return ret;
971 }
972 KEEP_PAGER(tee_pager_set_uta_area_attr);
973 #endif /*CFG_PAGED_USER_TA*/
974 
975 static bool tee_pager_unhide_page(vaddr_t page_va)
976 {
977 	struct tee_pager_pmem *pmem;
978 
979 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
980 		paddr_t pa;
981 		uint32_t attr;
982 
983 		if (pmem->pgidx == INVALID_PGIDX)
984 			continue;
985 
986 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
987 
988 		if (!(attr &
989 		     (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
990 			continue;
991 
992 		if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
993 			uint32_t a = get_area_mattr(pmem->area->flags);
994 
995 			/* page is hidden, show and move to back */
996 			if (pa != get_pmem_pa(pmem))
997 				panic("unexpected pa");
998 
999 			/*
1000 			 * If it's not a dirty block, then it should be
1001 			 * read only.
1002 			 */
1003 			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
1004 				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1005 			else
1006 				FMSG("Unhide %#" PRIxVA, page_va);
1007 
1008 			if (page_va == 0x8000a000)
1009 				FMSG("unhide %#" PRIxVA " a %#" PRIX32,
1010 					page_va, a);
1011 			area_set_entry(pmem->area, pmem->pgidx, pa, a);
1012 			/*
1013 			 * Note that TLB invalidation isn't needed since
1014 			 * there wasn't a valid mapping before. We should
1015 			 * use a barrier though, to make sure that the
1016 			 * change is visible.
1017 			 */
1018 			dsb_ishst();
1019 
1020 			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1021 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1022 			incr_hidden_hits();
1023 			return true;
1024 		}
1025 	}
1026 
1027 	return false;
1028 }
1029 
1030 static void tee_pager_hide_pages(void)
1031 {
1032 	struct tee_pager_pmem *pmem;
1033 	size_t n = 0;
1034 
1035 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1036 		paddr_t pa;
1037 		uint32_t attr;
1038 		uint32_t a;
1039 
1040 		if (n >= TEE_PAGER_NHIDE)
1041 			break;
1042 		n++;
1043 
1044 		/* we cannot hide pages when pmem->area is not defined. */
1045 		if (!pmem->area)
1046 			continue;
1047 
1048 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
1049 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1050 			continue;
1051 
1052 		assert(pa == get_pmem_pa(pmem));
1053 		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)){
1054 			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
1055 			FMSG("Hide %#" PRIxVA,
1056 			     area_idx2va(pmem->area, pmem->pgidx));
1057 		} else
1058 			a = TEE_MATTR_HIDDEN_BLOCK;
1059 
1060 		area_set_entry(pmem->area, pmem->pgidx, pa, a);
1061 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1062 	}
1063 }
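/*
 * Together, tee_pager_hide_pages() and tee_pager_unhide_page() form a
 * simple aging scheme: on each fault the oldest TEE_PAGER_NHIDE pages are
 * made inaccessible, and a hidden page that is touched again faults, is
 * unhidden and moved to the back of the list. Frequently used pages
 * therefore tend to stay resident while cold pages drift towards the
 * front of the list, where tee_pager_get_page() picks its victim.
 */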
1064 
1065 /*
1066  * Find the mapped pmem, unmap it and move it back to the pageable list.
1067  * Return false if the page was not mapped, and true if it was.
1068  */
1069 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1070 				       vaddr_t page_va)
1071 {
1072 	struct tee_pager_pmem *pmem;
1073 	unsigned pgidx;
1074 	paddr_t pa;
1075 	uint32_t attr;
1076 
1077 	pgidx = area_va2idx(area, page_va);
1078 	area_get_entry(area, pgidx, &pa, &attr);
1079 
1080 	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
1081 
1082 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1083 		if (pmem->area != area || pmem->pgidx != pgidx)
1084 			continue;
1085 
1086 		assert(pa == get_pmem_pa(pmem));
1087 		area_set_entry(area, pgidx, 0, 0);
1088 		pgt_dec_used_entries(area->pgt);
1089 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1090 		pmem->area = NULL;
1091 		pmem->pgidx = INVALID_PGIDX;
1092 		tee_pager_npages++;
1093 		set_npages();
1094 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1095 		incr_zi_released();
1096 		return true;
1097 	}
1098 
1099 	return false;
1100 }
1101 
1102 /* Finds the oldest page and unmaps it from its old virtual address */
1103 static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
1104 {
1105 	struct tee_pager_pmem *pmem;
1106 
1107 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1108 	if (!pmem) {
1109 		EMSG("No pmem entries");
1110 		return NULL;
1111 	}
1112 	if (pmem->pgidx != INVALID_PGIDX) {
1113 		uint32_t a;
1114 
1115 		assert(pmem->area && pmem->area->pgt);
1116 		area_get_entry(pmem->area, pmem->pgidx, NULL, &a);
1117 		area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1118 		pgt_dec_used_entries(pmem->area->pgt);
1119 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1120 		tee_pager_save_page(pmem, a);
1121 	}
1122 
1123 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1124 	pmem->pgidx = INVALID_PGIDX;
1125 	pmem->area = NULL;
1126 	if (area->type == AREA_TYPE_LOCK) {
1127 		/* Move page to lock list */
1128 		if (tee_pager_npages <= 0)
1129 			panic("running out of pages");
1130 		tee_pager_npages--;
1131 		set_npages();
1132 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1133 	} else {
1134 		/* move page to back */
1135 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1136 	}
1137 
1138 	return pmem;
1139 }
1140 
1141 static bool pager_update_permissions(struct tee_pager_area *area,
1142 			struct abort_info *ai, bool *handled)
1143 {
1144 	unsigned int pgidx = area_va2idx(area, ai->va);
1145 	uint32_t attr;
1146 	paddr_t pa;
1147 
1148 	*handled = false;
1149 
1150 	area_get_entry(area, pgidx, &pa, &attr);
1151 
1152 	/* Not mapped */
1153 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1154 		return false;
1155 
1156 	/* Not readable, should not happen */
1157 	if (abort_is_user_exception(ai)) {
1158 		if (!(attr & TEE_MATTR_UR))
1159 			return true;
1160 	} else {
1161 		if (!(attr & TEE_MATTR_PR)) {
1162 			abort_print_error(ai);
1163 			panic();
1164 		}
1165 	}
1166 
1167 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1168 	case CORE_MMU_FAULT_TRANSLATION:
1169 	case CORE_MMU_FAULT_READ_PERMISSION:
1170 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1171 			/* Check if attempting to execute from a non-executable page */
1172 			if (abort_is_user_exception(ai)) {
1173 				if (!(attr & TEE_MATTR_UX))
1174 					return true;
1175 			} else {
1176 				if (!(attr & TEE_MATTR_PX)) {
1177 					abort_print_error(ai);
1178 					panic();
1179 				}
1180 			}
1181 		}
1182 		/* Since the page is mapped now it's OK */
1183 		break;
1184 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1185 		/* Check if attempting to write to a read-only page */
1186 		if (abort_is_user_exception(ai)) {
1187 			if (!(area->flags & TEE_MATTR_UW))
1188 				return true;
1189 			if (!(attr & TEE_MATTR_UW)) {
1190 				FMSG("Dirty %p",
1191 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1192 				area_set_entry(area, pgidx, pa,
1193 					       get_area_mattr(area->flags));
1194 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1195 			}
1196 
1197 		} else {
1198 			if (!(area->flags & TEE_MATTR_PW)) {
1199 				abort_print_error(ai);
1200 				panic();
1201 			}
1202 			if (!(attr & TEE_MATTR_PW)) {
1203 				FMSG("Dirty %p",
1204 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1205 				area_set_entry(area, pgidx, pa,
1206 					       get_area_mattr(area->flags));
1207 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1208 			}
1209 		}
1210 		/* Since the permissions have been updated it's OK now */
1211 		break;
1212 	default:
1213 		/* Some fault we can't deal with */
1214 		if (abort_is_user_exception(ai))
1215 			return true;
1216 		abort_print_error(ai);
1217 		panic();
1218 	}
1219 	*handled = true;
1220 	return true;
1221 }
1222 
1223 #ifdef CFG_TEE_CORE_DEBUG
1224 static void stat_handle_fault(void)
1225 {
1226 	static size_t num_faults;
1227 	static size_t min_npages = SIZE_MAX;
1228 	static size_t total_min_npages = SIZE_MAX;
1229 
1230 	num_faults++;
1231 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1232 		DMSG("nfaults %zu npages %zu (min %zu)",
1233 		     num_faults, tee_pager_npages, min_npages);
1234 		min_npages = tee_pager_npages; /* reset */
1235 	}
1236 	if (tee_pager_npages < min_npages)
1237 		min_npages = tee_pager_npages;
1238 	if (tee_pager_npages < total_min_npages)
1239 		total_min_npages = tee_pager_npages;
1240 }
1241 #else
1242 static void stat_handle_fault(void)
1243 {
1244 }
1245 #endif
1246 
1247 bool tee_pager_handle_fault(struct abort_info *ai)
1248 {
1249 	struct tee_pager_area *area;
1250 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1251 	uint32_t exceptions;
1252 	bool ret;
1253 
1254 #ifdef TEE_PAGER_DEBUG_PRINT
1255 	abort_print(ai);
1256 #endif
1257 
1258 	/*
1259 	 * We're updating pages that can affect several active CPUs at a
1260 	 * time below. We end up here because a thread tries to access some
1261 	 * memory that isn't available. We have to be careful when making
1262 	 * that memory available as other threads may succeed in accessing
1263 	 * that address the moment after we've made it available.
1264 	 *
1265 	 * That means that we can't just map the memory and populate the
1266 	 * page, instead we use the aliased mapping to populate the page
1267 	 * and once everything is ready we map it.
1268 	 */
1269 	exceptions = pager_lock(ai);
1270 
1271 	stat_handle_fault();
1272 
1273 	/* check if the access is valid */
1274 	if (abort_is_user_exception(ai)) {
1275 		area = find_uta_area(ai->va);
1276 
1277 	} else {
1278 		area = find_area(&tee_pager_area_head, ai->va);
1279 		if (!area)
1280 			area = find_uta_area(ai->va);
1281 	}
1282 	if (!area || !area->pgt) {
1283 		ret = false;
1284 		goto out;
1285 	}
1286 
1287 	if (!tee_pager_unhide_page(page_va)) {
1288 		struct tee_pager_pmem *pmem = NULL;
1289 		uint32_t attr;
1290 		paddr_t pa;
1291 
1292 		/*
1293 		 * The page wasn't hidden, but some other core may have
1294 		 * updated the table entry before we got here or we need
1295 		 * to make a read-only page read-write (dirty).
1296 		 */
1297 		if (pager_update_permissions(area, ai, &ret)) {
1298 			/*
1299 			 * Nothing more to do with the abort. The problem
1300 			 * could already have been dealt with from another
1301 			 * core, or if ret is false the TA will be panicked.
1302 			 */
1303 			goto out;
1304 		}
1305 
1306 		pmem = tee_pager_get_page(area);
1307 		if (!pmem) {
1308 			abort_print(ai);
1309 			panic();
1310 		}
1311 
1312 		/* load page code & data */
1313 		tee_pager_load_page(area, page_va, pmem->va_alias);
1314 
1315 
1316 		pmem->area = area;
1317 		pmem->pgidx = area_va2idx(area, ai->va);
1318 		attr = get_area_mattr(area->flags) &
1319 			~(TEE_MATTR_PW | TEE_MATTR_UW);
1320 		pa = get_pmem_pa(pmem);
1321 
1322 		/*
1323 		 * We've updated the page using the aliased mapping and
1324 		 * some cache maintenance is now needed if it's an
1325 		 * executable page.
1326 		 *
1327 		 * Since the d-cache is a Physically-indexed,
1328 		 * physically-tagged (PIPT) cache we can clean either the
1329 		 * aliased address or the real virtual address. In this
1330 		 * case we choose the real virtual address.
1331 		 *
1332 		 * The i-cache can also be PIPT, but may be something else
1333 		 * too like VIPT. The current code requires the caches to
1334 		 * implement the IVIPT extension, that is:
1335 		 * "instruction cache maintenance is required only after
1336 		 * writing new data to a physical address that holds an
1337 		 * instruction."
1338 		 *
1339 		 * To portably invalidate the icache the page has to
1340 		 * be mapped at the final virtual address but not
1341 		 * executable.
1342 		 */
1343 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1344 			uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1345 					TEE_MATTR_PW | TEE_MATTR_UW;
1346 
1347 			/* Set a temporary read-only mapping */
1348 			area_set_entry(pmem->area, pmem->pgidx, pa,
1349 				       attr & ~mask);
1350 			tlbi_mva_allasid(page_va);
1351 
1352 			/*
1353 			 * Doing these operations to LoUIS (Level of
1354 			 * unification, Inner Shareable) would be enough
1355 			 */
1356 			cache_op_inner(DCACHE_AREA_CLEAN, (void *)page_va,
1357 				       SMALL_PAGE_SIZE);
1358 			cache_op_inner(ICACHE_AREA_INVALIDATE, (void *)page_va,
1359 				       SMALL_PAGE_SIZE);
1360 
1361 			/* Set the final mapping */
1362 			area_set_entry(area, pmem->pgidx, pa, attr);
1363 			tlbi_mva_allasid(page_va);
1364 		} else {
1365 			area_set_entry(area, pmem->pgidx, pa, attr);
1366 			/*
1367 			 * No need to flush TLB for this entry, it was
1368 			 * invalid. We should use a barrier though, to make
1369 			 * sure that the change is visible.
1370 			 */
1371 			dsb_ishst();
1372 		}
1373 		pgt_inc_used_entries(area->pgt);
1374 
1375 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1376 
1377 	}
1378 
1379 	tee_pager_hide_pages();
1380 	ret = true;
1381 out:
1382 	pager_unlock(exceptions);
1383 	return ret;
1384 }
1385 
1386 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1387 {
1388 	size_t n;
1389 
1390 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1391 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1392 
1393 	/* setup memory */
1394 	for (n = 0; n < npages; n++) {
1395 		struct core_mmu_table_info *ti;
1396 		struct tee_pager_pmem *pmem;
1397 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1398 		unsigned int pgidx;
1399 		paddr_t pa;
1400 		uint32_t attr;
1401 
1402 		ti = find_table_info(va);
1403 		pgidx = core_mmu_va2idx(ti, va);
1404 		/*
1405 		 * Note that we can only support adding pages in the
1406 		 * valid range of this table info, currently not a problem.
1407 		 */
1408 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1409 
1410 		/* Ignore unmapped pages/blocks */
1411 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1412 			continue;
1413 
1414 		pmem = malloc(sizeof(struct tee_pager_pmem));
1415 		if (!pmem)
1416 			panic("out of mem");
1417 
1418 		pmem->va_alias = pager_add_alias_page(pa);
1419 
1420 		if (unmap) {
1421 			pmem->area = NULL;
1422 			pmem->pgidx = INVALID_PGIDX;
1423 			core_mmu_set_entry(ti, pgidx, 0, 0);
1424 			pgt_dec_used_entries(find_core_pgt(va));
1425 		} else {
1426 			/*
1427 			 * The page is still mapped, let's assign the area
1428 			 * and update the protection bits accordingly.
1429 			 */
1430 			pmem->area = find_area(&tee_pager_area_head, va);
1431 			assert(pmem->area->pgt == find_core_pgt(va));
1432 			pmem->pgidx = pgidx;
1433 			assert(pa == get_pmem_pa(pmem));
1434 			area_set_entry(pmem->area, pgidx, pa,
1435 				       get_area_mattr(pmem->area->flags));
1436 		}
1437 
1438 		tee_pager_npages++;
1439 		incr_npages_all();
1440 		set_npages();
1441 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1442 	}
1443 
1444 	/*
1445 	 * As this is done at init, invalidate all TLBs once instead of
1446 	 * targeting only the modified entries.
1447 	 */
1448 	tlbi_all();
1449 }
1450 
1451 #ifdef CFG_PAGED_USER_TA
1452 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1453 {
1454 	struct pgt *p = pgt;
1455 
1456 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1457 		p = SLIST_NEXT(p, link);
1458 	return p;
1459 }
1460 
1461 void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
1462 {
1463 	struct tee_pager_area *area;
1464 	struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1465 
1466 	TAILQ_FOREACH(area, utc->areas, link) {
1467 		if (!area->pgt)
1468 			area->pgt = find_pgt(pgt, area->base);
1469 		else
1470 			assert(area->pgt == find_pgt(pgt, area->base));
1471 		if (!area->pgt)
1472 			panic();
1473 	}
1474 }
1475 
1476 static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
1477 {
1478 	uint32_t attr;
1479 
1480 	assert(pmem->area && pmem->area->pgt);
1481 
1482 	area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
1483 	area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1484 	tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1485 	tee_pager_save_page(pmem, attr);
1486 	assert(pmem->area->pgt->num_used_entries);
1487 	pmem->area->pgt->num_used_entries--;
1488 	pmem->pgidx = INVALID_PGIDX;
1489 	pmem->area = NULL;
1490 }
1491 
1492 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1493 {
1494 	struct tee_pager_pmem *pmem;
1495 	struct tee_pager_area *area;
1496 	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1497 
1498 	if (!pgt->num_used_entries)
1499 		goto out;
1500 
1501 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1502 		if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
1503 			continue;
1504 		if (pmem->area->pgt == pgt)
1505 			pager_save_and_release_entry(pmem);
1506 	}
1507 	assert(!pgt->num_used_entries);
1508 
1509 out:
1510 	if (is_user_ta_ctx(pgt->ctx)) {
1511 		TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
1512 			if (area->pgt == pgt)
1513 				area->pgt = NULL;
1514 		}
1515 	}
1516 
1517 	pager_unlock(exceptions);
1518 }
1519 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1520 #endif /*CFG_PAGED_USER_TA*/
1521 
1522 void tee_pager_release_phys(void *addr, size_t size)
1523 {
1524 	bool unmapped = false;
1525 	vaddr_t va = (vaddr_t)addr;
1526 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1527 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1528 	struct tee_pager_area *area;
1529 	uint32_t exceptions;
1530 
1531 	if (end <= begin)
1532 		return;
1533 
1534 	exceptions = pager_lock_check_stack(128);
1535 
1536 	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
1537 		area = find_area(&tee_pager_area_head, va);
1538 		if (!area)
1539 			panic();
1540 		unmapped |= tee_pager_release_one_phys(area, va);
1541 	}
1542 
1543 	if (unmapped)
1544 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1545 
1546 	pager_unlock(exceptions);
1547 }
1548 KEEP_PAGER(tee_pager_release_phys);
1549 
1550 void *tee_pager_alloc(size_t size)
1551 {
1552 	tee_mm_entry_t *mm;
1553 	uint32_t f = TEE_MATTR_PW | TEE_MATTR_PR | TEE_MATTR_LOCKED;
1554 	uint8_t *smem;
1555 	size_t bytes;
1556 
1557 	if (!size)
1558 		return NULL;
1559 
1560 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1561 	if (!mm)
1562 		return NULL;
1563 
1564 	bytes = tee_mm_get_bytes(mm);
1565 	smem = (uint8_t *)tee_mm_get_smem(mm);
1566 	tee_pager_add_core_area((vaddr_t)smem, bytes, f, NULL, NULL);
1567 	asan_tag_access(smem, smem + bytes);
1568 
1569 	return smem;
1570 }
1571
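/*
 * Usage sketch (hypothetical caller, for illustration only): memory from
 * tee_pager_alloc() is backed by a locked fobj, i.e. its pages stay
 * resident once faulted in, and the physical pages can be handed back to
 * the pool of pageable pages with tee_pager_release_phys() when the
 * buffer contents are no longer needed:
 *
 *	void *buf = tee_pager_alloc(2 * SMALL_PAGE_SIZE);
 *
 *	if (buf) {
 *		memset(buf, 0, 2 * SMALL_PAGE_SIZE);
 *		tee_pager_release_phys(buf, 2 * SMALL_PAGE_SIZE);
 *	}
 */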