xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 2bb1139b4fd40631e59c81f119ea1b37b7a754de)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <io.h>
10 #include <keep.h>
11 #include <kernel/abort.h>
12 #include <kernel/asan.h>
13 #include <kernel/panic.h>
14 #include <kernel/spinlock.h>
15 #include <kernel/tee_misc.h>
16 #include <kernel/tee_ta_manager.h>
17 #include <kernel/thread.h>
18 #include <kernel/tlb_helpers.h>
19 #include <mm/core_memprot.h>
20 #include <mm/fobj.h>
21 #include <mm/tee_mm.h>
22 #include <mm/tee_pager.h>
23 #include <stdlib.h>
#include <string.h>
24 #include <sys/queue.h>
25 #include <tee_api_defines.h>
26 #include <trace.h>
27 #include <types_ext.h>
28 #include <utee_defines.h>
29 #include <util.h>
30 
31 enum area_type {
32 	AREA_TYPE_RO,
33 	AREA_TYPE_RW,
34 	AREA_TYPE_LOCK,
35 };
36 
37 struct tee_pager_area {
38 	struct fobj *fobj;
39 	size_t fobj_pgidx;
40 	enum area_type type;
41 	uint32_t flags;
42 	vaddr_t base;
43 	size_t size;
44 	struct pgt *pgt;
45 	TAILQ_ENTRY(tee_pager_area) link;
46 };
47 
48 TAILQ_HEAD(tee_pager_area_head, tee_pager_area);
49 
50 static struct tee_pager_area_head tee_pager_area_head =
51 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
52 
53 #define INVALID_PGIDX	UINT_MAX
54 
55 /*
56  * struct tee_pager_pmem - Represents a physical page used for paging.
57  *
58  * @pgidx	Index of the entry in the area's page table.
59  * @va_alias	Virtual address where the physical page is always aliased.
60  *		Used during remapping of the page when the contents need to
61  *		be updated before they're available at the new location.
62  * @area	Pointer to the pager area the page currently belongs to
63  */
64 struct tee_pager_pmem {
65 	unsigned pgidx;
66 	void *va_alias;
67 	struct tee_pager_area *area;
68 	TAILQ_ENTRY(tee_pager_pmem) link;
69 };
70 
71 /* The list of physical pages. The first page in the list is the oldest */
72 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
73 
74 static struct tee_pager_pmem_head tee_pager_pmem_head =
75 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
76 
77 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
78 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
79 
80 /* Maximum number of pages to hide in one pass of tee_pager_hide_pages() */
81 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
82 
83 /* Number of registered physical pages, used when hiding pages. */
84 static size_t tee_pager_npages;
85 
86 #ifdef CFG_WITH_STATS
87 static struct tee_pager_stats pager_stats;
88 
89 static inline void incr_ro_hits(void)
90 {
91 	pager_stats.ro_hits++;
92 }
93 
94 static inline void incr_rw_hits(void)
95 {
96 	pager_stats.rw_hits++;
97 }
98 
99 static inline void incr_hidden_hits(void)
100 {
101 	pager_stats.hidden_hits++;
102 }
103 
104 static inline void incr_zi_released(void)
105 {
106 	pager_stats.zi_released++;
107 }
108 
109 static inline void incr_npages_all(void)
110 {
111 	pager_stats.npages_all++;
112 }
113 
114 static inline void set_npages(void)
115 {
116 	pager_stats.npages = tee_pager_npages;
117 }
118 
119 void tee_pager_get_stats(struct tee_pager_stats *stats)
120 {
121 	*stats = pager_stats;
122 
123 	pager_stats.hidden_hits = 0;
124 	pager_stats.ro_hits = 0;
125 	pager_stats.rw_hits = 0;
126 	pager_stats.zi_released = 0;
127 }
128 
129 #else /* CFG_WITH_STATS */
130 static inline void incr_ro_hits(void) { }
131 static inline void incr_rw_hits(void) { }
132 static inline void incr_hidden_hits(void) { }
133 static inline void incr_zi_released(void) { }
134 static inline void incr_npages_all(void) { }
135 static inline void set_npages(void) { }
136 
137 void tee_pager_get_stats(struct tee_pager_stats *stats)
138 {
139 	memset(stats, 0, sizeof(struct tee_pager_stats));
140 }
141 #endif /* CFG_WITH_STATS */
142 
143 #define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
144 #define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
145 #define TBL_SHIFT	SMALL_PAGE_SHIFT
146 
147 #define EFFECTIVE_VA_SIZE \
148 	(ROUNDUP(TEE_RAM_VA_START + TEE_RAM_VA_SIZE, \
149 		 CORE_MMU_PGDIR_SIZE) - \
150 	 ROUNDDOWN(TEE_RAM_VA_START, CORE_MMU_PGDIR_SIZE))
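
/*
 * Worked example with hypothetical values: assuming
 * TEE_RAM_VA_START == 0x0e100000, TEE_RAM_VA_SIZE == 0x00200000 and
 * CORE_MMU_PGDIR_SIZE == 0x00200000, then
 *   ROUNDUP(0x0e300000, 0x00200000)   == 0x0e400000
 *   ROUNDDOWN(0x0e100000, 0x00200000) == 0x0e000000
 * so EFFECTIVE_VA_SIZE == 0x00400000, which sizes pager_tables[] below to
 * two entries, one per translation table covering the paged range.
 */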
151 
152 static struct pager_table {
153 	struct pgt pgt;
154 	struct core_mmu_table_info tbl_info;
155 } pager_tables[EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE];
156 
157 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
158 
159 /* Defines the range of the alias area */
160 static tee_mm_entry_t *pager_alias_area;
161 /*
162  * Physical pages are added to the alias area in a stack-like fashion.
163  * @pager_alias_next_free gives the address of the next free entry if
164  * @pager_alias_next_free is != 0.
165  */
166 static uintptr_t pager_alias_next_free;
167 
168 #ifdef CFG_TEE_CORE_DEBUG
169 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
170 
171 static uint32_t pager_lock_dldetect(const char *func, const int line,
172 				    struct abort_info *ai)
173 {
174 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
175 	unsigned int retries = 0;
176 	unsigned int reminder = 0;
177 
178 	while (!cpu_spin_trylock(&pager_spinlock)) {
179 		retries++;
180 		if (!retries) {
181 			/* wrapped, time to report */
182 			trace_printf(func, line, TRACE_ERROR, true,
183 				     "possible spinlock deadlock reminder %u",
184 				     reminder);
185 			if (reminder < UINT_MAX)
186 				reminder++;
187 			if (ai)
188 				abort_print(ai);
189 		}
190 	}
191 
192 	return exceptions;
193 }
194 #else
195 static uint32_t pager_lock(struct abort_info __unused *ai)
196 {
197 	return cpu_spin_lock_xsave(&pager_spinlock);
198 }
199 #endif
200 
201 static uint32_t pager_lock_check_stack(size_t stack_size)
202 {
203 	if (stack_size) {
204 		int8_t buf[stack_size];
205 		size_t n;
206 
207 		/*
208 		 * Make sure to touch all pages of the stack that we expect
209 		 * to use with this lock held. Any page faults have to be
210 		 * taken before the lock is acquired or we'll deadlock the
211 		 * pager. The pages that are populated in this way will
212 		 * eventually be released at certain save transitions of
213 		 * the thread.
214 		 */
215 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
216 			io_write8((vaddr_t)buf + n, 1);
217 		io_write8((vaddr_t)buf + stack_size - 1, 1);
218 	}
219 
220 	return pager_lock(NULL);
221 }
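
/*
 * The stack_size argument to pager_lock_check_stack() is an upper bound on
 * the stack the caller will touch while holding the pager lock. Callers in
 * this file pass small, conservative bounds, for example:
 *
 *	exceptions = pager_lock_check_stack(64);
 *	... critical section that must not fault in new stack pages ...
 *	pager_unlock(exceptions);
 */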
222 
223 static void pager_unlock(uint32_t exceptions)
224 {
225 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
226 }
227 
228 void *tee_pager_phys_to_virt(paddr_t pa)
229 {
230 	struct core_mmu_table_info ti;
231 	unsigned idx;
232 	uint32_t a;
233 	paddr_t p;
234 	vaddr_t v;
235 	size_t n;
236 
237 	/*
238 	 * Most addresses are mapped linearly, try that first if possible.
239 	 */
240 	if (!tee_pager_get_table_info(pa, &ti))
241 		return NULL; /* impossible pa */
242 	idx = core_mmu_va2idx(&ti, pa);
243 	core_mmu_get_entry(&ti, idx, &p, &a);
244 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
245 		return (void *)core_mmu_idx2va(&ti, idx);
246 
247 	n = 0;
248 	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
249 	while (true) {
250 		while (idx < TBL_NUM_ENTRIES) {
251 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
252 			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
253 				return NULL;
254 
255 			core_mmu_get_entry(&pager_tables[n].tbl_info,
256 					   idx, &p, &a);
257 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
258 				return (void *)v;
259 			idx++;
260 		}
261 
262 		n++;
263 		if (n >= ARRAY_SIZE(pager_tables))
264 			return NULL;
265 		idx = 0;
266 	}
267 
268 	return NULL;
269 }
270 
271 static struct pager_table *find_pager_table_may_fail(vaddr_t va)
272 {
273 	size_t n;
274 	const vaddr_t mask = CORE_MMU_PGDIR_MASK;
275 
276 	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
277 	    CORE_MMU_PGDIR_SHIFT;
278 	if (n >= ARRAY_SIZE(pager_tables))
279 		return NULL;
280 
281 	assert(va >= pager_tables[n].tbl_info.va_base &&
282 	       va <= (pager_tables[n].tbl_info.va_base | mask));
283 
284 	return pager_tables + n;
285 }
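
/*
 * Example with hypothetical values: assuming a 2 MiB translation table
 * granule (CORE_MMU_PGDIR_MASK == 0x1fffff, CORE_MMU_PGDIR_SHIFT == 21)
 * and pager_tables[0].tbl_info.va_base == 0x0e000000, a lookup of
 * va == 0x0e203000 gives n == (0x0e200000 - 0x0e000000) >> 21 == 1, that
 * is, the table covering 0x0e200000..0x0e3fffff.
 */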
286 
287 static struct pager_table *find_pager_table(vaddr_t va)
288 {
289 	struct pager_table *pt = find_pager_table_may_fail(va);
290 
291 	assert(pt);
292 	return pt;
293 }
294 
295 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
296 {
297 	struct pager_table *pt = find_pager_table_may_fail(va);
298 
299 	if (!pt)
300 		return false;
301 
302 	*ti = pt->tbl_info;
303 	return true;
304 }
305 
306 static struct core_mmu_table_info *find_table_info(vaddr_t va)
307 {
308 	return &find_pager_table(va)->tbl_info;
309 }
310 
311 static struct pgt *find_core_pgt(vaddr_t va)
312 {
313 	return &find_pager_table(va)->pgt;
314 }
315 
316 void tee_pager_set_alias_area(tee_mm_entry_t *mm)
317 {
318 	struct pager_table *pt;
319 	unsigned idx;
320 	vaddr_t smem = tee_mm_get_smem(mm);
321 	size_t nbytes = tee_mm_get_bytes(mm);
322 	vaddr_t v;
323 
324 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
325 
326 	assert(!pager_alias_area);
327 	pager_alias_area = mm;
328 	pager_alias_next_free = smem;
329 
330 	/* Clear all mappings in the alias area */
331 	pt = find_pager_table(smem);
332 	idx = core_mmu_va2idx(&pt->tbl_info, smem);
333 	while (pt <= (pager_tables + ARRAY_SIZE(pager_tables) - 1)) {
334 		while (idx < TBL_NUM_ENTRIES) {
335 			v = core_mmu_idx2va(&pt->tbl_info, idx);
336 			if (v >= (smem + nbytes))
337 				goto out;
338 
339 			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
340 			idx++;
341 		}
342 
343 		pt++;
344 		idx = 0;
345 	}
346 
347 out:
348 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
349 }
350 
351 static size_t tbl_usage_count(struct core_mmu_table_info *ti)
352 {
353 	size_t n;
354 	paddr_t pa;
355 	size_t usage = 0;
356 
357 	for (n = 0; n < ti->num_entries; n++) {
358 		core_mmu_get_entry(ti, n, &pa, NULL);
359 		if (pa)
360 			usage++;
361 	}
362 	return usage;
363 }
364 
365 static void area_get_entry(struct tee_pager_area *area, size_t idx,
366 			   paddr_t *pa, uint32_t *attr)
367 {
368 	assert(area->pgt);
369 	assert(idx < TBL_NUM_ENTRIES);
370 	core_mmu_get_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
371 }
372 
373 static void area_set_entry(struct tee_pager_area *area, size_t idx,
374 			   paddr_t pa, uint32_t attr)
375 {
376 	assert(area->pgt);
377 	assert(idx < TBL_NUM_ENTRIES);
378 	core_mmu_set_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
379 }
380 
381 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
382 {
383 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
384 }
385 
386 static vaddr_t area_idx2va(struct tee_pager_area *area, size_t idx)
387 {
388 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
389 }
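
/*
 * Example with hypothetical values: with CORE_MMU_PGDIR_MASK == 0x1fffff
 * and area->base == 0x0e102000, the page directory base is 0x0e000000, so
 *   area_va2idx(area, 0x0e105000) == (0x0e105000 - 0x0e000000) >> 12 == 0x105
 *   area_idx2va(area, 0x105)      == 0x0e105000
 * i.e. the index is relative to the start of the translation table, not to
 * area->base.
 */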
390 
391 void tee_pager_early_init(void)
392 {
393 	size_t n;
394 
395 	/*
396 	 * Note that this depends on add_pager_vaspace() adding vaspace
397 	 * after end of memory.
398 	 */
399 	for (n = 0; n < ARRAY_SIZE(pager_tables); n++) {
400 		if (!core_mmu_find_table(NULL, TEE_RAM_VA_START +
401 					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
402 					 &pager_tables[n].tbl_info))
403 			panic("can't find mmu tables");
404 
405 		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
406 			panic("Unsupported page size in translation table");
407 		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
408 		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);
409 
410 		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
411 		pgt_set_used_entries(&pager_tables[n].pgt,
412 				tbl_usage_count(&pager_tables[n].tbl_info));
413 	}
414 }
415 
416 static void *pager_add_alias_page(paddr_t pa)
417 {
418 	unsigned idx;
419 	struct core_mmu_table_info *ti;
420 	/* Alias pages are mapped read-only; the pager enables writes when needed */
421 	uint32_t attr = TEE_MATTR_VALID_BLOCK |
422 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
423 			TEE_MATTR_SECURE | TEE_MATTR_PR;
424 
425 	DMSG("0x%" PRIxPA, pa);
426 
427 	ti = find_table_info(pager_alias_next_free);
428 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
429 	core_mmu_set_entry(ti, idx, pa, attr);
430 	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
431 	pager_alias_next_free += SMALL_PAGE_SIZE;
432 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
433 				      tee_mm_get_bytes(pager_alias_area)))
434 		pager_alias_next_free = 0;
435 	return (void *)core_mmu_idx2va(ti, idx);
436 }
437 
438 static void area_insert_tail(struct tee_pager_area *area)
439 {
440 	uint32_t exceptions = pager_lock_check_stack(8);
441 
442 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
443 
444 	pager_unlock(exceptions);
445 }
446 KEEP_PAGER(area_insert_tail);
447 
448 void tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
449 			     void *store, void *hashes)
450 {
451 	size_t num_pages = size / SMALL_PAGE_SIZE;
452 	struct tee_pager_area *area = NULL;
453 	enum area_type at = AREA_TYPE_RO;
454 	struct fobj *fobj = NULL;
455 	size_t fobj_pgidx = 0;
456 	vaddr_t b = base;
457 	size_t s = size;
458 	size_t s2 = 0;
459 
460 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
461 		base, base + size, flags, store, hashes);
462 
463 	if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) {
464 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);
465 		panic();
466 	}
467 
468 	if (!(flags & TEE_MATTR_PW) && (!store || !hashes))
469 		panic("write pages cannot provide store or hashes");
470 
471 	if ((flags & TEE_MATTR_PW) && (store || hashes))
472 		panic("non-write pages must provide store and hashes");
473 
474 	if (flags & (TEE_MATTR_PW | TEE_MATTR_UW)) {
475 		if (flags & TEE_MATTR_LOCKED) {
476 			at = AREA_TYPE_LOCK;
477 			fobj = fobj_locked_paged_alloc(num_pages);
478 		} else {
479 			at = AREA_TYPE_RW;
480 			fobj = fobj_rw_paged_alloc(num_pages);
481 		}
482 	} else {
483 		at = AREA_TYPE_RO;
484 		fobj = fobj_ro_paged_alloc(num_pages, hashes, store);
485 	}
486 
487 	if (!fobj)
488 		panic();
489 
490 	while (s) {
491 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
492 		area = calloc(1, sizeof(*area));
493 		if (!area)
494 			panic("alloc_area");
495 
496 		if (b != base)
497 			fobj_get(fobj);
498 
499 		area->fobj = fobj;
500 		area->fobj_pgidx = fobj_pgidx;
501 		area->type = at;
502 		area->pgt = find_core_pgt(b);
503 		area->base = b;
504 		area->size = s2;
505 		area->flags = flags;
506 		area_insert_tail(area);
507 
508 		b += s2;
509 		s -= s2;
510 		fobj_pgidx += s2 / SMALL_PAGE_SIZE;
511 	}
512 }
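
/*
 * Minimal usage sketch (ro_base, ro_size, rw_base, rw_size, paged_store and
 * paged_hashes are hypothetical): a read-only executable area must be backed
 * by a pre-built store and its per-page hashes, while a writable area must
 * not provide either since its backing fobj is allocated internally.
 *
 *	tee_pager_add_core_area(ro_base, ro_size, TEE_MATTR_PR | TEE_MATTR_PX,
 *				paged_store, paged_hashes);
 *	tee_pager_add_core_area(rw_base, rw_size, TEE_MATTR_PRW, NULL, NULL);
 */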
513 
514 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
515 					vaddr_t va)
516 {
517 	struct tee_pager_area *area;
518 
519 	if (!areas)
520 		return NULL;
521 
522 	TAILQ_FOREACH(area, areas, link) {
523 		if (core_is_buffer_inside(va, 1, area->base, area->size))
524 			return area;
525 	}
526 	return NULL;
527 }
528 
529 #ifdef CFG_PAGED_USER_TA
530 static struct tee_pager_area *find_uta_area(vaddr_t va)
531 {
532 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
533 
534 	if (!is_user_ta_ctx(ctx))
535 		return NULL;
536 	return find_area(to_user_ta_ctx(ctx)->areas, va);
537 }
538 #else
539 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
540 {
541 	return NULL;
542 }
543 #endif /*CFG_PAGED_USER_TA*/
544 
545 
546 static uint32_t get_area_mattr(uint32_t area_flags)
547 {
548 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
549 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
550 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
551 
552 	return attr;
553 }
554 
555 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
556 {
557 	struct core_mmu_table_info *ti;
558 	paddr_t pa;
559 	unsigned idx;
560 
561 	ti = find_table_info((vaddr_t)pmem->va_alias);
562 	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
563 	core_mmu_get_entry(ti, idx, &pa, NULL);
564 	return pa;
565 }
566 
567 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
568 			void *va_alias)
569 {
570 	size_t fobj_pgidx = ((page_va - area->base) >> SMALL_PAGE_SHIFT) +
571 			    area->fobj_pgidx;
572 	struct core_mmu_table_info *ti;
573 	uint32_t attr_alias;
574 	paddr_t pa_alias;
575 	unsigned int idx_alias;
576 
577 	/* Ensure we are allowed to write to the aliased virtual page */
578 	ti = find_table_info((vaddr_t)va_alias);
579 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
580 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
581 	if (!(attr_alias & TEE_MATTR_PW)) {
582 		attr_alias |= TEE_MATTR_PW;
583 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
584 		tlbi_mva_allasid((vaddr_t)va_alias);
585 	}
586 
587 	asan_tag_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
588 	if (fobj_load_page(area->fobj, fobj_pgidx, va_alias)) {
589 		EMSG("PH 0x%" PRIxVA " failed", page_va);
590 		panic();
591 	}
592 	switch (area->type) {
593 	case AREA_TYPE_RO:
594 		incr_ro_hits();
595 		/* Forbid write to aliases for read-only (maybe exec) pages */
596 		attr_alias &= ~TEE_MATTR_PW;
597 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
598 		tlbi_mva_allasid((vaddr_t)va_alias);
599 		break;
600 	case AREA_TYPE_RW:
601 		incr_rw_hits();
602 		break;
603 	case AREA_TYPE_LOCK:
604 		break;
605 	default:
606 		panic();
607 	}
608 	asan_tag_no_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
609 }
610 
611 static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
612 {
613 	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
614 				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;
615 
616 	if (pmem->area->type == AREA_TYPE_RW && (attr & dirty_bits)) {
617 		size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
618 		size_t fobj_pgidx = (pmem->pgidx - (offs >> SMALL_PAGE_SHIFT)) +
619 				    pmem->area->fobj_pgidx;
620 
621 		assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW));
622 		asan_tag_access(pmem->va_alias,
623 				(uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
624 		if (fobj_save_page(pmem->area->fobj, fobj_pgidx,
625 				   pmem->va_alias))
626 			panic("fobj_save_page");
627 		asan_tag_no_access(pmem->va_alias,
628 				   (uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
629 	}
630 }
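
/*
 * Example using the same hypothetical numbers as the area_va2idx() example
 * above: for an area with base == 0x0e102000 (so offs == 0x102000 with a
 * 2 MiB page directory), fobj_pgidx == 4 and a pmem with pgidx == 0x105,
 * the page is saved as page (0x105 - 0x102) + 4 == 7 of the fobj.
 */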
631 
632 #ifdef CFG_PAGED_USER_TA
633 static void free_area(struct tee_pager_area *area)
634 {
635 	fobj_put(area->fobj);
636 	free(area);
637 }
638 
639 static bool pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
640 			       size_t size)
641 {
642 	struct tee_pager_area *area = NULL;
643 	vaddr_t b = base;
644 	struct fobj *fobj = NULL;
645 	size_t fobj_pgidx = 0;
646 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
647 
648 	if (!utc->areas) {
649 		utc->areas = malloc(sizeof(*utc->areas));
650 		if (!utc->areas)
651 			return false;
652 		TAILQ_INIT(utc->areas);
653 	}
654 
655 	fobj = fobj_rw_paged_alloc(s / SMALL_PAGE_SIZE);
656 	if (!fobj)
657 		return false;
658 
659 	while (s) {
660 		size_t s2;
661 
662 		if (find_area(utc->areas, b)) {
663 			fobj_put(fobj);
664 			return false;
665 		}
666 
667 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
668 		area = calloc(1, sizeof(*area));
669 		if (!area) {
670 			fobj_put(fobj);
671 			return false;
672 		}
673 
674 		if (b != base)
675 			fobj_get(fobj);
676 
677 		/* Table info will be set when the context is activated. */
678 
679 		area->fobj = fobj;
680 		area->fobj_pgidx = fobj_pgidx;
681 		area->type = AREA_TYPE_RW;
682 		area->base = b;
683 		area->size = s2;
684 		area->flags = TEE_MATTR_PRW | TEE_MATTR_URWX;
685 
686 		TAILQ_INSERT_TAIL(utc->areas, area, link);
687 		b += s2;
688 		s -= s2;
689 		fobj_pgidx += s2 / SMALL_PAGE_SIZE;
690 	}
691 
692 	return true;
693 }
694 
695 bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size)
696 {
697 	struct thread_specific_data *tsd = thread_get_tsd();
698 	struct tee_pager_area *area;
699 	struct core_mmu_table_info dir_info = { NULL };
700 
701 	if (&utc->ctx != tsd->ctx) {
702 		/*
703 		 * Changes are to a utc that isn't active. Just add the
704 		 * areas; page tables will be dealt with later.
705 		 */
706 		return pager_add_uta_area(utc, base, size);
707 	}
708 
709 	/*
710 	 * Assign page tables before adding areas to be able to tell which
711 	 * are newly added and should be removed in case of failure.
712 	 */
713 	tee_pager_assign_uta_tables(utc);
714 	if (!pager_add_uta_area(utc, base, size)) {
715 		struct tee_pager_area *next_a;
716 
717 		/* Remove all added areas */
718 		TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
719 			if (!area->pgt) {
720 				TAILQ_REMOVE(utc->areas, area, link);
721 				free_area(area);
722 			}
723 		}
724 		return false;
725 	}
726 
727 	/*
728 	 * Assign page tables to the new areas and make sure that the page
729 	 * tables are registered in the upper table.
730 	 */
731 	tee_pager_assign_uta_tables(utc);
732 	core_mmu_get_user_pgdir(&dir_info);
733 	TAILQ_FOREACH(area, utc->areas, link) {
734 		paddr_t pa;
735 		size_t idx;
736 		uint32_t attr;
737 
738 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
739 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
740 
741 		/*
742 		 * Check if the page table is already in use; if it is, it's
743 		 * already registered.
744 		 */
745 		if (area->pgt->num_used_entries) {
746 			assert(attr & TEE_MATTR_TABLE);
747 			assert(pa == virt_to_phys(area->pgt->tbl));
748 			continue;
749 		}
750 
751 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
752 		pa = virt_to_phys(area->pgt->tbl);
753 		assert(pa);
754 		/*
755 		 * Note that the update of the table entry is guaranteed to
756 		 * be atomic.
757 		 */
758 		core_mmu_set_entry(&dir_info, idx, pa, attr);
759 	}
760 
761 	return true;
762 }
763 
764 static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
765 				   struct pgt *pgt)
766 {
767 	assert(pgt);
768 	ti->table = pgt->tbl;
769 	ti->va_base = pgt->vabase;
770 	ti->level = TBL_LEVEL;
771 	ti->shift = TBL_SHIFT;
772 	ti->num_entries = TBL_NUM_ENTRIES;
773 }
774 
775 static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
776 			   vaddr_t new_base)
777 {
778 	uint32_t exceptions = pager_lock_check_stack(64);
779 
780 	/*
781 	 * If there's no pgt assigned to the old area there are no pages to
782 	 * deal with either, just update with a new pgt and base.
783 	 */
784 	if (area->pgt) {
785 		struct core_mmu_table_info old_ti;
786 		struct core_mmu_table_info new_ti;
787 		struct tee_pager_pmem *pmem;
788 
789 		init_tbl_info_from_pgt(&old_ti, area->pgt);
790 		init_tbl_info_from_pgt(&new_ti, new_pgt);
791 
792 
793 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
794 			vaddr_t va;
795 			paddr_t pa;
796 			uint32_t attr;
797 
798 			if (pmem->area != area)
799 				continue;
800 			core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr);
801 			core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0);
802 
803 			assert(pa == get_pmem_pa(pmem));
804 			assert(attr);
805 			assert(area->pgt->num_used_entries);
806 			area->pgt->num_used_entries--;
807 
808 			va = core_mmu_idx2va(&old_ti, pmem->pgidx);
809 			va = va - area->base + new_base;
810 			pmem->pgidx = core_mmu_va2idx(&new_ti, va);
811 			core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr);
812 			new_pgt->num_used_entries++;
813 		}
814 	}
815 
816 	area->pgt = new_pgt;
817 	area->base = new_base;
818 	pager_unlock(exceptions);
819 }
820 KEEP_PAGER(transpose_area);
821 
822 void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
823 				   vaddr_t src_base,
824 				   struct user_ta_ctx *dst_utc,
825 				   vaddr_t dst_base, struct pgt **dst_pgt,
826 				   size_t size)
827 {
828 	struct tee_pager_area *area;
829 	struct tee_pager_area *next_a;
830 
831 	TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
832 		vaddr_t new_area_base;
833 		size_t new_idx;
834 
835 		if (!core_is_buffer_inside(area->base, area->size,
836 					  src_base, size))
837 			continue;
838 
839 		TAILQ_REMOVE(src_utc->areas, area, link);
840 
841 		new_area_base = dst_base + (src_base - area->base);
842 		new_idx = (new_area_base - dst_pgt[0]->vabase) /
843 			  CORE_MMU_PGDIR_SIZE;
844 		assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
845 		       dst_pgt[new_idx]->vabase);
846 		transpose_area(area, dst_pgt[new_idx], new_area_base);
847 
848 		/*
849 		 * Assert that this will not cause any conflicts in the new
850 		 * utc.  This should already be guaranteed, but a bug here
851 		 * could be tricky to find.
852 		 */
853 		assert(!find_area(dst_utc->areas, area->base));
854 		TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
855 	}
856 }
857 
858 static void rem_area(struct tee_pager_area_head *area_head,
859 		     struct tee_pager_area *area)
860 {
861 	struct tee_pager_pmem *pmem;
862 	uint32_t exceptions;
863 
864 	exceptions = pager_lock_check_stack(64);
865 
866 	TAILQ_REMOVE(area_head, area, link);
867 
868 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
869 		if (pmem->area == area) {
870 			area_set_entry(area, pmem->pgidx, 0, 0);
871 			tlbi_mva_allasid(area_idx2va(area, pmem->pgidx));
872 			pgt_dec_used_entries(area->pgt);
873 			pmem->area = NULL;
874 			pmem->pgidx = INVALID_PGIDX;
875 		}
876 	}
877 
878 	pager_unlock(exceptions);
879 	free_area(area);
880 }
881 KEEP_PAGER(rem_area);
882 
883 void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
884 			      size_t size)
885 {
886 	struct tee_pager_area *area;
887 	struct tee_pager_area *next_a;
888 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
889 
890 	TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
891 		if (core_is_buffer_inside(area->base, area->size, base, s))
892 			rem_area(utc->areas, area);
893 	}
894 }
895 
896 void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
897 {
898 	struct tee_pager_area *area;
899 
900 	if (!utc->areas)
901 		return;
902 
903 	while (true) {
904 		area = TAILQ_FIRST(utc->areas);
905 		if (!area)
906 			break;
907 		TAILQ_REMOVE(utc->areas, area, link);
908 		free_area(area);
909 	}
910 
911 	free(utc->areas);
912 }
913 
914 bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
915 				 size_t size, uint32_t flags)
916 {
917 	bool ret;
918 	vaddr_t b = base;
919 	size_t s = size;
920 	size_t s2;
921 	struct tee_pager_area *area = find_area(utc->areas, b);
922 	uint32_t exceptions;
923 	struct tee_pager_pmem *pmem;
924 	paddr_t pa;
925 	uint32_t a;
926 	uint32_t f;
927 
928 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
929 	if (f & TEE_MATTR_UW)
930 		f |= TEE_MATTR_PW;
931 	f = get_area_mattr(f);
932 
933 	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
934 
935 	while (s) {
936 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
937 		if (!area || area->base != b || area->size != s2) {
938 			ret = false;
939 			goto out;
940 		}
941 		b += s2;
942 		s -= s2;
943 
944 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
945 			if (pmem->area != area)
946 				continue;
947 			area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
948 			if (a & TEE_MATTR_VALID_BLOCK)
949 				assert(pa == get_pmem_pa(pmem));
950 			else
951 				pa = get_pmem_pa(pmem);
952 			if (a == f)
953 				continue;
954 			area_set_entry(pmem->area, pmem->pgidx, 0, 0);
955 			tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
956 			if (!(flags & TEE_MATTR_UW))
957 				tee_pager_save_page(pmem, a);
958 
959 			area_set_entry(pmem->area, pmem->pgidx, pa, f);
960 			/*
961 			 * Make sure the table update is visible before
962 			 * continuing.
963 			 */
964 			dsb_ishst();
965 
966 			if (flags & TEE_MATTR_UX) {
967 				void *va = (void *)area_idx2va(pmem->area,
968 							       pmem->pgidx);
969 
970 				cache_op_inner(DCACHE_AREA_CLEAN, va,
971 						SMALL_PAGE_SIZE);
972 				cache_op_inner(ICACHE_AREA_INVALIDATE, va,
973 						SMALL_PAGE_SIZE);
974 			}
975 		}
976 
977 		area->flags = f;
978 		area = TAILQ_NEXT(area, link);
979 	}
980 
981 	ret = true;
982 out:
983 	pager_unlock(exceptions);
984 	return ret;
985 }
986 KEEP_PAGER(tee_pager_set_uta_area_attr);
987 #endif /*CFG_PAGED_USER_TA*/
988 
989 static bool tee_pager_unhide_page(vaddr_t page_va)
990 {
991 	struct tee_pager_pmem *pmem;
992 
993 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
994 		paddr_t pa;
995 		uint32_t attr;
996 
997 		if (pmem->pgidx == INVALID_PGIDX)
998 			continue;
999 
1000 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
1001 
1002 		if (!(attr &
1003 		     (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
1004 			continue;
1005 
1006 		if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
1007 			uint32_t a = get_area_mattr(pmem->area->flags);
1008 
1009 			/* page is hidden, show and move to back */
1010 			if (pa != get_pmem_pa(pmem))
1011 				panic("unexpected pa");
1012 
1013 			/*
1014 			 * If it's not a dirty block, then it should be
1015 			 * read only.
1016 			 */
1017 			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
1018 				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1019 			else
1020 				FMSG("Unhide %#" PRIxVA, page_va);
1021 
1022 			if (page_va == 0x8000a000)
1023 				FMSG("unhide %#" PRIxVA " a %#" PRIX32,
1024 					page_va, a);
1025 			area_set_entry(pmem->area, pmem->pgidx, pa, a);
1026 			/*
1027 			 * Note that TLB invalidation isn't needed since
1028 			 * there wasn't a valid mapping before. We should
1029 			 * use a barrier though, to make sure that the
1030 			 * change is visible.
1031 			 */
1032 			dsb_ishst();
1033 
1034 			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1035 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1036 			incr_hidden_hits();
1037 			return true;
1038 		}
1039 	}
1040 
1041 	return false;
1042 }
1043 
1044 static void tee_pager_hide_pages(void)
1045 {
1046 	struct tee_pager_pmem *pmem;
1047 	size_t n = 0;
1048 
1049 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1050 		paddr_t pa;
1051 		uint32_t attr;
1052 		uint32_t a;
1053 
1054 		if (n >= TEE_PAGER_NHIDE)
1055 			break;
1056 		n++;
1057 
1058 		/* we cannot hide pages when pmem->area is not defined. */
1059 		if (!pmem->area)
1060 			continue;
1061 
1062 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
1063 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1064 			continue;
1065 
1066 		assert(pa == get_pmem_pa(pmem));
1067 		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)){
1068 			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
1069 			FMSG("Hide %#" PRIxVA,
1070 			     area_idx2va(pmem->area, pmem->pgidx));
1071 		} else
1072 			a = TEE_MATTR_HIDDEN_BLOCK;
1073 
1074 		area_set_entry(pmem->area, pmem->pgidx, pa, a);
1075 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1076 	}
1077 }
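
/*
 * With TEE_PAGER_NHIDE == tee_pager_npages / 3, at most a third of the
 * pageable pages are re-hidden per fault; e.g. with 96 registered pages up
 * to 32 of the oldest entries in the pmem list are remapped as hidden
 * (invalid) entries, so that a later access to one of them faults, is
 * unhidden by tee_pager_unhide_page() and moves to the back of the
 * LRU-like list.
 */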
1078 
1079 /*
1080  * Find a mapped pmem, unmap it and move it back to the pageable pmem list.
1081  * Return false if the page was not mapped, and true if it was.
1082  */
1083 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1084 				       vaddr_t page_va)
1085 {
1086 	struct tee_pager_pmem *pmem;
1087 	unsigned pgidx;
1088 	paddr_t pa;
1089 	uint32_t attr;
1090 
1091 	pgidx = area_va2idx(area, page_va);
1092 	area_get_entry(area, pgidx, &pa, &attr);
1093 
1094 	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
1095 
1096 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1097 		if (pmem->area != area || pmem->pgidx != pgidx)
1098 			continue;
1099 
1100 		assert(pa == get_pmem_pa(pmem));
1101 		area_set_entry(area, pgidx, 0, 0);
1102 		pgt_dec_used_entries(area->pgt);
1103 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1104 		pmem->area = NULL;
1105 		pmem->pgidx = INVALID_PGIDX;
1106 		tee_pager_npages++;
1107 		set_npages();
1108 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1109 		incr_zi_released();
1110 		return true;
1111 	}
1112 
1113 	return false;
1114 }
1115 
1116 /* Finds the oldest page and unmaps it from its old virtual address */
1117 static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
1118 {
1119 	struct tee_pager_pmem *pmem;
1120 
1121 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1122 	if (!pmem) {
1123 		EMSG("No pmem entries");
1124 		return NULL;
1125 	}
1126 	if (pmem->pgidx != INVALID_PGIDX) {
1127 		uint32_t a;
1128 
1129 		assert(pmem->area && pmem->area->pgt);
1130 		area_get_entry(pmem->area, pmem->pgidx, NULL, &a);
1131 		area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1132 		pgt_dec_used_entries(pmem->area->pgt);
1133 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1134 		tee_pager_save_page(pmem, a);
1135 	}
1136 
1137 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1138 	pmem->pgidx = INVALID_PGIDX;
1139 	pmem->area = NULL;
1140 	if (area->type == AREA_TYPE_LOCK) {
1141 		/* Move page to lock list */
1142 		if (tee_pager_npages <= 0)
1143 			panic("running out of page");
1144 		tee_pager_npages--;
1145 		set_npages();
1146 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1147 	} else {
1148 		/* move page to back */
1149 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1150 	}
1151 
1152 	return pmem;
1153 }
1154 
1155 static bool pager_update_permissions(struct tee_pager_area *area,
1156 			struct abort_info *ai, bool *handled)
1157 {
1158 	unsigned int pgidx = area_va2idx(area, ai->va);
1159 	uint32_t attr;
1160 	paddr_t pa;
1161 
1162 	*handled = false;
1163 
1164 	area_get_entry(area, pgidx, &pa, &attr);
1165 
1166 	/* Not mapped */
1167 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1168 		return false;
1169 
1170 	/* Not readable, should not happen */
1171 	if (abort_is_user_exception(ai)) {
1172 		if (!(attr & TEE_MATTR_UR))
1173 			return true;
1174 	} else {
1175 		if (!(attr & TEE_MATTR_PR)) {
1176 			abort_print_error(ai);
1177 			panic();
1178 		}
1179 	}
1180 
1181 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1182 	case CORE_MMU_FAULT_TRANSLATION:
1183 	case CORE_MMU_FAULT_READ_PERMISSION:
1184 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1185 			/* Check for an attempt to execute from a non-executable page */
1186 			if (abort_is_user_exception(ai)) {
1187 				if (!(attr & TEE_MATTR_UX))
1188 					return true;
1189 			} else {
1190 				if (!(attr & TEE_MATTR_PX)) {
1191 					abort_print_error(ai);
1192 					panic();
1193 				}
1194 			}
1195 		}
1196 		/* Since the page is mapped now it's OK */
1197 		break;
1198 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1199 		/* Check for an attempt to write to a read-only page */
1200 		if (abort_is_user_exception(ai)) {
1201 			if (!(area->flags & TEE_MATTR_UW))
1202 				return true;
1203 			if (!(attr & TEE_MATTR_UW)) {
1204 				FMSG("Dirty %p",
1205 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1206 				area_set_entry(area, pgidx, pa,
1207 					       get_area_mattr(area->flags));
1208 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1209 			}
1210 
1211 		} else {
1212 			if (!(area->flags & TEE_MATTR_PW)) {
1213 				abort_print_error(ai);
1214 				panic();
1215 			}
1216 			if (!(attr & TEE_MATTR_PW)) {
1217 				FMSG("Dirty %p",
1218 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1219 				area_set_entry(area, pgidx, pa,
1220 					       get_area_mattr(area->flags));
1221 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1222 			}
1223 		}
1224 		/* Since the permissions have been updated it's OK now */
1225 		break;
1226 	default:
1227 		/* Some fault we can't deal with */
1228 		if (abort_is_user_exception(ai))
1229 			return true;
1230 		abort_print_error(ai);
1231 		panic();
1232 	}
1233 	*handled = true;
1234 	return true;
1235 }
1236 
1237 #ifdef CFG_TEE_CORE_DEBUG
1238 static void stat_handle_fault(void)
1239 {
1240 	static size_t num_faults;
1241 	static size_t min_npages = SIZE_MAX;
1242 	static size_t total_min_npages = SIZE_MAX;
1243 
1244 	num_faults++;
1245 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1246 		DMSG("nfaults %zu npages %zu (min %zu)",
1247 		     num_faults, tee_pager_npages, min_npages);
1248 		min_npages = tee_pager_npages; /* reset */
1249 	}
1250 	if (tee_pager_npages < min_npages)
1251 		min_npages = tee_pager_npages;
1252 	if (tee_pager_npages < total_min_npages)
1253 		total_min_npages = tee_pager_npages;
1254 }
1255 #else
1256 static void stat_handle_fault(void)
1257 {
1258 }
1259 #endif
1260 
1261 bool tee_pager_handle_fault(struct abort_info *ai)
1262 {
1263 	struct tee_pager_area *area;
1264 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1265 	uint32_t exceptions;
1266 	bool ret;
1267 
1268 #ifdef TEE_PAGER_DEBUG_PRINT
1269 	abort_print(ai);
1270 #endif
1271 
1272 	/*
1273 	 * We're updating pages that can affect several active CPUs at a
1274 	 * time below. We end up here because a thread tries to access some
1275 	 * memory that isn't available. We have to be careful when making
1276 	 * that memory available as other threads may succeed in accessing
1277 	 * that address the moment after we've made it available.
1278 	 *
1279 	 * That means that we can't just map the memory and populate the
1280 	 * page, instead we use the aliased mapping to populate the page
1281 	 * and once everything is ready we map it.
1282 	 */
1283 	exceptions = pager_lock(ai);
1284 
1285 	stat_handle_fault();
1286 
1287 	/* check if the access is valid */
1288 	if (abort_is_user_exception(ai)) {
1289 		area = find_uta_area(ai->va);
1290 
1291 	} else {
1292 		area = find_area(&tee_pager_area_head, ai->va);
1293 		if (!area)
1294 			area = find_uta_area(ai->va);
1295 	}
1296 	if (!area || !area->pgt) {
1297 		ret = false;
1298 		goto out;
1299 	}
1300 
1301 	if (!tee_pager_unhide_page(page_va)) {
1302 		struct tee_pager_pmem *pmem = NULL;
1303 		uint32_t attr;
1304 		paddr_t pa;
1305 
1306 		/*
1307 		 * The page wasn't hidden, but some other core may have
1308 		 * updated the table entry before we got here or we need
1309 		 * to make a read-only page read-write (dirty).
1310 		 */
1311 		if (pager_update_permissions(area, ai, &ret)) {
1312 			/*
1313 			 * Nothing more to do with the abort. The problem
1314 			 * could already have been dealt with from another
1315 			 * core, or if ret is false the TA will be panicked.
1316 			 */
1317 			goto out;
1318 		}
1319 
1320 		pmem = tee_pager_get_page(area);
1321 		if (!pmem) {
1322 			abort_print(ai);
1323 			panic();
1324 		}
1325 
1326 		/* load page code & data */
1327 		tee_pager_load_page(area, page_va, pmem->va_alias);
1328 
1329 
1330 		pmem->area = area;
1331 		pmem->pgidx = area_va2idx(area, ai->va);
1332 		attr = get_area_mattr(area->flags) &
1333 			~(TEE_MATTR_PW | TEE_MATTR_UW);
1334 		pa = get_pmem_pa(pmem);
1335 
1336 		/*
1337 		 * We've updated the page using the aliased mapping and
1338 		 * some cache maintenance is now needed if it's an
1339 		 * executable page.
1340 		 *
1341 		 * Since the d-cache is a Physically-indexed,
1342 		 * physically-tagged (PIPT) cache we can clean either the
1343 		 * aliased address or the real virtual address. In this
1344 		 * case we choose the real virtual address.
1345 		 *
1346 		 * The i-cache can also be PIPT, but may be something else
1347 		 * too like VIPT. The current code requires the caches to
1348 		 * implement the IVIPT extension, that is:
1349 		 * "instruction cache maintenance is required only after
1350 		 * writing new data to a physical address that holds an
1351 		 * instruction."
1352 		 *
1353 		 * To portably invalidate the icache the page has to
1354 		 * be mapped at the final virtual address but not
1355 		 * executable.
1356 		 */
1357 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1358 			uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1359 					TEE_MATTR_PW | TEE_MATTR_UW;
1360 
1361 			/* Set a temporary read-only mapping */
1362 			area_set_entry(pmem->area, pmem->pgidx, pa,
1363 				       attr & ~mask);
1364 			tlbi_mva_allasid(page_va);
1365 
1366 			/*
1367 			 * Doing these operations to LoUIS (Level of
1368 			 * unification, Inner Shareable) would be enough
1369 			 */
1370 			cache_op_inner(DCACHE_AREA_CLEAN, (void *)page_va,
1371 				       SMALL_PAGE_SIZE);
1372 			cache_op_inner(ICACHE_AREA_INVALIDATE, (void *)page_va,
1373 				       SMALL_PAGE_SIZE);
1374 
1375 			/* Set the final mapping */
1376 			area_set_entry(area, pmem->pgidx, pa, attr);
1377 			tlbi_mva_allasid(page_va);
1378 		} else {
1379 			area_set_entry(area, pmem->pgidx, pa, attr);
1380 			/*
1381 			 * No need to flush TLB for this entry, it was
1382 			 * invalid. We should use a barrier though, to make
1383 			 * sure that the change is visible.
1384 			 */
1385 			dsb_ishst();
1386 		}
1387 		pgt_inc_used_entries(area->pgt);
1388 
1389 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1390 
1391 	}
1392 
1393 	tee_pager_hide_pages();
1394 	ret = true;
1395 out:
1396 	pager_unlock(exceptions);
1397 	return ret;
1398 }
1399 
1400 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1401 {
1402 	size_t n;
1403 
1404 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1405 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1406 
1407 	/* setup memory */
1408 	for (n = 0; n < npages; n++) {
1409 		struct core_mmu_table_info *ti;
1410 		struct tee_pager_pmem *pmem;
1411 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1412 		unsigned int pgidx;
1413 		paddr_t pa;
1414 		uint32_t attr;
1415 
1416 		ti = find_table_info(va);
1417 		pgidx = core_mmu_va2idx(ti, va);
1418 		/*
1419 		 * Note that we can only support adding pages in the
1420 		 * valid range of this table info; currently that's not a problem.
1421 		 */
1422 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1423 
1424 		/* Ignore unmapped pages/blocks */
1425 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1426 			continue;
1427 
1428 		pmem = malloc(sizeof(struct tee_pager_pmem));
1429 		if (!pmem)
1430 			panic("out of mem");
1431 
1432 		pmem->va_alias = pager_add_alias_page(pa);
1433 
1434 		if (unmap) {
1435 			pmem->area = NULL;
1436 			pmem->pgidx = INVALID_PGIDX;
1437 			core_mmu_set_entry(ti, pgidx, 0, 0);
1438 			pgt_dec_used_entries(find_core_pgt(va));
1439 		} else {
1440 			/*
1441 			 * The page is still mapped, let's assign the area
1442 			 * and update the protection bits accordingly.
1443 			 */
1444 			pmem->area = find_area(&tee_pager_area_head, va);
1445 			assert(pmem->area->pgt == find_core_pgt(va));
1446 			pmem->pgidx = pgidx;
1447 			assert(pa == get_pmem_pa(pmem));
1448 			area_set_entry(pmem->area, pgidx, pa,
1449 				       get_area_mattr(pmem->area->flags));
1450 		}
1451 
1452 		tee_pager_npages++;
1453 		incr_npages_all();
1454 		set_npages();
1455 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1456 	}
1457 
1458 	/*
1459 	 * As this is done during init, invalidate all TLBs once instead of
1460 	 * targeting only the modified entries.
1461 	 */
1462 	tlbi_all();
1463 }
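
/*
 * Sketch of how pages are donated to the pager at boot (init_va,
 * init_npages, paged_va and paged_npages are hypothetical): pages that are
 * still mapped and in use keep their mapping and are put under pager
 * control with unmap == false, while pages whose current contents can be
 * discarded are handed over with unmap == true.
 *
 *	tee_pager_add_pages(init_va, init_npages, false);
 *	tee_pager_add_pages(paged_va, paged_npages, true);
 */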
1464 
1465 #ifdef CFG_PAGED_USER_TA
1466 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1467 {
1468 	struct pgt *p = pgt;
1469 
1470 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1471 		p = SLIST_NEXT(p, link);
1472 	return p;
1473 }
1474 
1475 void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
1476 {
1477 	struct tee_pager_area *area;
1478 	struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1479 
1480 	TAILQ_FOREACH(area, utc->areas, link) {
1481 		if (!area->pgt)
1482 			area->pgt = find_pgt(pgt, area->base);
1483 		else
1484 			assert(area->pgt == find_pgt(pgt, area->base));
1485 		if (!area->pgt)
1486 			panic();
1487 	}
1488 }
1489 
1490 static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
1491 {
1492 	uint32_t attr;
1493 
1494 	assert(pmem->area && pmem->area->pgt);
1495 
1496 	area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
1497 	area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1498 	tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1499 	tee_pager_save_page(pmem, attr);
1500 	assert(pmem->area->pgt->num_used_entries);
1501 	pmem->area->pgt->num_used_entries--;
1502 	pmem->pgidx = INVALID_PGIDX;
1503 	pmem->area = NULL;
1504 }
1505 
1506 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1507 {
1508 	struct tee_pager_pmem *pmem;
1509 	struct tee_pager_area *area;
1510 	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1511 
1512 	if (!pgt->num_used_entries)
1513 		goto out;
1514 
1515 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1516 		if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
1517 			continue;
1518 		if (pmem->area->pgt == pgt)
1519 			pager_save_and_release_entry(pmem);
1520 	}
1521 	assert(!pgt->num_used_entries);
1522 
1523 out:
1524 	if (is_user_ta_ctx(pgt->ctx)) {
1525 		TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
1526 			if (area->pgt == pgt)
1527 				area->pgt = NULL;
1528 		}
1529 	}
1530 
1531 	pager_unlock(exceptions);
1532 }
1533 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1534 #endif /*CFG_PAGED_USER_TA*/
1535 
1536 void tee_pager_release_phys(void *addr, size_t size)
1537 {
1538 	bool unmapped = false;
1539 	vaddr_t va = (vaddr_t)addr;
1540 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1541 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1542 	struct tee_pager_area *area;
1543 	uint32_t exceptions;
1544 
1545 	if (end <= begin)
1546 		return;
1547 
1548 	exceptions = pager_lock_check_stack(128);
1549 
1550 	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
1551 		area = find_area(&tee_pager_area_head, va);
1552 		if (!area)
1553 			panic();
1554 		unmapped |= tee_pager_release_one_phys(area, va);
1555 	}
1556 
1557 	if (unmapped)
1558 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1559 
1560 	pager_unlock(exceptions);
1561 }
1562 KEEP_PAGER(tee_pager_release_phys);
1563 
1564 void *tee_pager_alloc(size_t size)
1565 {
1566 	tee_mm_entry_t *mm;
1567 	uint32_t f = TEE_MATTR_PW | TEE_MATTR_PR | TEE_MATTR_LOCKED;
1568 	uint8_t *smem;
1569 	size_t bytes;
1570 
1571 	if (!size)
1572 		return NULL;
1573 
1574 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1575 	if (!mm)
1576 		return NULL;
1577 
1578 	bytes = tee_mm_get_bytes(mm);
1579 	smem = (uint8_t *)tee_mm_get_smem(mm);
1580 	tee_pager_add_core_area((vaddr_t)smem, bytes, f, NULL, NULL);
1581 	asan_tag_access(smem, smem + bytes);
1582 
1583 	return smem;
1584 }
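
/*
 * Usage sketch (buf and len are hypothetical): memory returned by
 * tee_pager_alloc() is backed by locked pages, i.e. once a page has been
 * faulted in it stays resident until the physical page is handed back with
 * tee_pager_release_phys().
 *
 *	uint8_t *buf = tee_pager_alloc(len);
 *
 *	if (buf) {
 *		... use buf ...
 *		tee_pager_release_phys(buf, len);
 *	}
 */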
1585