xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision b83c0d5fb4acad7d4b990bad9488fada0f2bf9a7)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <io.h>
10 #include <keep.h>
11 #include <kernel/abort.h>
12 #include <kernel/asan.h>
13 #include <kernel/panic.h>
14 #include <kernel/spinlock.h>
15 #include <kernel/tee_misc.h>
16 #include <kernel/tee_ta_manager.h>
17 #include <kernel/thread.h>
18 #include <kernel/tlb_helpers.h>
19 #include <mm/core_memprot.h>
20 #include <mm/fobj.h>
21 #include <mm/tee_mm.h>
22 #include <mm/tee_pager.h>
23 #include <stdlib.h>
24 #include <sys/queue.h>
25 #include <tee_api_defines.h>
26 #include <trace.h>
27 #include <types_ext.h>
28 #include <utee_defines.h>
29 #include <util.h>
30 
31 
32 static struct tee_pager_area_head tee_pager_area_head =
33 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
34 
35 #define INVALID_PGIDX		UINT_MAX
36 #define PMEM_FLAG_DIRTY		BIT(0)
37 #define PMEM_FLAG_HIDDEN	BIT(1)
38 
39 /*
40  * struct tee_pager_pmem - Represents a physical page used for paging.
41  *
42  * @flags	flags defined by PMEM_FLAG_* above
43  * @fobj_pgidx	index of the page in the @fobj
44  * @fobj	File object of which a page is made visible.
45  * @va_alias	Virtual address where the physical page is always aliased.
46  *		Used during remapping of the page when the content needs to
47  *		be updated before it's available at the new location.
48  */
49 struct tee_pager_pmem {
50 	unsigned int flags;
51 	unsigned int fobj_pgidx;
52 	struct fobj *fobj;
53 	void *va_alias;
54 	TAILQ_ENTRY(tee_pager_pmem) link;
55 };
56 
57 /* The list of physical pages. The first page in the list is the oldest */
58 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
59 
60 static struct tee_pager_pmem_head tee_pager_pmem_head =
61 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
62 
63 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
64 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
65 
66 /* Maximum number of pages hidden at a time */
67 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
68 
69 /* Number of registered physical pages, used when hiding pages. */
70 static size_t tee_pager_npages;
71 
72 #ifdef CFG_WITH_STATS
73 static struct tee_pager_stats pager_stats;
74 
75 static inline void incr_ro_hits(void)
76 {
77 	pager_stats.ro_hits++;
78 }
79 
80 static inline void incr_rw_hits(void)
81 {
82 	pager_stats.rw_hits++;
83 }
84 
85 static inline void incr_hidden_hits(void)
86 {
87 	pager_stats.hidden_hits++;
88 }
89 
90 static inline void incr_zi_released(void)
91 {
92 	pager_stats.zi_released++;
93 }
94 
95 static inline void incr_npages_all(void)
96 {
97 	pager_stats.npages_all++;
98 }
99 
100 static inline void set_npages(void)
101 {
102 	pager_stats.npages = tee_pager_npages;
103 }
104 
105 void tee_pager_get_stats(struct tee_pager_stats *stats)
106 {
107 	*stats = pager_stats;
108 
109 	pager_stats.hidden_hits = 0;
110 	pager_stats.ro_hits = 0;
111 	pager_stats.rw_hits = 0;
112 	pager_stats.zi_released = 0;
113 }
114 
115 #else /* CFG_WITH_STATS */
116 static inline void incr_ro_hits(void) { }
117 static inline void incr_rw_hits(void) { }
118 static inline void incr_hidden_hits(void) { }
119 static inline void incr_zi_released(void) { }
120 static inline void incr_npages_all(void) { }
121 static inline void set_npages(void) { }
122 
123 void tee_pager_get_stats(struct tee_pager_stats *stats)
124 {
125 	memset(stats, 0, sizeof(struct tee_pager_stats));
126 }
127 #endif /* CFG_WITH_STATS */
128 
129 #define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
130 #define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
131 #define TBL_SHIFT	SMALL_PAGE_SHIFT
132 
133 #define EFFECTIVE_VA_SIZE \
134 	(ROUNDUP(TEE_RAM_VA_START + TEE_RAM_VA_SIZE, \
135 		 CORE_MMU_PGDIR_SIZE) - \
136 	 ROUNDDOWN(TEE_RAM_VA_START, CORE_MMU_PGDIR_SIZE))
137 
138 static struct pager_table {
139 	struct pgt pgt;
140 	struct core_mmu_table_info tbl_info;
141 } pager_tables[EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE];
142 
143 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
144 
145 /* Defines the range of the alias area */
146 static tee_mm_entry_t *pager_alias_area;
147 /*
148  * Physical pages are added in a stack-like fashion to the alias area.
149  * @pager_alias_next_free gives the address of the next free entry,
150  * provided it is != 0.
151  */
152 static uintptr_t pager_alias_next_free;
153 
154 #ifdef CFG_TEE_CORE_DEBUG
155 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
156 
157 static uint32_t pager_lock_dldetect(const char *func, const int line,
158 				    struct abort_info *ai)
159 {
160 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
161 	unsigned int retries = 0;
162 	unsigned int reminder = 0;
163 
164 	while (!cpu_spin_trylock(&pager_spinlock)) {
165 		retries++;
166 		if (!retries) {
167 			/* wrapped, time to report */
168 			trace_printf(func, line, TRACE_ERROR, true,
169 				     "possible spinlock deadlock reminder %u",
170 				     reminder);
171 			if (reminder < UINT_MAX)
172 				reminder++;
173 			if (ai)
174 				abort_print(ai);
175 		}
176 	}
177 
178 	return exceptions;
179 }
180 #else
181 static uint32_t pager_lock(struct abort_info __unused *ai)
182 {
183 	return cpu_spin_lock_xsave(&pager_spinlock);
184 }
185 #endif
186 
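/*
 * Takes the pager lock after first touching @stack_size bytes of the current
 * stack so that no page fault is taken on the stack while the lock is held.
 */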
187 static uint32_t pager_lock_check_stack(size_t stack_size)
188 {
189 	if (stack_size) {
190 		int8_t buf[stack_size];
191 		size_t n;
192 
193 		/*
194 		 * Make sure to touch all pages of the stack that we expect
195 		 * to use with this lock held. We need to take any page
196 		 * faults before the lock is taken or we'll deadlock
197 		 * the pager. The pages that are populated in this way will
198 		 * eventually be released at certain save transitions of
199 		 * the thread.
200 		 */
201 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
202 			io_write8((vaddr_t)buf + n, 1);
203 		io_write8((vaddr_t)buf + stack_size - 1, 1);
204 	}
205 
206 	return pager_lock(NULL);
207 }
208 
209 static void pager_unlock(uint32_t exceptions)
210 {
211 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
212 }
213 
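/*
 * Returns the virtual address at which @pa is currently mapped by the pager
 * translation tables, or NULL if it isn't mapped. The linear mapping is
 * tried first, then the pager tables are searched.
 */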
214 void *tee_pager_phys_to_virt(paddr_t pa)
215 {
216 	struct core_mmu_table_info ti;
217 	unsigned idx;
218 	uint32_t a;
219 	paddr_t p;
220 	vaddr_t v;
221 	size_t n;
222 
223 	/*
224 	 * Most addresses are mapped lineary, try that first if possible.
225 	 * Most addresses are mapped linearly, try that first if possible.
226 	if (!tee_pager_get_table_info(pa, &ti))
227 		return NULL; /* impossible pa */
228 	idx = core_mmu_va2idx(&ti, pa);
229 	core_mmu_get_entry(&ti, idx, &p, &a);
230 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
231 		return (void *)core_mmu_idx2va(&ti, idx);
232 
233 	n = 0;
234 	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
235 	while (true) {
236 		while (idx < TBL_NUM_ENTRIES) {
237 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
238 			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
239 				return NULL;
240 
241 			core_mmu_get_entry(&pager_tables[n].tbl_info,
242 					   idx, &p, &a);
243 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
244 				return (void *)v;
245 			idx++;
246 		}
247 
248 		n++;
249 		if (n >= ARRAY_SIZE(pager_tables))
250 			return NULL;
251 		idx = 0;
252 	}
253 
254 	return NULL;
255 }
256 
257 static bool pmem_is_hidden(struct tee_pager_pmem *pmem)
258 {
259 	return pmem->flags & PMEM_FLAG_HIDDEN;
260 }
261 
262 static bool pmem_is_dirty(struct tee_pager_pmem *pmem)
263 {
264 	return pmem->flags & PMEM_FLAG_DIRTY;
265 }
266 
267 static bool pmem_is_covered_by_area(struct tee_pager_pmem *pmem,
268 				    struct tee_pager_area *area)
269 {
270 	if (pmem->fobj != area->fobj)
271 		return false;
272 	if (pmem->fobj_pgidx < area->fobj_pgoffs)
273 		return false;
274 	if ((pmem->fobj_pgidx - area->fobj_pgoffs) >=
275 	    (area->size >> SMALL_PAGE_SHIFT))
276 		return false;
277 
278 	return true;
279 }
280 
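/*
 * Returns the index in the area's translation table that corresponds to the
 * page held by @pmem, derived from the page's index within the fobj and the
 * area's position within the translation table.
 */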
281 static size_t pmem_get_area_tblidx(struct tee_pager_pmem *pmem,
282 				   struct tee_pager_area *area)
283 {
284 	size_t tbloffs = (area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT;
285 
286 	return pmem->fobj_pgidx - area->fobj_pgoffs + tbloffs;
287 }
288 
289 static struct pager_table *find_pager_table_may_fail(vaddr_t va)
290 {
291 	size_t n;
292 	const vaddr_t mask = CORE_MMU_PGDIR_MASK;
293 
294 	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
295 	    CORE_MMU_PGDIR_SHIFT;
296 	if (n >= ARRAY_SIZE(pager_tables))
297 		return NULL;
298 
299 	assert(va >= pager_tables[n].tbl_info.va_base &&
300 	       va <= (pager_tables[n].tbl_info.va_base | mask));
301 
302 	return pager_tables + n;
303 }
304 
305 static struct pager_table *find_pager_table(vaddr_t va)
306 {
307 	struct pager_table *pt = find_pager_table_may_fail(va);
308 
309 	assert(pt);
310 	return pt;
311 }
312 
313 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
314 {
315 	struct pager_table *pt = find_pager_table_may_fail(va);
316 
317 	if (!pt)
318 		return false;
319 
320 	*ti = pt->tbl_info;
321 	return true;
322 }
323 
324 static struct core_mmu_table_info *find_table_info(vaddr_t va)
325 {
326 	return &find_pager_table(va)->tbl_info;
327 }
328 
329 static struct pgt *find_core_pgt(vaddr_t va)
330 {
331 	return &find_pager_table(va)->pgt;
332 }
333 
334 void tee_pager_set_alias_area(tee_mm_entry_t *mm)
335 {
336 	struct pager_table *pt;
337 	unsigned idx;
338 	vaddr_t smem = tee_mm_get_smem(mm);
339 	size_t nbytes = tee_mm_get_bytes(mm);
340 	vaddr_t v;
341 	uint32_t a = 0;
342 
343 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
344 
345 	assert(!pager_alias_area);
346 	pager_alias_area = mm;
347 	pager_alias_next_free = smem;
348 
349 	/* Clear all mappings in the alias area */
350 	pt = find_pager_table(smem);
351 	idx = core_mmu_va2idx(&pt->tbl_info, smem);
352 	while (pt <= (pager_tables + ARRAY_SIZE(pager_tables) - 1)) {
353 		while (idx < TBL_NUM_ENTRIES) {
354 			v = core_mmu_idx2va(&pt->tbl_info, idx);
355 			if (v >= (smem + nbytes))
356 				goto out;
357 
358 			core_mmu_get_entry(&pt->tbl_info, idx, NULL, &a);
359 			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
360 			if (a & TEE_MATTR_VALID_BLOCK)
361 				pgt_dec_used_entries(&pt->pgt);
362 			idx++;
363 		}
364 
365 		pt++;
366 		idx = 0;
367 	}
368 
369 out:
370 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
371 }
372 
373 static size_t tbl_usage_count(struct core_mmu_table_info *ti)
374 {
375 	size_t n;
376 	uint32_t a = 0;
377 	size_t usage = 0;
378 
379 	for (n = 0; n < ti->num_entries; n++) {
380 		core_mmu_get_entry(ti, n, NULL, &a);
381 		if (a & TEE_MATTR_VALID_BLOCK)
382 			usage++;
383 	}
384 	return usage;
385 }
386 
387 static void area_get_entry(struct tee_pager_area *area, size_t idx,
388 			   paddr_t *pa, uint32_t *attr)
389 {
390 	assert(area->pgt);
391 	assert(idx < TBL_NUM_ENTRIES);
392 	core_mmu_get_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
393 }
394 
395 static void area_set_entry(struct tee_pager_area *area, size_t idx,
396 			   paddr_t pa, uint32_t attr)
397 {
398 	assert(area->pgt);
399 	assert(idx < TBL_NUM_ENTRIES);
400 	core_mmu_set_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
401 }
402 
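/*
 * Translates a virtual address into the corresponding index in the area's
 * translation table; area_idx2va() below does the reverse.
 */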
403 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
404 {
405 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
406 }
407 
408 static vaddr_t area_idx2va(struct tee_pager_area *area, size_t idx)
409 {
410 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
411 }
412 
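/*
 * Unmaps @pmem from all areas mapping it, or only from areas using
 * @only_this_pgt when that argument is non-NULL, and invalidates the TLB
 * for each entry that was removed.
 */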
413 static void pmem_unmap(struct tee_pager_pmem *pmem, struct pgt *only_this_pgt)
414 {
415 	struct tee_pager_area *area = NULL;
416 	size_t tblidx = 0;
417 	uint32_t a = 0;
418 
419 	TAILQ_FOREACH(area, &pmem->fobj->areas, fobj_link) {
420 		/*
421 		 * If only_this_pgt points to a pgt then the pgt of this
422 		 * area has to match or we'll skip over it.
423 		 */
424 		if (only_this_pgt && area->pgt != only_this_pgt)
425 			continue;
426 		if (!area->pgt || !pmem_is_covered_by_area(pmem, area))
427 			continue;
428 		tblidx = pmem_get_area_tblidx(pmem, area);
429 		area_get_entry(area, tblidx, NULL, &a);
430 		if (a & TEE_MATTR_VALID_BLOCK) {
431 			area_set_entry(area, tblidx, 0, 0);
432 			pgt_dec_used_entries(area->pgt);
433 			tlbi_mva_allasid(area_idx2va(area, tblidx));
434 		}
435 	}
436 }
437 
438 void tee_pager_early_init(void)
439 {
440 	size_t n;
441 
442 	/*
443 	 * Note that this depends on add_pager_vaspace() adding vaspace
444 	 * after the end of memory.
445 	 */
446 	for (n = 0; n < ARRAY_SIZE(pager_tables); n++) {
447 		if (!core_mmu_find_table(NULL, TEE_RAM_VA_START +
448 					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
449 					 &pager_tables[n].tbl_info))
450 			panic("can't find mmu tables");
451 
452 		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
453 			panic("Unsupported page size in translation table");
454 		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
455 		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);
456 
457 		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
458 		pgt_set_used_entries(&pager_tables[n].pgt,
459 				tbl_usage_count(&pager_tables[n].tbl_info));
460 	}
461 }
462 
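/*
 * Maps the physical page @pa read-only at the next free slot in the alias
 * area and returns its alias virtual address.
 */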
463 static void *pager_add_alias_page(paddr_t pa)
464 {
465 	unsigned idx;
466 	struct core_mmu_table_info *ti;
467 	/* Alias pages mapped read-only; write access is enabled when needed */
468 	uint32_t attr = TEE_MATTR_VALID_BLOCK |
469 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
470 			TEE_MATTR_SECURE | TEE_MATTR_PR;
471 
472 	DMSG("0x%" PRIxPA, pa);
473 
474 	ti = find_table_info(pager_alias_next_free);
475 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
476 	core_mmu_set_entry(ti, idx, pa, attr);
477 	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
478 	pager_alias_next_free += SMALL_PAGE_SIZE;
479 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
480 				      tee_mm_get_bytes(pager_alias_area)))
481 		pager_alias_next_free = 0;
482 	return (void *)core_mmu_idx2va(ti, idx);
483 }
484 
485 static void area_insert_tail(struct tee_pager_area *area)
486 {
487 	uint32_t exceptions = pager_lock_check_stack(8);
488 
489 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
490 	TAILQ_INSERT_TAIL(&area->fobj->areas, area, fobj_link);
491 
492 	pager_unlock(exceptions);
493 }
494 KEEP_PAGER(area_insert_tail);
495 
496 void tee_pager_add_core_area(vaddr_t base, enum tee_pager_area_type type,
497 			     struct fobj *fobj)
498 {
499 	struct tee_pager_area *area = NULL;
500 	uint32_t flags = 0;
501 	size_t fobj_pgoffs = 0;
502 	vaddr_t b = base;
503 	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
504 	size_t s2 = 0;
505 
506 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : type %d", base, base + s, type);
507 
508 	if (base & SMALL_PAGE_MASK || !s) {
509 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, s);
510 		panic();
511 	}
512 
513 	switch (type) {
514 	case PAGER_AREA_TYPE_RO:
515 		flags = TEE_MATTR_PRX;
516 		break;
517 	case PAGER_AREA_TYPE_RW:
518 		flags = TEE_MATTR_PRW;
519 		break;
520 	case PAGER_AREA_TYPE_LOCK:
521 		flags = TEE_MATTR_PRW | TEE_MATTR_LOCKED;
522 		break;
523 	default:
524 		panic();
525 	}
526 
527 	if (!fobj)
528 		panic();
529 
530 	while (s) {
531 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
532 		area = calloc(1, sizeof(*area));
533 		if (!area)
534 			panic("alloc_area");
535 
536 		area->fobj = fobj_get(fobj);
537 		area->fobj_pgoffs = fobj_pgoffs;
538 		area->type = type;
539 		area->pgt = find_core_pgt(b);
540 		area->base = b;
541 		area->size = s2;
542 		area->flags = flags;
543 		area_insert_tail(area);
544 
545 		b += s2;
546 		s -= s2;
547 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
548 	}
549 }
550 
551 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
552 					vaddr_t va)
553 {
554 	struct tee_pager_area *area;
555 
556 	if (!areas)
557 		return NULL;
558 
559 	TAILQ_FOREACH(area, areas, link) {
560 		if (core_is_buffer_inside(va, 1, area->base, area->size))
561 			return area;
562 	}
563 	return NULL;
564 }
565 
566 #ifdef CFG_PAGED_USER_TA
567 static struct tee_pager_area *find_uta_area(vaddr_t va)
568 {
569 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
570 
571 	if (!is_user_ta_ctx(ctx))
572 		return NULL;
573 	return find_area(to_user_ta_ctx(ctx)->areas, va);
574 }
575 #else
576 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
577 {
578 	return NULL;
579 }
580 #endif /*CFG_PAGED_USER_TA*/
581 
582 
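/*
 * Builds the complete mapping attributes (valid, secure, cached) for a page
 * from the area's access flags.
 */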
583 static uint32_t get_area_mattr(uint32_t area_flags)
584 {
585 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
586 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
587 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
588 
589 	return attr;
590 }
591 
592 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
593 {
594 	struct core_mmu_table_info *ti;
595 	paddr_t pa;
596 	unsigned idx;
597 
598 	ti = find_table_info((vaddr_t)pmem->va_alias);
599 	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
600 	core_mmu_get_entry(ti, idx, &pa, NULL);
601 	return pa;
602 }
603 
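/*
 * Populates the physical page aliased at @va_alias with the content backing
 * @page_va from the area's fobj. Write permission on the alias mapping is
 * enabled while loading and dropped again for read-only areas.
 */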
604 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
605 			void *va_alias)
606 {
607 	size_t fobj_pgoffs = ((page_va - area->base) >> SMALL_PAGE_SHIFT) +
608 			     area->fobj_pgoffs;
609 	struct core_mmu_table_info *ti;
610 	uint32_t attr_alias;
611 	paddr_t pa_alias;
612 	unsigned int idx_alias;
613 
614 	/* Ensure we are allowed to write to the aliased virtual page */
615 	ti = find_table_info((vaddr_t)va_alias);
616 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
617 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
618 	if (!(attr_alias & TEE_MATTR_PW)) {
619 		attr_alias |= TEE_MATTR_PW;
620 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
621 		tlbi_mva_allasid((vaddr_t)va_alias);
622 	}
623 
624 	asan_tag_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
625 	if (fobj_load_page(area->fobj, fobj_pgoffs, va_alias)) {
626 		EMSG("PH 0x%" PRIxVA " failed", page_va);
627 		panic();
628 	}
629 	switch (area->type) {
630 	case PAGER_AREA_TYPE_RO:
631 		incr_ro_hits();
632 		/* Forbid write to aliases for read-only (maybe exec) pages */
633 		attr_alias &= ~TEE_MATTR_PW;
634 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
635 		tlbi_mva_allasid((vaddr_t)va_alias);
636 		break;
637 	case PAGER_AREA_TYPE_RW:
638 		incr_rw_hits();
639 		break;
640 	case PAGER_AREA_TYPE_LOCK:
641 		break;
642 	default:
643 		panic();
644 	}
645 	asan_tag_no_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
646 }
647 
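/* Writes the page back to its fobj if it has been marked dirty */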
648 static void tee_pager_save_page(struct tee_pager_pmem *pmem)
649 {
650 	if (pmem_is_dirty(pmem)) {
651 		asan_tag_access(pmem->va_alias,
652 				(uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
653 		if (fobj_save_page(pmem->fobj, pmem->fobj_pgidx,
654 				   pmem->va_alias))
655 			panic("fobj_save_page");
656 		asan_tag_no_access(pmem->va_alias,
657 				   (uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
658 	}
659 }
660 
661 #ifdef CFG_PAGED_USER_TA
662 static void unlink_area(struct tee_pager_area_head *area_head,
663 			struct tee_pager_area *area)
664 {
665 	uint32_t exceptions = pager_lock_check_stack(64);
666 
667 	TAILQ_REMOVE(area_head, area, link);
668 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
669 
670 	pager_unlock(exceptions);
671 }
672 KEEP_PAGER(unlink_area);
673 
674 static void free_area(struct tee_pager_area *area)
675 {
676 	fobj_put(area->fobj);
677 	free(area);
678 }
679 
680 static TEE_Result pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
681 				     struct fobj *fobj)
682 {
683 	struct tee_pager_area *area;
684 	vaddr_t b = base;
685 	size_t fobj_pgoffs = 0;
686 	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
687 
688 	if (!utc->areas) {
689 		utc->areas = malloc(sizeof(*utc->areas));
690 		if (!utc->areas)
691 			return TEE_ERROR_OUT_OF_MEMORY;
692 		TAILQ_INIT(utc->areas);
693 	}
694 
695 	while (s) {
696 		size_t s2;
697 
698 		if (find_area(utc->areas, b))
699 			return TEE_ERROR_BAD_PARAMETERS;
700 
701 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
702 		area = calloc(1, sizeof(*area));
703 		if (!area)
704 			return TEE_ERROR_OUT_OF_MEMORY;
705 
706 		/* Table info will be set when the context is activated. */
707 		area->fobj = fobj_get(fobj);
708 		area->fobj_pgoffs = fobj_pgoffs;
709 		area->type = PAGER_AREA_TYPE_RW;
710 		area->base = b;
711 		area->size = s2;
712 		area->flags = TEE_MATTR_PRW | TEE_MATTR_URWX;
713 
714 		TAILQ_INSERT_TAIL(utc->areas, area, link);
715 		TAILQ_INSERT_TAIL(&fobj->areas, area, fobj_link);
716 		b += s2;
717 		s -= s2;
718 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
719 	}
720 
721 	return TEE_SUCCESS;
722 }
723 
724 TEE_Result tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
725 			    struct fobj *fobj)
726 {
727 	TEE_Result res = TEE_SUCCESS;
728 	struct thread_specific_data *tsd = thread_get_tsd();
729 	struct tee_pager_area *area = NULL;
730 	struct core_mmu_table_info dir_info = { NULL };
731 
732 	if (&utc->ctx != tsd->ctx) {
733 		/*
734 		 * Changes are to a utc that isn't active. Just add the
735 		 * areas; page tables will be dealt with later.
736 		 */
737 		return pager_add_uta_area(utc, base, fobj);
738 	}
739 
740 	/*
741 	 * Assign page tables before adding areas to be able to tell which
742 	 * are newly added and should be removed in case of failure.
743 	 */
744 	tee_pager_assign_uta_tables(utc);
745 	res = pager_add_uta_area(utc, base, fobj);
746 	if (res) {
747 		struct tee_pager_area *next_a;
748 
749 		/* Remove all added areas */
750 		TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
751 			if (!area->pgt) {
752 				unlink_area(utc->areas, area);
753 				free_area(area);
754 			}
755 		}
756 		return res;
757 	}
758 
759 	/*
760 	 * Assign page tables to the new areas and make sure that the page
761 	 * tables are registered in the upper table.
762 	 */
763 	tee_pager_assign_uta_tables(utc);
764 	core_mmu_get_user_pgdir(&dir_info);
765 	TAILQ_FOREACH(area, utc->areas, link) {
766 		paddr_t pa;
767 		size_t idx;
768 		uint32_t attr;
769 
770 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
771 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
772 
773 		/*
774 		 * Check if the page table is already in use; if it is, it's
775 		 * already registered.
776 		 */
777 		if (area->pgt->num_used_entries) {
778 			assert(attr & TEE_MATTR_TABLE);
779 			assert(pa == virt_to_phys(area->pgt->tbl));
780 			continue;
781 		}
782 
783 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
784 		pa = virt_to_phys(area->pgt->tbl);
785 		assert(pa);
786 		/*
787 		 * Note that the update of the table entry is guaranteed to
788 		 * be atomic.
789 		 */
790 		core_mmu_set_entry(&dir_info, idx, pa, attr);
791 	}
792 
793 	return TEE_SUCCESS;
794 }
795 
796 static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
797 				   struct pgt *pgt)
798 {
799 	assert(pgt);
800 	ti->table = pgt->tbl;
801 	ti->va_base = pgt->vabase;
802 	ti->level = TBL_LEVEL;
803 	ti->shift = TBL_SHIFT;
804 	ti->num_entries = TBL_NUM_ENTRIES;
805 }
806 
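/*
 * Moves @area to use @new_pgt with @new_base as its new base address, first
 * unmapping any of the area's pages from the old page table.
 */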
807 static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
808 			   vaddr_t new_base)
809 {
810 	uint32_t exceptions = pager_lock_check_stack(64);
811 
812 	/*
813 	 * If there's no pgt assigned to the old area there are no pages to
814 	 * deal with either; just update with the new pgt and base.
815 	 */
816 	if (area->pgt) {
817 		/*
818 		 * Just clear the old page table for now; once proper page
819 		 * sharing is in place we can remove this.
820 		 */
821 		struct core_mmu_table_info old_ti;
822 		struct tee_pager_pmem *pmem;
823 		uint32_t a = 0;
824 		unsigned int last_pgoffs = area->fobj_pgoffs +
825 					   (area->size >> SMALL_PAGE_SHIFT) - 1;
826 
827 		init_tbl_info_from_pgt(&old_ti, area->pgt);
828 
829 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
830 			size_t idx = 0;
831 
832 			if (pmem->fobj != area->fobj ||
833 			    pmem->fobj_pgidx < area->fobj_pgoffs ||
834 			    pmem->fobj_pgidx > last_pgoffs)
835 				continue;
836 
837 			idx = pmem_get_area_tblidx(pmem, area);
838 			core_mmu_get_entry(&old_ti, idx, NULL, &a);
839 			if (a & TEE_MATTR_VALID_BLOCK) {
840 				core_mmu_set_entry(&old_ti, idx, 0, 0);
841 				pgt_dec_used_entries(area->pgt);
842 			}
843 		}
844 	}
845 
846 	area->pgt = new_pgt;
847 	area->base = new_base;
848 	pager_unlock(exceptions);
849 }
850 KEEP_PAGER(transpose_area);
851 
852 void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
853 				   vaddr_t src_base,
854 				   struct user_ta_ctx *dst_utc,
855 				   vaddr_t dst_base, struct pgt **dst_pgt,
856 				   size_t size)
857 {
858 	struct tee_pager_area *area;
859 	struct tee_pager_area *next_a;
860 
861 	TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
862 		vaddr_t new_area_base;
863 		size_t new_idx;
864 
865 		if (!core_is_buffer_inside(area->base, area->size,
866 					  src_base, size))
867 			continue;
868 
869 		TAILQ_REMOVE(src_utc->areas, area, link);
870 
871 		new_area_base = dst_base + (src_base - area->base);
872 		new_idx = (new_area_base - dst_pgt[0]->vabase) /
873 			  CORE_MMU_PGDIR_SIZE;
874 		assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
875 		       dst_pgt[new_idx]->vabase);
876 		transpose_area(area, dst_pgt[new_idx], new_area_base);
877 		tlbi_asid(src_utc->vm_info->asid);
878 
879 		/*
880 		 * Assert that this will not cause any conflicts in the new
881 		 * utc.  This should already be guaranteed, but a bug here
882 		 * could be tricky to find.
883 		 */
884 		assert(!find_area(dst_utc->areas, area->base));
885 		TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
886 	}
887 }
888 
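/*
 * Unlinks @area from the area lists, unmaps its resident pages and frees it.
 */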
889 static void rem_area(struct tee_pager_area_head *area_head,
890 		     struct tee_pager_area *area)
891 {
892 	struct tee_pager_pmem *pmem;
893 	size_t last_pgoffs = area->fobj_pgoffs +
894 			     (area->size >> SMALL_PAGE_SHIFT) - 1;
895 	uint32_t exceptions;
896 	size_t idx = 0;
897 	uint32_t a = 0;
898 
899 	exceptions = pager_lock_check_stack(64);
900 
901 	TAILQ_REMOVE(area_head, area, link);
902 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
903 
904 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
905 		if (pmem->fobj != area->fobj ||
906 		    pmem->fobj_pgidx < area->fobj_pgoffs ||
907 		    pmem->fobj_pgidx > last_pgoffs)
908 			continue;
909 
910 		idx = pmem_get_area_tblidx(pmem, area);
911 		area_get_entry(area, idx, NULL, &a);
912 		if (!(a & TEE_MATTR_VALID_BLOCK))
913 			continue;
914 
915 		area_set_entry(area, idx, 0, 0);
916 		tlbi_mva_allasid(area_idx2va(area, idx));
917 		pgt_dec_used_entries(area->pgt);
918 	}
919 
920 	pager_unlock(exceptions);
921 
922 	free_area(area);
923 }
924 KEEP_PAGER(rem_area);
925 
926 void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
927 			      size_t size)
928 {
929 	struct tee_pager_area *area;
930 	struct tee_pager_area *next_a;
931 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
932 
933 	TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
934 		if (core_is_buffer_inside(area->base, area->size, base, s))
935 			rem_area(utc->areas, area);
936 	}
937 	tlbi_asid(utc->vm_info->asid);
938 }
939 
940 void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
941 {
942 	struct tee_pager_area *area;
943 
944 	if (!utc->areas)
945 		return;
946 
947 	while (true) {
948 		area = TAILQ_FIRST(utc->areas);
949 		if (!area)
950 			break;
951 		unlink_area(utc->areas, area);
952 		free_area(area);
953 	}
954 
955 	free(utc->areas);
956 }
957 
958 bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
959 				 size_t size, uint32_t flags)
960 {
961 	bool ret = false;
962 	vaddr_t b = base;
963 	size_t s = size;
964 	size_t s2 = 0;
965 	struct tee_pager_area *area = find_area(utc->areas, b);
966 	uint32_t exceptions = 0;
967 	struct tee_pager_pmem *pmem = NULL;
968 	uint32_t a = 0;
969 	uint32_t f = 0;
970 	uint32_t f2 = 0;
971 	size_t tblidx = 0;
972 
973 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
974 	if (f & TEE_MATTR_UW)
975 		f |= TEE_MATTR_PW;
976 	f = get_area_mattr(f);
977 
978 	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
979 
980 	while (s) {
981 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
982 		if (!area || area->base != b || area->size != s2) {
983 			ret = false;
984 			goto out;
985 		}
986 		b += s2;
987 		s -= s2;
988 
989 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
990 			if (!pmem_is_covered_by_area(pmem, area))
991 				continue;
992 
993 			tblidx = pmem_get_area_tblidx(pmem, area);
994 			area_get_entry(area, tblidx, NULL, &a);
995 			if (a == f)
996 				continue;
997 			area_set_entry(area, tblidx, 0, 0);
998 			tlbi_mva_allasid(area_idx2va(area, tblidx));
999 
1000 			pmem->flags &= ~PMEM_FLAG_HIDDEN;
1001 			if (pmem_is_dirty(pmem))
1002 				f2 = f;
1003 			else
1004 				f2 = f & ~(TEE_MATTR_UW | TEE_MATTR_PW);
1005 			area_set_entry(area, tblidx, get_pmem_pa(pmem), f2);
1006 			if (!(a & TEE_MATTR_VALID_BLOCK))
1007 				pgt_inc_used_entries(area->pgt);
1008 			/*
1009 			 * Make sure the table update is visible before
1010 			 * continuing.
1011 			 */
1012 			dsb_ishst();
1013 
1014 			/*
1015 			 * Here's a problem if this page is already shared:
1016 			 * we would need to do icache invalidation for each
1017 			 * context in which it is shared. In practice this will
1018 			 * never happen.
1019 			 */
1020 			if (flags & TEE_MATTR_UX) {
1021 				void *va = (void *)area_idx2va(area, tblidx);
1022 
1023 				/* Assert that the pmem isn't shared. */
1024 				assert(TAILQ_FIRST(&pmem->fobj->areas) ==
1025 				       TAILQ_LAST(&pmem->fobj->areas,
1026 						  tee_pager_area_head));
1027 
1028 				cache_op_inner(DCACHE_AREA_CLEAN, va,
1029 						SMALL_PAGE_SIZE);
1030 				cache_op_inner(ICACHE_AREA_INVALIDATE, va,
1031 						SMALL_PAGE_SIZE);
1032 			}
1033 		}
1034 
1035 		area->flags = f;
1036 		area = TAILQ_NEXT(area, link);
1037 	}
1038 
1039 	ret = true;
1040 out:
1041 	pager_unlock(exceptions);
1042 	return ret;
1043 }
1044 KEEP_PAGER(tee_pager_set_uta_area_attr);
1045 #endif /*CFG_PAGED_USER_TA*/
1046 
1047 void tee_pager_invalidate_fobj(struct fobj *fobj)
1048 {
1049 	struct tee_pager_pmem *pmem;
1050 	uint32_t exceptions;
1051 
1052 	exceptions = pager_lock_check_stack(64);
1053 
1054 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1055 		if (pmem->fobj == fobj) {
1056 			pmem->fobj = NULL;
1057 			pmem->fobj_pgidx = INVALID_PGIDX;
1058 		}
1059 	}
1060 
1061 	pager_unlock(exceptions);
1062 }
1063 KEEP_PAGER(tee_pager_invalidate_fobj);
1064 
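/*
 * Returns the pmem currently holding the page mapped at @tblidx in @area,
 * or NULL if that page isn't resident.
 */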
1065 static struct tee_pager_pmem *pmem_find(struct tee_pager_area *area,
1066 					unsigned int tblidx)
1067 {
1068 	struct tee_pager_pmem *pmem = NULL;
1069 
1070 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
1071 		if (pmem->fobj == area->fobj &&
1072 		    pmem_get_area_tblidx(pmem, area) == tblidx)
1073 			return pmem;
1074 
1075 	return NULL;
1076 }
1077 
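/*
 * If the page at @tblidx is resident but not mapped (hidden), map it again,
 * read-only unless it's dirty, move it to the back of the pmem list and
 * return true. Returns false otherwise.
 */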
1078 static bool tee_pager_unhide_page(struct tee_pager_area *area,
1079 				  unsigned int tblidx)
1080 {
1081 	struct tee_pager_pmem *pmem = pmem_find(area, tblidx);
1082 	uint32_t a = get_area_mattr(area->flags);
1083 	uint32_t attr = 0;
1084 
1085 	if (!pmem)
1086 		return false;
1087 
1088 	area_get_entry(area, tblidx, NULL, &attr);
1089 	if (attr & TEE_MATTR_VALID_BLOCK)
1090 		return false;
1091 
1092 	/* page is hidden, show and move to back */
1093 
1094 	/* If it's not a dirty block, then it should be read only. */
1095 	if (!pmem_is_dirty(pmem))
1096 		a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1097 
1098 	pmem->flags &= ~PMEM_FLAG_HIDDEN;
1099 	area_set_entry(area, tblidx, get_pmem_pa(pmem), a);
1100 	pgt_inc_used_entries(area->pgt);
1101 	/*
1102 	 * Note that TLB invalidation isn't needed since
1103 	 * there wasn't a valid mapping before. We should
1104 	 * use a barrier though, to make sure that the
1105 	 * change is visible.
1106 	 */
1107 	dsb_ishst();
1108 
1109 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1110 	TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1111 	incr_hidden_hits();
1112 	return true;
1113 }
1114 
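/*
 * Unmaps up to TEE_PAGER_NHIDE of the oldest resident pages so that the next
 * access to them faults and the accessed pages are moved to the back of the
 * pmem list.
 */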
1115 static void tee_pager_hide_pages(void)
1116 {
1117 	struct tee_pager_pmem *pmem = NULL;
1118 	size_t n = 0;
1119 
1120 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1121 		if (n >= TEE_PAGER_NHIDE)
1122 			break;
1123 		n++;
1124 
1125 		/* We cannot hide pages when pmem->fobj is not set. */
1126 		if (!pmem->fobj)
1127 			continue;
1128 
1129 		if (pmem_is_hidden(pmem))
1130 			continue;
1131 
1132 		pmem->flags |= PMEM_FLAG_HIDDEN;
1133 		pmem_unmap(pmem, NULL);
1134 	}
1135 }
1136 
1137 /*
1138  * Find the mapped pmem, unmap it and move it back to the pageable pmem list.
1139  * Return false if the page was not mapped, and true if it was.
1140  */
1141 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1142 				       vaddr_t page_va)
1143 {
1144 	struct tee_pager_pmem *pmem;
1145 	size_t tblidx = 0;
1146 	size_t pgidx = area_va2idx(area, page_va) + area->fobj_pgoffs -
1147 		       ((area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT);
1148 
1149 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1150 		if (pmem->fobj != area->fobj || pmem->fobj_pgidx != pgidx)
1151 			continue;
1152 
1153 		/*
1154 		 * Locked pages may not be shared; these two asserts check
1155 		 * that there's only a single area recorded with this pmem.
1156 		 */
1157 		assert(TAILQ_FIRST(&pmem->fobj->areas) == area);
1158 		assert(TAILQ_LAST(&pmem->fobj->areas,
1159 				  tee_pager_area_head) == area);
1160 
1161 		tblidx = pmem_get_area_tblidx(pmem, area);
1162 		area_set_entry(area, tblidx, 0, 0);
1163 		pgt_dec_used_entries(area->pgt);
1164 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1165 		pmem->fobj = NULL;
1166 		pmem->fobj_pgidx = INVALID_PGIDX;
1167 		tee_pager_npages++;
1168 		set_npages();
1169 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1170 		incr_zi_released();
1171 		return true;
1172 	}
1173 
1174 	return false;
1175 }
1176 
1177 /* Takes the oldest page, unmaps it from all tables and saves it if dirty */
1178 static struct tee_pager_pmem *tee_pager_get_page(enum tee_pager_area_type at)
1179 {
1180 	struct tee_pager_pmem *pmem;
1181 
1182 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1183 	if (!pmem) {
1184 		EMSG("No pmem entries");
1185 		return NULL;
1186 	}
1187 
1188 	if (pmem->fobj) {
1189 		pmem_unmap(pmem, NULL);
1190 		tee_pager_save_page(pmem);
1191 	}
1192 
1193 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1194 	pmem->fobj = NULL;
1195 	pmem->fobj_pgidx = INVALID_PGIDX;
1196 	pmem->flags = 0;
1197 	if (at == PAGER_AREA_TYPE_LOCK) {
1198 		/* Move page to lock list */
1199 		if (tee_pager_npages <= 0)
1200 			panic("running out of page");
1201 		tee_pager_npages--;
1202 		set_npages();
1203 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1204 	} else {
1205 		/* move page to back */
1206 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1207 	}
1208 
1209 	return pmem;
1210 }
1211 
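/*
 * Handles aborts on pages that are already mapped, typically permission
 * faults used for dirty tracking. Returns false if the page isn't mapped
 * and still needs to be paged in, true otherwise, with *handled telling
 * whether the abort was resolved or should be reported to the TA.
 */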
1212 static bool pager_update_permissions(struct tee_pager_area *area,
1213 			struct abort_info *ai, bool *handled)
1214 {
1215 	unsigned int pgidx = area_va2idx(area, ai->va);
1216 	struct tee_pager_pmem *pmem = NULL;
1217 	uint32_t attr = 0;
1218 	paddr_t pa = 0;
1219 
1220 	*handled = false;
1221 
1222 	area_get_entry(area, pgidx, &pa, &attr);
1223 
1224 	/* Not mapped */
1225 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1226 		return false;
1227 
1228 	/* Not readable, should not happen */
1229 	if (abort_is_user_exception(ai)) {
1230 		if (!(attr & TEE_MATTR_UR))
1231 			return true;
1232 	} else {
1233 		if (!(attr & TEE_MATTR_PR)) {
1234 			abort_print_error(ai);
1235 			panic();
1236 		}
1237 	}
1238 
1239 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1240 	case CORE_MMU_FAULT_TRANSLATION:
1241 	case CORE_MMU_FAULT_READ_PERMISSION:
1242 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1243 			/* Check attempting to execute from an NOX page */
1244 			if (abort_is_user_exception(ai)) {
1245 				if (!(attr & TEE_MATTR_UX))
1246 					return true;
1247 			} else {
1248 				if (!(attr & TEE_MATTR_PX)) {
1249 					abort_print_error(ai);
1250 					panic();
1251 				}
1252 			}
1253 		}
1254 		/* Since the page is mapped now it's OK */
1255 		break;
1256 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1257 		/* Check attempting to write to an RO page */
1258 		pmem = pmem_find(area, pgidx);
1259 		if (!pmem)
1260 			panic();
1261 		if (abort_is_user_exception(ai)) {
1262 			if (!(area->flags & TEE_MATTR_UW))
1263 				return true;
1264 			if (!(attr & TEE_MATTR_UW)) {
1265 				FMSG("Dirty %p",
1266 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1267 				pmem->flags |= PMEM_FLAG_DIRTY;
1268 				area_set_entry(area, pgidx, pa,
1269 					       get_area_mattr(area->flags));
1270 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1271 			}
1272 
1273 		} else {
1274 			if (!(area->flags & TEE_MATTR_PW)) {
1275 				abort_print_error(ai);
1276 				panic();
1277 			}
1278 			if (!(attr & TEE_MATTR_PW)) {
1279 				FMSG("Dirty %p",
1280 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1281 				pmem->flags |= PMEM_FLAG_DIRTY;
1282 				area_set_entry(area, pgidx, pa,
1283 					       get_area_mattr(area->flags));
1284 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1285 			}
1286 		}
1287 		/* Since permissions have been updated it's now OK */
1288 		break;
1289 	default:
1290 		/* Some fault we can't deal with */
1291 		if (abort_is_user_exception(ai))
1292 			return true;
1293 		abort_print_error(ai);
1294 		panic();
1295 	}
1296 	*handled = true;
1297 	return true;
1298 }
1299 
1300 #ifdef CFG_TEE_CORE_DEBUG
1301 static void stat_handle_fault(void)
1302 {
1303 	static size_t num_faults;
1304 	static size_t min_npages = SIZE_MAX;
1305 	static size_t total_min_npages = SIZE_MAX;
1306 
1307 	num_faults++;
1308 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1309 		DMSG("nfaults %zu npages %zu (min %zu)",
1310 		     num_faults, tee_pager_npages, min_npages);
1311 		min_npages = tee_pager_npages; /* reset */
1312 	}
1313 	if (tee_pager_npages < min_npages)
1314 		min_npages = tee_pager_npages;
1315 	if (tee_pager_npages < total_min_npages)
1316 		total_min_npages = tee_pager_npages;
1317 }
1318 #else
1319 static void stat_handle_fault(void)
1320 {
1321 }
1322 #endif
1323 
1324 bool tee_pager_handle_fault(struct abort_info *ai)
1325 {
1326 	struct tee_pager_area *area;
1327 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1328 	uint32_t exceptions;
1329 	bool ret;
1330 
1331 #ifdef TEE_PAGER_DEBUG_PRINT
1332 	abort_print(ai);
1333 #endif
1334 
1335 	/*
1336 	 * We're updating pages that can affect several active CPUs at a
1337 	 * time below. We end up here because a thread tries to access some
1338 	 * memory that isn't available. We have to be careful when making
1339 	 * that memory available as other threads may succeed in accessing
1340 	 * that address the moment after we've made it available.
1341 	 *
1342 	 * That means that we can't just map the memory and populate the
1343 	 * page, instead we use the aliased mapping to populate the page
1344 	 * and once everything is ready we map it.
1345 	 */
1346 	exceptions = pager_lock(ai);
1347 
1348 	stat_handle_fault();
1349 
1350 	/* check if the access is valid */
1351 	if (abort_is_user_exception(ai)) {
1352 		area = find_uta_area(ai->va);
1353 
1354 	} else {
1355 		area = find_area(&tee_pager_area_head, ai->va);
1356 		if (!area)
1357 			area = find_uta_area(ai->va);
1358 	}
1359 	if (!area || !area->pgt) {
1360 		ret = false;
1361 		goto out;
1362 	}
1363 
1364 	if (!tee_pager_unhide_page(area, area_va2idx(area, page_va))) {
1365 		struct tee_pager_pmem *pmem = NULL;
1366 		uint32_t attr = 0;
1367 		paddr_t pa = 0;
1368 		size_t tblidx = 0;
1369 
1370 		/*
1371 		 * The page wasn't hidden, but some other core may have
1372 		 * updated the table entry before we got here or we need
1373 		 * to make a read-only page read-write (dirty).
1374 		 */
1375 		if (pager_update_permissions(area, ai, &ret)) {
1376 			/*
1377 			 * Nothing more to do with the abort. The problem
1378 			 * could already have been dealt with from another
1379 			 * core, or if ret is false the TA will be panicked.
1380 			 */
1381 			goto out;
1382 		}
1383 
1384 		pmem = tee_pager_get_page(area->type);
1385 		if (!pmem) {
1386 			abort_print(ai);
1387 			panic();
1388 		}
1389 
1390 		/* load page code & data */
1391 		tee_pager_load_page(area, page_va, pmem->va_alias);
1392 
1393 
1394 		pmem->fobj = area->fobj;
1395 		pmem->fobj_pgidx = area_va2idx(area, page_va) +
1396 				   area->fobj_pgoffs -
1397 				   ((area->base & CORE_MMU_PGDIR_MASK) >>
1398 					SMALL_PAGE_SHIFT);
1399 		tblidx = pmem_get_area_tblidx(pmem, area);
1400 		attr = get_area_mattr(area->flags);
1401 		/*
1402 		 * Pages from PAGER_AREA_TYPE_RW start read-only to be
1403 		 * able to tell when they are updated and should be tagged
1404 		 * as dirty.
1405 		 */
1406 		if (area->type == PAGER_AREA_TYPE_RW)
1407 			attr &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1408 		pa = get_pmem_pa(pmem);
1409 
1410 		/*
1411 		 * We've updated the page using the aliased mapping and
1412 		 * some cache maintenance is now needed if it's an
1413 		 * executable page.
1414 		 *
1415 		 * Since the d-cache is a Physically-indexed,
1416 		 * physically-tagged (PIPT) cache we can clean either the
1417 		 * aliased address or the real virtual address. In this
1418 		 * case we choose the real virtual address.
1419 		 *
1420 		 * The i-cache can also be PIPT, but may be something else
1421 		 * too like VIPT. The current code requires the caches to
1422 		 * implement the IVIPT extension, that is:
1423 		 * "instruction cache maintenance is required only after
1424 		 * writing new data to a physical address that holds an
1425 		 * instruction."
1426 		 *
1427 		 * To portably invalidate the icache the page has to
1428 		 * be mapped at the final virtual address but not
1429 		 * executable.
1430 		 */
1431 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1432 			uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1433 					TEE_MATTR_PW | TEE_MATTR_UW;
1434 
1435 			/* Set a temporary read-only mapping */
1436 			area_set_entry(area, tblidx, pa, attr & ~mask);
1437 			tlbi_mva_allasid(page_va);
1438 
1439 			/*
1440 			 * Doing these operations to LoUIS (Level of
1441 			 * unification, Inner Shareable) would be enough
1442 			 */
1443 			cache_op_inner(DCACHE_AREA_CLEAN, (void *)page_va,
1444 				       SMALL_PAGE_SIZE);
1445 			cache_op_inner(ICACHE_AREA_INVALIDATE, (void *)page_va,
1446 				       SMALL_PAGE_SIZE);
1447 
1448 			/* Set the final mapping */
1449 			area_set_entry(area, tblidx, pa, attr);
1450 			tlbi_mva_allasid(page_va);
1451 		} else {
1452 			area_set_entry(area, tblidx, pa, attr);
1453 			/*
1454 			 * No need to flush TLB for this entry, it was
1455 			 * invalid. We should use a barrier though, to make
1456 			 * sure that the change is visible.
1457 			 */
1458 			dsb_ishst();
1459 		}
1460 		pgt_inc_used_entries(area->pgt);
1461 
1462 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1463 
1464 	}
1465 
1466 	tee_pager_hide_pages();
1467 	ret = true;
1468 out:
1469 	pager_unlock(exceptions);
1470 	return ret;
1471 }
1472 
1473 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1474 {
1475 	size_t n;
1476 
1477 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1478 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1479 
1480 	/* setup memory */
1481 	for (n = 0; n < npages; n++) {
1482 		struct core_mmu_table_info *ti;
1483 		struct tee_pager_pmem *pmem;
1484 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1485 		unsigned int pgidx;
1486 		paddr_t pa;
1487 		uint32_t attr;
1488 
1489 		ti = find_table_info(va);
1490 		pgidx = core_mmu_va2idx(ti, va);
1491 		/*
1492 		 * Note that we can only support adding pages in the
1493 		 * valid range of this table info, currently not a problem.
1494 		 */
1495 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1496 
1497 		/* Ignore unmapped pages/blocks */
1498 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1499 			continue;
1500 
1501 		pmem = calloc(1, sizeof(struct tee_pager_pmem));
1502 		if (!pmem)
1503 			panic("out of mem");
1504 
1505 		pmem->va_alias = pager_add_alias_page(pa);
1506 
1507 		if (unmap) {
1508 			pmem->fobj = NULL;
1509 			pmem->fobj_pgidx = INVALID_PGIDX;
1510 			core_mmu_set_entry(ti, pgidx, 0, 0);
1511 			pgt_dec_used_entries(find_core_pgt(va));
1512 		} else {
1513 			struct tee_pager_area *area = NULL;
1514 
1515 			/*
1516 			 * The page is still mapped, let's assign the area
1517 			 * and update the protection bits accordingly.
1518 			 */
1519 			area = find_area(&tee_pager_area_head, va);
1520 			assert(area && area->pgt == find_core_pgt(va));
1521 			pmem->fobj = area->fobj;
1522 			pmem->fobj_pgidx = pgidx + area->fobj_pgoffs -
1523 					   ((area->base &
1524 							CORE_MMU_PGDIR_MASK) >>
1525 						SMALL_PAGE_SHIFT);
1526 			assert(pgidx == pmem_get_area_tblidx(pmem, area));
1527 			assert(pa == get_pmem_pa(pmem));
1528 			area_set_entry(area, pgidx, pa,
1529 				       get_area_mattr(area->flags));
1530 		}
1531 
1532 		tee_pager_npages++;
1533 		incr_npages_all();
1534 		set_npages();
1535 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1536 	}
1537 
1538 	/*
1539 	 * As this is done at init time, invalidate all TLBs once instead of
1540 	 * targeting only the modified entries.
1541 	 */
1542 	tlbi_all();
1543 }
1544 
1545 #ifdef CFG_PAGED_USER_TA
1546 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1547 {
1548 	struct pgt *p = pgt;
1549 
1550 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1551 		p = SLIST_NEXT(p, link);
1552 	return p;
1553 }
1554 
1555 void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
1556 {
1557 	struct tee_pager_area *area;
1558 	struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1559 
1560 	TAILQ_FOREACH(area, utc->areas, link) {
1561 		if (!area->pgt)
1562 			area->pgt = find_pgt(pgt, area->base);
1563 		else
1564 			assert(area->pgt == find_pgt(pgt, area->base));
1565 		if (!area->pgt)
1566 			panic();
1567 	}
1568 }
1569 
1570 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1571 {
1572 	struct tee_pager_pmem *pmem;
1573 	struct tee_pager_area *area;
1574 	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1575 
1576 	if (!pgt->num_used_entries)
1577 		goto out;
1578 
1579 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1580 		if (pmem->fobj)
1581 			pmem_unmap(pmem, pgt);
1582 	}
1583 	assert(!pgt->num_used_entries);
1584 
1585 out:
1586 	TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
1587 		if (area->pgt == pgt)
1588 			area->pgt = NULL;
1589 	}
1590 
1591 	pager_unlock(exceptions);
1592 }
1593 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1594 #endif /*CFG_PAGED_USER_TA*/
1595 
1596 void tee_pager_release_phys(void *addr, size_t size)
1597 {
1598 	bool unmapped = false;
1599 	vaddr_t va = (vaddr_t)addr;
1600 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1601 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1602 	struct tee_pager_area *area;
1603 	uint32_t exceptions;
1604 
1605 	if (end <= begin)
1606 		return;
1607 
1608 	exceptions = pager_lock_check_stack(128);
1609 
1610 	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
1611 		area = find_area(&tee_pager_area_head, va);
1612 		if (!area)
1613 			panic();
1614 		unmapped |= tee_pager_release_one_phys(area, va);
1615 	}
1616 
1617 	if (unmapped)
1618 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1619 
1620 	pager_unlock(exceptions);
1621 }
1622 KEEP_PAGER(tee_pager_release_phys);
1623 
1624 void *tee_pager_alloc(size_t size)
1625 {
1626 	tee_mm_entry_t *mm = NULL;
1627 	uint8_t *smem = NULL;
1628 	size_t num_pages = 0;
1629 	struct fobj *fobj = NULL;
1630 
1631 	if (!size)
1632 		return NULL;
1633 
1634 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1635 	if (!mm)
1636 		return NULL;
1637 
1638 	smem = (uint8_t *)tee_mm_get_smem(mm);
1639 	num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
1640 	fobj = fobj_locked_paged_alloc(num_pages);
1641 	if (!fobj) {
1642 		tee_mm_free(mm);
1643 		return NULL;
1644 	}
1645 
1646 	tee_pager_add_core_area((vaddr_t)smem, PAGER_AREA_TYPE_LOCK, fobj);
1647 	fobj_put(fobj);
1648 
1649 	asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE);
1650 
1651 	return smem;
1652 }
1653