xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 15ba8c1f4c9e41016104425e94dad2eedf2b38fd)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <io.h>
10 #include <keep.h>
11 #include <kernel/abort.h>
12 #include <kernel/asan.h>
13 #include <kernel/cache_helpers.h>
14 #include <kernel/panic.h>
15 #include <kernel/spinlock.h>
16 #include <kernel/tee_misc.h>
17 #include <kernel/tee_ta_manager.h>
18 #include <kernel/thread.h>
19 #include <kernel/tlb_helpers.h>
20 #include <kernel/user_mode_ctx.h>
21 #include <mm/core_memprot.h>
22 #include <mm/fobj.h>
23 #include <mm/tee_mm.h>
24 #include <mm/tee_pager.h>
25 #include <stdlib.h>
26 #include <sys/queue.h>
27 #include <tee_api_defines.h>
28 #include <trace.h>
29 #include <types_ext.h>
30 #include <utee_defines.h>
31 #include <util.h>
32 
33 
34 static struct tee_pager_area_head tee_pager_area_head =
35 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
36 
37 #define INVALID_PGIDX		UINT_MAX
38 #define PMEM_FLAG_DIRTY		BIT(0)
39 #define PMEM_FLAG_HIDDEN	BIT(1)
40 
41 /*
42  * struct tee_pager_pmem - Represents a physical page used for paging.
43  *
44  * @flags	flags defined by PMEM_FLAG_* above
45  * @fobj_pgidx	index of the page in the @fobj
46  * @fobj	File object of which a page is made visible.
47  * @va_alias	Virtual address where the physical page is always aliased.
48  *		Used during remapping of the page when the content needs to
49  *		be updated before it's available at the new location.
50  */
51 struct tee_pager_pmem {
52 	unsigned int flags;
53 	unsigned int fobj_pgidx;
54 	struct fobj *fobj;
55 	void *va_alias;
56 	TAILQ_ENTRY(tee_pager_pmem) link;
57 };
58 
59 /* The list of physical pages. The first page in the list is the oldest */
60 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
61 
62 static struct tee_pager_pmem_head tee_pager_pmem_head =
63 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
64 
65 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
66 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
67 
68 /* Maximum number of pages hidden at a time */
69 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
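/*
 * Example: with 30 registered pages, up to 10 are hidden (unmapped) at a
 * time by tee_pager_hide_pages(). A fault on a hidden page unhides it and
 * moves it to the tail of the pmem list, so the head of the list tends to
 * hold the least recently used pages and is the preferred eviction victim.
 */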
70 
71 /* Number of registered physical pages, used when hiding pages. */
72 static size_t tee_pager_npages;
73 
74 #ifdef CFG_WITH_STATS
75 static struct tee_pager_stats pager_stats;
76 
77 static inline void incr_ro_hits(void)
78 {
79 	pager_stats.ro_hits++;
80 }
81 
82 static inline void incr_rw_hits(void)
83 {
84 	pager_stats.rw_hits++;
85 }
86 
87 static inline void incr_hidden_hits(void)
88 {
89 	pager_stats.hidden_hits++;
90 }
91 
92 static inline void incr_zi_released(void)
93 {
94 	pager_stats.zi_released++;
95 }
96 
97 static inline void incr_npages_all(void)
98 {
99 	pager_stats.npages_all++;
100 }
101 
102 static inline void set_npages(void)
103 {
104 	pager_stats.npages = tee_pager_npages;
105 }
106 
107 void tee_pager_get_stats(struct tee_pager_stats *stats)
108 {
109 	*stats = pager_stats;
110 
111 	pager_stats.hidden_hits = 0;
112 	pager_stats.ro_hits = 0;
113 	pager_stats.rw_hits = 0;
114 	pager_stats.zi_released = 0;
115 }
116 
117 #else /* CFG_WITH_STATS */
118 static inline void incr_ro_hits(void) { }
119 static inline void incr_rw_hits(void) { }
120 static inline void incr_hidden_hits(void) { }
121 static inline void incr_zi_released(void) { }
122 static inline void incr_npages_all(void) { }
123 static inline void set_npages(void) { }
124 
125 void tee_pager_get_stats(struct tee_pager_stats *stats)
126 {
127 	memset(stats, 0, sizeof(struct tee_pager_stats));
128 }
129 #endif /* CFG_WITH_STATS */
130 
131 #define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
132 #define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
133 #define TBL_SHIFT	SMALL_PAGE_SHIFT
134 
135 #define EFFECTIVE_VA_SIZE \
136 	(ROUNDUP(TEE_RAM_VA_START + TEE_RAM_VA_SIZE, \
137 		 CORE_MMU_PGDIR_SIZE) - \
138 	 ROUNDDOWN(TEE_RAM_VA_START, CORE_MMU_PGDIR_SIZE))
139 
140 static struct pager_table {
141 	struct pgt pgt;
142 	struct core_mmu_table_info tbl_info;
143 } pager_tables[EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE];
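/*
 * Worked example with illustrative values (the real ones are
 * platform-dependent): assuming CORE_MMU_PGDIR_SIZE is 2 MiB,
 * TEE_RAM_VA_START is 0x10100000 and TEE_RAM_VA_SIZE is 0x300000, the
 * range is rounded out to 0x10000000..0x10400000, EFFECTIVE_VA_SIZE
 * becomes 0x400000 and pager_tables[] holds two entries, each covering one
 * pgdir-aligned 2 MiB block with 512 small-page (4 KiB) table entries.
 */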
144 
145 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
146 
147 /* Defines the range of the alias area */
148 static tee_mm_entry_t *pager_alias_area;
149 /*
150  * Physical pages are added in a stack-like fashion to the alias area.
151  * @pager_alias_next_free gives the address of the next free entry if
152  * @pager_alias_next_free is != 0
153  */
154 static uintptr_t pager_alias_next_free;
155 
156 #ifdef CFG_TEE_CORE_DEBUG
157 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
158 
159 static uint32_t pager_lock_dldetect(const char *func, const int line,
160 				    struct abort_info *ai)
161 {
162 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
163 	unsigned int retries = 0;
164 	unsigned int reminder = 0;
165 
166 	while (!cpu_spin_trylock(&pager_spinlock)) {
167 		retries++;
168 		if (!retries) {
169 			/* wrapped, time to report */
170 			trace_printf(func, line, TRACE_ERROR, true,
171 				     "possible spinlock deadlock reminder %u",
172 				     reminder);
173 			if (reminder < UINT_MAX)
174 				reminder++;
175 			if (ai)
176 				abort_print(ai);
177 		}
178 	}
179 
180 	return exceptions;
181 }
182 #else
183 static uint32_t pager_lock(struct abort_info __unused *ai)
184 {
185 	return cpu_spin_lock_xsave(&pager_spinlock);
186 }
187 #endif
188 
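/*
 * Takes the pager lock after first touching @stack_size bytes of the
 * current stack. Callers pass the worst-case amount of stack they will use
 * while holding the lock (for instance pager_lock_check_stack(64) before
 * manipulating the area lists), so any page fault on the stack happens
 * before the lock is taken.
 */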
189 static uint32_t pager_lock_check_stack(size_t stack_size)
190 {
191 	if (stack_size) {
192 		int8_t buf[stack_size];
193 		size_t n;
194 
195 		/*
196 		 * Make sure to touch all pages of the stack that we expect
197 		 * to use with this lock held. We need to take any potential
198 		 * page faults before the lock is taken or we'll deadlock
199 		 * the pager. The pages that are populated in this way will
200 		 * eventually be released at certain save transitions of
201 		 * the thread.
202 		 */
203 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
204 			io_write8((vaddr_t)buf + n, 1);
205 		io_write8((vaddr_t)buf + stack_size - 1, 1);
206 	}
207 
208 	return pager_lock(NULL);
209 }
210 
211 static void pager_unlock(uint32_t exceptions)
212 {
213 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
214 }
215 
216 void *tee_pager_phys_to_virt(paddr_t pa)
217 {
218 	struct core_mmu_table_info ti;
219 	unsigned idx;
220 	uint32_t a;
221 	paddr_t p;
222 	vaddr_t v;
223 	size_t n;
224 
225 	/*
226 	 * Most addresses are mapped linearly, try that first if possible.
227 	 */
228 	if (!tee_pager_get_table_info(pa, &ti))
229 		return NULL; /* impossible pa */
230 	idx = core_mmu_va2idx(&ti, pa);
231 	core_mmu_get_entry(&ti, idx, &p, &a);
232 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
233 		return (void *)core_mmu_idx2va(&ti, idx);
234 
235 	n = 0;
236 	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
237 	while (true) {
238 		while (idx < TBL_NUM_ENTRIES) {
239 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
240 			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
241 				return NULL;
242 
243 			core_mmu_get_entry(&pager_tables[n].tbl_info,
244 					   idx, &p, &a);
245 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
246 				return (void *)v;
247 			idx++;
248 		}
249 
250 		n++;
251 		if (n >= ARRAY_SIZE(pager_tables))
252 			return NULL;
253 		idx = 0;
254 	}
255 
256 	return NULL;
257 }
258 
259 static bool pmem_is_hidden(struct tee_pager_pmem *pmem)
260 {
261 	return pmem->flags & PMEM_FLAG_HIDDEN;
262 }
263 
264 static bool pmem_is_dirty(struct tee_pager_pmem *pmem)
265 {
266 	return pmem->flags & PMEM_FLAG_DIRTY;
267 }
268 
269 static bool pmem_is_covered_by_area(struct tee_pager_pmem *pmem,
270 				    struct tee_pager_area *area)
271 {
272 	if (pmem->fobj != area->fobj)
273 		return false;
274 	if (pmem->fobj_pgidx < area->fobj_pgoffs)
275 		return false;
276 	if ((pmem->fobj_pgidx - area->fobj_pgoffs) >=
277 	    (area->size >> SMALL_PAGE_SHIFT))
278 		return false;
279 
280 	return true;
281 }
282 
283 static size_t pmem_get_area_tblidx(struct tee_pager_pmem *pmem,
284 				   struct tee_pager_area *area)
285 {
286 	size_t tbloffs = (area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT;
287 
288 	return pmem->fobj_pgidx - area->fobj_pgoffs + tbloffs;
289 }
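/*
 * Example with hypothetical numbers (2 MiB CORE_MMU_PGDIR_SIZE as above):
 * for an area with base 0x10402000 (so tbloffs is 2) and fobj_pgoffs 0, a
 * pmem with fobj_pgidx 3 gets table index 5, i.e. it maps at the pgdir
 * base plus five small pages, which is area->base + 3 * SMALL_PAGE_SIZE as
 * expected.
 */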
290 
291 static struct pager_table *find_pager_table_may_fail(vaddr_t va)
292 {
293 	size_t n;
294 	const vaddr_t mask = CORE_MMU_PGDIR_MASK;
295 
296 	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
297 	    CORE_MMU_PGDIR_SHIFT;
298 	if (n >= ARRAY_SIZE(pager_tables))
299 		return NULL;
300 
301 	assert(va >= pager_tables[n].tbl_info.va_base &&
302 	       va <= (pager_tables[n].tbl_info.va_base | mask));
303 
304 	return pager_tables + n;
305 }
306 
307 static struct pager_table *find_pager_table(vaddr_t va)
308 {
309 	struct pager_table *pt = find_pager_table_may_fail(va);
310 
311 	assert(pt);
312 	return pt;
313 }
314 
315 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
316 {
317 	struct pager_table *pt = find_pager_table_may_fail(va);
318 
319 	if (!pt)
320 		return false;
321 
322 	*ti = pt->tbl_info;
323 	return true;
324 }
325 
326 static struct core_mmu_table_info *find_table_info(vaddr_t va)
327 {
328 	return &find_pager_table(va)->tbl_info;
329 }
330 
331 static struct pgt *find_core_pgt(vaddr_t va)
332 {
333 	return &find_pager_table(va)->pgt;
334 }
335 
336 void tee_pager_set_alias_area(tee_mm_entry_t *mm)
337 {
338 	struct pager_table *pt;
339 	unsigned idx;
340 	vaddr_t smem = tee_mm_get_smem(mm);
341 	size_t nbytes = tee_mm_get_bytes(mm);
342 	vaddr_t v;
343 	uint32_t a = 0;
344 
345 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
346 
347 	assert(!pager_alias_area);
348 	pager_alias_area = mm;
349 	pager_alias_next_free = smem;
350 
351 	/* Clear all mappings in the alias area */
352 	pt = find_pager_table(smem);
353 	idx = core_mmu_va2idx(&pt->tbl_info, smem);
354 	while (pt <= (pager_tables + ARRAY_SIZE(pager_tables) - 1)) {
355 		while (idx < TBL_NUM_ENTRIES) {
356 			v = core_mmu_idx2va(&pt->tbl_info, idx);
357 			if (v >= (smem + nbytes))
358 				goto out;
359 
360 			core_mmu_get_entry(&pt->tbl_info, idx, NULL, &a);
361 			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
362 			if (a & TEE_MATTR_VALID_BLOCK)
363 				pgt_dec_used_entries(&pt->pgt);
364 			idx++;
365 		}
366 
367 		pt++;
368 		idx = 0;
369 	}
370 
371 out:
372 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
373 }
374 
375 static size_t tbl_usage_count(struct core_mmu_table_info *ti)
376 {
377 	size_t n;
378 	uint32_t a = 0;
379 	size_t usage = 0;
380 
381 	for (n = 0; n < ti->num_entries; n++) {
382 		core_mmu_get_entry(ti, n, NULL, &a);
383 		if (a & TEE_MATTR_VALID_BLOCK)
384 			usage++;
385 	}
386 	return usage;
387 }
388 
389 static void area_get_entry(struct tee_pager_area *area, size_t idx,
390 			   paddr_t *pa, uint32_t *attr)
391 {
392 	assert(area->pgt);
393 	assert(idx < TBL_NUM_ENTRIES);
394 	core_mmu_get_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
395 }
396 
397 static void area_set_entry(struct tee_pager_area *area, size_t idx,
398 			   paddr_t pa, uint32_t attr)
399 {
400 	assert(area->pgt);
401 	assert(idx < TBL_NUM_ENTRIES);
402 	core_mmu_set_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
403 }
404 
405 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
406 {
407 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
408 }
409 
410 static vaddr_t area_idx2va(struct tee_pager_area *area, size_t idx)
411 {
412 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
413 }
414 
415 static void area_tlbi_entry(struct tee_pager_area *area, size_t idx)
416 {
417 	vaddr_t va = area_idx2va(area, idx);
418 
419 #if defined(CFG_PAGED_USER_TA)
420 	assert(area->pgt);
421 	if (area->pgt->ctx) {
422 		uint32_t asid = to_user_mode_ctx(area->pgt->ctx)->vm_info.asid;
423 
424 		tlbi_mva_asid(va, asid);
425 		return;
426 	}
427 #endif
428 	tlbi_mva_allasid(va);
429 }
430 
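/*
 * Unmap @pmem from every area that currently maps it. If @only_this_pgt is
 * non-NULL, only areas using that page table are touched. Each removed
 * entry decrements the page table usage count and invalidates the TLB for
 * the affected virtual address.
 */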
431 static void pmem_unmap(struct tee_pager_pmem *pmem, struct pgt *only_this_pgt)
432 {
433 	struct tee_pager_area *area = NULL;
434 	size_t tblidx = 0;
435 	uint32_t a = 0;
436 
437 	TAILQ_FOREACH(area, &pmem->fobj->areas, fobj_link) {
438 		/*
439 		 * If only_this_pgt points to a pgt then the pgt of this
440 		 * area has to match or we'll skip over it.
441 		 */
442 		if (only_this_pgt && area->pgt != only_this_pgt)
443 			continue;
444 		if (!area->pgt || !pmem_is_covered_by_area(pmem, area))
445 			continue;
446 		tblidx = pmem_get_area_tblidx(pmem, area);
447 		area_get_entry(area, tblidx, NULL, &a);
448 		if (a & TEE_MATTR_VALID_BLOCK) {
449 			area_set_entry(area, tblidx, 0, 0);
450 			pgt_dec_used_entries(area->pgt);
451 			area_tlbi_entry(area, tblidx);
452 		}
453 	}
454 }
455 
456 void tee_pager_early_init(void)
457 {
458 	size_t n;
459 
460 	/*
461 	 * Note that this depends on add_pager_vaspace() adding vaspace
462 	 * after the end of memory.
463 	 */
464 	for (n = 0; n < ARRAY_SIZE(pager_tables); n++) {
465 		if (!core_mmu_find_table(NULL, TEE_RAM_VA_START +
466 					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
467 					 &pager_tables[n].tbl_info))
468 			panic("can't find mmu tables");
469 
470 		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
471 			panic("Unsupported page size in translation table");
472 		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
473 		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);
474 
475 		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
476 		pgt_set_used_entries(&pager_tables[n].pgt,
477 				tbl_usage_count(&pager_tables[n].tbl_info));
478 	}
479 }
480 
481 static void *pager_add_alias_page(paddr_t pa)
482 {
483 	unsigned idx;
484 	struct core_mmu_table_info *ti;
485 	/* Alias pages mapped without write permission; it's enabled when needed */
486 	uint32_t attr = TEE_MATTR_VALID_BLOCK |
487 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
488 			TEE_MATTR_SECURE | TEE_MATTR_PR;
489 
490 	DMSG("0x%" PRIxPA, pa);
491 
492 	ti = find_table_info(pager_alias_next_free);
493 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
494 	core_mmu_set_entry(ti, idx, pa, attr);
495 	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
496 	pager_alias_next_free += SMALL_PAGE_SIZE;
497 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
498 				      tee_mm_get_bytes(pager_alias_area)))
499 		pager_alias_next_free = 0;
500 	return (void *)core_mmu_idx2va(ti, idx);
501 }
502 
503 static void area_insert(struct tee_pager_area_head *head,
504 			struct tee_pager_area *area,
505 			struct tee_pager_area *a_prev)
506 {
507 	uint32_t exceptions = pager_lock_check_stack(8);
508 
509 	if (a_prev)
510 		TAILQ_INSERT_AFTER(head, a_prev, area, link);
511 	else
512 		TAILQ_INSERT_HEAD(head, area, link);
513 	TAILQ_INSERT_TAIL(&area->fobj->areas, area, fobj_link);
514 
515 	pager_unlock(exceptions);
516 }
517 KEEP_PAGER(area_insert);
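/*
 * Note on KEEP_PAGER(): the functions annotated this way in this file run
 * with the pager lock held or are otherwise needed by the pager itself and
 * must never trigger a page fault of their own; the KEEP_PAGER() macro
 * (see <keep.h>) is what marks them for that purpose.
 */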
518 
519 void tee_pager_add_core_area(vaddr_t base, enum tee_pager_area_type type,
520 			     struct fobj *fobj)
521 {
522 	struct tee_pager_area *area = NULL;
523 	uint32_t flags = 0;
524 	size_t fobj_pgoffs = 0;
525 	vaddr_t b = base;
526 	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
527 	size_t s2 = 0;
528 
529 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : type %d", base, base + s, type);
530 
531 	if (base & SMALL_PAGE_MASK || !s) {
532 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, s);
533 		panic();
534 	}
535 
536 	switch (type) {
537 	case PAGER_AREA_TYPE_RO:
538 		flags = TEE_MATTR_PRX;
539 		break;
540 	case PAGER_AREA_TYPE_RW:
541 	case PAGER_AREA_TYPE_LOCK:
542 		flags = TEE_MATTR_PRW;
543 		break;
544 	default:
545 		panic();
546 	}
547 
548 	if (!fobj)
549 		panic();
550 
551 	while (s) {
552 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
553 		area = calloc(1, sizeof(*area));
554 		if (!area)
555 			panic("alloc_area");
556 
557 		area->fobj = fobj_get(fobj);
558 		area->fobj_pgoffs = fobj_pgoffs;
559 		area->type = type;
560 		area->pgt = find_core_pgt(b);
561 		area->base = b;
562 		area->size = s2;
563 		area->flags = flags;
564 		area_insert(&tee_pager_area_head, area, NULL);
565 
566 		b += s2;
567 		s -= s2;
568 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
569 	}
570 }
571 
572 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
573 					vaddr_t va)
574 {
575 	struct tee_pager_area *area;
576 
577 	if (!areas)
578 		return NULL;
579 
580 	TAILQ_FOREACH(area, areas, link) {
581 		if (core_is_buffer_inside(va, 1, area->base, area->size))
582 			return area;
583 	}
584 	return NULL;
585 }
586 
587 #ifdef CFG_PAGED_USER_TA
588 static struct tee_pager_area *find_uta_area(vaddr_t va)
589 {
590 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
591 
592 	if (!is_user_mode_ctx(ctx))
593 		return NULL;
594 	return find_area(to_user_mode_ctx(ctx)->areas, va);
595 }
596 #else
597 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
598 {
599 	return NULL;
600 }
601 #endif /*CFG_PAGED_USER_TA*/
602 
603 
604 static uint32_t get_area_mattr(uint32_t area_flags)
605 {
606 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
607 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
608 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
609 
610 	return attr;
611 }
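/*
 * Example: a PAGER_AREA_TYPE_RO area is created with flags TEE_MATTR_PRX,
 * so get_area_mattr() returns TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
 * cached memory attributes | TEE_MATTR_PRX, i.e. a valid, secure, cached,
 * privileged read/execute mapping.
 */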
612 
613 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
614 {
615 	struct core_mmu_table_info *ti;
616 	paddr_t pa;
617 	unsigned idx;
618 
619 	ti = find_table_info((vaddr_t)pmem->va_alias);
620 	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
621 	core_mmu_get_entry(ti, idx, &pa, NULL);
622 	return pa;
623 }
624 
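/*
 * Load the page backing @page_va of @area into the physical page aliased
 * at @va_alias. The alias mapping is temporarily made writable so that the
 * fobj backend can fill the page; for read-only areas the write permission
 * on the alias is dropped again once the page has been loaded.
 */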
625 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
626 			void *va_alias)
627 {
628 	size_t fobj_pgoffs = ((page_va - area->base) >> SMALL_PAGE_SHIFT) +
629 			     area->fobj_pgoffs;
630 	struct core_mmu_table_info *ti;
631 	uint32_t attr_alias;
632 	paddr_t pa_alias;
633 	unsigned int idx_alias;
634 
635 	/* Ensure we are allowed to write to the aliased virtual page */
636 	ti = find_table_info((vaddr_t)va_alias);
637 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
638 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
639 	if (!(attr_alias & TEE_MATTR_PW)) {
640 		attr_alias |= TEE_MATTR_PW;
641 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
642 		tlbi_mva_allasid((vaddr_t)va_alias);
643 	}
644 
645 	asan_tag_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
646 	if (fobj_load_page(area->fobj, fobj_pgoffs, va_alias)) {
647 		EMSG("PH 0x%" PRIxVA " failed", page_va);
648 		panic();
649 	}
650 	switch (area->type) {
651 	case PAGER_AREA_TYPE_RO:
652 		incr_ro_hits();
653 		/* Forbid write to aliases for read-only (maybe exec) pages */
654 		attr_alias &= ~TEE_MATTR_PW;
655 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
656 		tlbi_mva_allasid((vaddr_t)va_alias);
657 		break;
658 	case PAGER_AREA_TYPE_RW:
659 		incr_rw_hits();
660 		break;
661 	case PAGER_AREA_TYPE_LOCK:
662 		break;
663 	default:
664 		panic();
665 	}
666 	asan_tag_no_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
667 }
668 
669 static void tee_pager_save_page(struct tee_pager_pmem *pmem)
670 {
671 	if (pmem_is_dirty(pmem)) {
672 		asan_tag_access(pmem->va_alias,
673 				(uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
674 		if (fobj_save_page(pmem->fobj, pmem->fobj_pgidx,
675 				   pmem->va_alias))
676 			panic("fobj_save_page");
677 		asan_tag_no_access(pmem->va_alias,
678 				   (uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
679 	}
680 }
681 
682 #ifdef CFG_PAGED_USER_TA
683 static void unlink_area(struct tee_pager_area_head *area_head,
684 			struct tee_pager_area *area)
685 {
686 	uint32_t exceptions = pager_lock_check_stack(64);
687 
688 	TAILQ_REMOVE(area_head, area, link);
689 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
690 
691 	pager_unlock(exceptions);
692 }
693 KEEP_PAGER(unlink_area);
694 
695 static void free_area(struct tee_pager_area *area)
696 {
697 	fobj_put(area->fobj);
698 	free(area);
699 }
700 
701 static TEE_Result pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base,
702 				    struct fobj *fobj, uint32_t prot)
703 {
704 	struct tee_pager_area *a_prev = NULL;
705 	struct tee_pager_area *area = NULL;
706 	vaddr_t b = base;
707 	size_t fobj_pgoffs = 0;
708 	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
709 
710 	if (!uctx->areas) {
711 		uctx->areas = malloc(sizeof(*uctx->areas));
712 		if (!uctx->areas)
713 			return TEE_ERROR_OUT_OF_MEMORY;
714 		TAILQ_INIT(uctx->areas);
715 	}
716 
717 	area = TAILQ_FIRST(uctx->areas);
718 	while (area) {
719 		if (core_is_buffer_intersect(b, s, area->base,
720 					     area->size))
721 			return TEE_ERROR_BAD_PARAMETERS;
722 		if (b < area->base)
723 			break;
724 		a_prev = area;
725 		area = TAILQ_NEXT(area, link);
726 	}
727 
728 	while (s) {
729 		size_t s2;
730 
731 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
732 		area = calloc(1, sizeof(*area));
733 		if (!area)
734 			return TEE_ERROR_OUT_OF_MEMORY;
735 
736 		/* Table info will be set when the context is activated. */
737 		area->fobj = fobj_get(fobj);
738 		area->fobj_pgoffs = fobj_pgoffs;
739 		area->type = PAGER_AREA_TYPE_RW;
740 		area->base = b;
741 		area->size = s2;
742 		area->flags = prot;
743 
744 		area_insert(uctx->areas, area, a_prev);
745 
746 		a_prev = area;
747 		b += s2;
748 		s -= s2;
749 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
750 	}
751 
752 	return TEE_SUCCESS;
753 }
754 
755 TEE_Result tee_pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base,
756 				 struct fobj *fobj, uint32_t prot)
757 {
758 	TEE_Result res = TEE_SUCCESS;
759 	struct thread_specific_data *tsd = thread_get_tsd();
760 	struct tee_pager_area *area = NULL;
761 	struct core_mmu_table_info dir_info = { NULL };
762 
763 	if (&uctx->ctx != tsd->ctx) {
764 		/*
765 		 * Changes are to a uctx that isn't active. Just add the
766 		 * areas; page tables will be dealt with later.
767 		 */
768 		return pager_add_um_area(uctx, base, fobj, prot);
769 	}
770 
771 	/*
772 	 * Assign page tables before adding areas to be able to tell which
773 	 * are newly added and should be removed in case of failure.
774 	 */
775 	tee_pager_assign_um_tables(uctx);
776 	res = pager_add_um_area(uctx, base, fobj, prot);
777 	if (res) {
778 		struct tee_pager_area *next_a;
779 
780 		/* Remove all added areas */
781 		TAILQ_FOREACH_SAFE(area, uctx->areas, link, next_a) {
782 			if (!area->pgt) {
783 				unlink_area(uctx->areas, area);
784 				free_area(area);
785 			}
786 		}
787 		return res;
788 	}
789 
790 	/*
791 	 * Assign page tables to the new areas and make sure that the page
792 	 * tables are registered in the upper table.
793 	 */
794 	tee_pager_assign_um_tables(uctx);
795 	core_mmu_get_user_pgdir(&dir_info);
796 	TAILQ_FOREACH(area, uctx->areas, link) {
797 		paddr_t pa;
798 		size_t idx;
799 		uint32_t attr;
800 
801 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
802 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
803 
804 		/*
805 		 * Check if the page table is already used; if it is, it's
806 		 * already registered.
807 		 */
808 		if (area->pgt->num_used_entries) {
809 			assert(attr & TEE_MATTR_TABLE);
810 			assert(pa == virt_to_phys(area->pgt->tbl));
811 			continue;
812 		}
813 
814 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
815 		pa = virt_to_phys(area->pgt->tbl);
816 		assert(pa);
817 		/*
818 		 * Note that the update of the table entry is guaranteed to
819 		 * be atomic.
820 		 */
821 		core_mmu_set_entry(&dir_info, idx, pa, attr);
822 	}
823 
824 	return TEE_SUCCESS;
825 }
826 
827 static void split_area(struct tee_pager_area_head *area_head,
828 		       struct tee_pager_area *area, struct tee_pager_area *a2,
829 		       vaddr_t va)
830 {
831 	uint32_t exceptions = pager_lock_check_stack(64);
832 	size_t diff = va - area->base;
833 
834 	a2->fobj = fobj_get(area->fobj);
835 	a2->fobj_pgoffs = area->fobj_pgoffs + diff / SMALL_PAGE_SIZE;
836 	a2->type = area->type;
837 	a2->flags = area->flags;
838 	a2->base = va;
839 	a2->size = area->size - diff;
840 	a2->pgt = area->pgt;
841 	area->size = diff;
842 
843 	TAILQ_INSERT_AFTER(area_head, area, a2, link);
844 	TAILQ_INSERT_AFTER(&area->fobj->areas, area, a2, fobj_link);
845 
846 	pager_unlock(exceptions);
847 }
848 KEEP_PAGER(split_area);
849 
850 TEE_Result tee_pager_split_um_region(struct user_mode_ctx *uctx, vaddr_t va)
851 {
852 	struct tee_pager_area *area = NULL;
853 	struct tee_pager_area *a2 = NULL;
854 
855 	if (va & SMALL_PAGE_MASK)
856 		return TEE_ERROR_BAD_PARAMETERS;
857 
858 	TAILQ_FOREACH(area, uctx->areas, link) {
859 		if (va == area->base || va == area->base + area->size)
860 			return TEE_SUCCESS;
861 		if (va > area->base && va < area->base + area->size) {
862 			a2 = calloc(1, sizeof(*a2));
863 			if (!a2)
864 				return TEE_ERROR_OUT_OF_MEMORY;
865 			split_area(uctx->areas, area, a2, va);
866 			return TEE_SUCCESS;
867 		}
868 	}
869 
870 	return TEE_SUCCESS;
871 }
872 
873 static void merge_area_with_next(struct tee_pager_area_head *area_head,
874 				 struct tee_pager_area *a,
875 				 struct tee_pager_area *a_next)
876 {
877 	uint32_t exceptions = pager_lock_check_stack(64);
878 
879 	TAILQ_REMOVE(area_head, a_next, link);
880 	TAILQ_REMOVE(&a_next->fobj->areas, a_next, fobj_link);
881 	a->size += a_next->size;
882 
883 	pager_unlock(exceptions);
884 }
885 KEEP_PAGER(merge_area_with_next);
886 
887 void tee_pager_merge_um_region(struct user_mode_ctx *uctx, vaddr_t va,
888 			       size_t len)
889 {
890 	struct tee_pager_area *a_next = NULL;
891 	struct tee_pager_area *a = NULL;
892 
893 	if ((va | len) & SMALL_PAGE_MASK)
894 		return;
895 
896 	for (a = TAILQ_FIRST(uctx->areas);; a = a_next) {
897 		a_next = TAILQ_NEXT(a, link);
898 		if (!a_next)
899 			return;
900 
901 		/* Try merging with the area just before va */
902 		if (a->base + a->size < va)
903 			continue;
904 
905 		/*
906 		 * If a->base is well past our range we're done.
907 		 * Note that if it's just the page after our range we'll
908 		 * try to merge.
909 		 */
910 		if (a->base > va + len)
911 			return;
912 
913 		if (a->base + a->size != a_next->base)
914 			continue;
915 		if (a->fobj != a_next->fobj || a->type != a_next->type ||
916 		    a->flags != a_next->flags || a->pgt != a_next->pgt)
917 			continue;
918 		if (a->fobj_pgoffs + a->size / SMALL_PAGE_SIZE !=
919 		    a_next->fobj_pgoffs)
920 			continue;
921 
922 		merge_area_with_next(uctx->areas, a, a_next);
923 		free_area(a_next);
924 		a_next = a;
925 	}
926 }
927 
928 static void rem_area(struct tee_pager_area_head *area_head,
929 		     struct tee_pager_area *area)
930 {
931 	struct tee_pager_pmem *pmem;
932 	size_t last_pgoffs = area->fobj_pgoffs +
933 			     (area->size >> SMALL_PAGE_SHIFT) - 1;
934 	uint32_t exceptions;
935 	size_t idx = 0;
936 	uint32_t a = 0;
937 
938 	exceptions = pager_lock_check_stack(64);
939 
940 	TAILQ_REMOVE(area_head, area, link);
941 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
942 
943 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
944 		if (pmem->fobj != area->fobj ||
945 		    pmem->fobj_pgidx < area->fobj_pgoffs ||
946 		    pmem->fobj_pgidx > last_pgoffs)
947 			continue;
948 
949 		idx = pmem_get_area_tblidx(pmem, area);
950 		area_get_entry(area, idx, NULL, &a);
951 		if (!(a & TEE_MATTR_VALID_BLOCK))
952 			continue;
953 
954 		area_set_entry(area, idx, 0, 0);
955 		area_tlbi_entry(area, idx);
956 		pgt_dec_used_entries(area->pgt);
957 	}
958 
959 	pager_unlock(exceptions);
960 
961 	free_area(area);
962 }
963 KEEP_PAGER(rem_area);
964 
965 void tee_pager_rem_um_region(struct user_mode_ctx *uctx, vaddr_t base,
966 			     size_t size)
967 {
968 	struct tee_pager_area *area;
969 	struct tee_pager_area *next_a;
970 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
971 
972 	TAILQ_FOREACH_SAFE(area, uctx->areas, link, next_a) {
973 		if (core_is_buffer_inside(area->base, area->size, base, s))
974 			rem_area(uctx->areas, area);
975 	}
976 	tlbi_asid(uctx->vm_info.asid);
977 }
978 
979 void tee_pager_rem_um_areas(struct user_mode_ctx *uctx)
980 {
981 	struct tee_pager_area *area = NULL;
982 
983 	if (!uctx->areas)
984 		return;
985 
986 	while (true) {
987 		area = TAILQ_FIRST(uctx->areas);
988 		if (!area)
989 			break;
990 		unlink_area(uctx->areas, area);
991 		free_area(area);
992 	}
993 
994 	free(uctx->areas);
995 }
996 
997 static bool __maybe_unused same_context(struct tee_pager_pmem *pmem)
998 {
999 	struct tee_pager_area *a = TAILQ_FIRST(&pmem->fobj->areas);
1000 	void *ctx = a->pgt->ctx;
1001 
1002 	do {
1003 		a = TAILQ_NEXT(a, fobj_link);
1004 		if (!a)
1005 			return true;
1006 	} while (a->pgt->ctx == ctx);
1007 
1008 	return false;
1009 }
1010 
1011 bool tee_pager_set_um_area_attr(struct user_mode_ctx *uctx, vaddr_t base,
1012 				size_t size, uint32_t flags)
1013 {
1014 	bool ret = false;
1015 	vaddr_t b = base;
1016 	size_t s = size;
1017 	size_t s2 = 0;
1018 	struct tee_pager_area *area = find_area(uctx->areas, b);
1019 	uint32_t exceptions = 0;
1020 	struct tee_pager_pmem *pmem = NULL;
1021 	uint32_t a = 0;
1022 	uint32_t f = 0;
1023 	uint32_t mattr = 0;
1024 	uint32_t f2 = 0;
1025 	size_t tblidx = 0;
1026 
1027 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
1028 	if (f & TEE_MATTR_UW)
1029 		f |= TEE_MATTR_PW;
1030 	mattr = get_area_mattr(f);
1031 
1032 	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1033 
1034 	while (s) {
1035 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
1036 		if (!area || area->base != b || area->size != s2) {
1037 			ret = false;
1038 			goto out;
1039 		}
1040 		b += s2;
1041 		s -= s2;
1042 
1043 		if (area->flags == f)
1044 			goto next_area;
1045 
1046 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1047 			if (!pmem_is_covered_by_area(pmem, area))
1048 				continue;
1049 
1050 			tblidx = pmem_get_area_tblidx(pmem, area);
1051 			area_get_entry(area, tblidx, NULL, &a);
1052 			if (a == f)
1053 				continue;
1054 			area_set_entry(area, tblidx, 0, 0);
1055 			area_tlbi_entry(area, tblidx);
1056 
1057 			pmem->flags &= ~PMEM_FLAG_HIDDEN;
1058 			if (pmem_is_dirty(pmem))
1059 				f2 = mattr;
1060 			else
1061 				f2 = mattr & ~(TEE_MATTR_UW | TEE_MATTR_PW);
1062 			area_set_entry(area, tblidx, get_pmem_pa(pmem), f2);
1063 			if (!(a & TEE_MATTR_VALID_BLOCK))
1064 				pgt_inc_used_entries(area->pgt);
1065 			/*
1066 			 * Make sure the table update is visible before
1067 			 * continuing.
1068 			 */
1069 			dsb_ishst();
1070 
1071 			/*
1072 			 * There's a problem if this page is already shared:
1073 			 * we'd need to do an icache invalidation for each
1074 			 * context in which it is shared. In practice this will
1075 			 * never happen.
1076 			 */
1077 			if (flags & TEE_MATTR_UX) {
1078 				void *va = (void *)area_idx2va(area, tblidx);
1079 
1080 				/* Assert that the pmem isn't shared. */
1081 				assert(same_context(pmem));
1082 
1083 				dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1084 				icache_inv_user_range(va, SMALL_PAGE_SIZE);
1085 			}
1086 		}
1087 
1088 		area->flags = f;
1089 next_area:
1090 		area = TAILQ_NEXT(area, link);
1091 	}
1092 
1093 	ret = true;
1094 out:
1095 	pager_unlock(exceptions);
1096 	return ret;
1097 }
1098 
1099 KEEP_PAGER(tee_pager_set_um_area_attr);
1100 #endif /*CFG_PAGED_USER_TA*/
1101 
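/*
 * Detach every pmem currently backed by @fobj: the physical pages stay in
 * the pager's pool but no longer reference the fobj or carry valid
 * content.
 */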
1102 void tee_pager_invalidate_fobj(struct fobj *fobj)
1103 {
1104 	struct tee_pager_pmem *pmem;
1105 	uint32_t exceptions;
1106 
1107 	exceptions = pager_lock_check_stack(64);
1108 
1109 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1110 		if (pmem->fobj == fobj) {
1111 			pmem->fobj = NULL;
1112 			pmem->fobj_pgidx = INVALID_PGIDX;
1113 		}
1114 	}
1115 
1116 	pager_unlock(exceptions);
1117 }
1118 KEEP_PAGER(tee_pager_invalidate_fobj);
1119 
1120 static struct tee_pager_pmem *pmem_find(struct tee_pager_area *area,
1121 					unsigned int tblidx)
1122 {
1123 	struct tee_pager_pmem *pmem = NULL;
1124 
1125 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
1126 		if (pmem->fobj == area->fobj &&
1127 		    pmem_get_area_tblidx(pmem, area) == tblidx)
1128 			return pmem;
1129 
1130 	return NULL;
1131 }
1132 
1133 static bool tee_pager_unhide_page(struct tee_pager_area *area,
1134 				  unsigned int tblidx)
1135 {
1136 	struct tee_pager_pmem *pmem = pmem_find(area, tblidx);
1137 	uint32_t a = get_area_mattr(area->flags);
1138 	uint32_t attr = 0;
1139 	paddr_t pa = 0;
1140 
1141 	if (!pmem)
1142 		return false;
1143 
1144 	area_get_entry(area, tblidx, NULL, &attr);
1145 	if (attr & TEE_MATTR_VALID_BLOCK)
1146 		return false;
1147 
1148 	/*
1149 	 * The page is hidden, or not mapped yet. Unhide the page and
1150 	 * move it to the tail.
1151 	 *
1152 	 * Since the page isn't mapped there doesn't exist a valid TLB entry
1153 	 * for this address, so no TLB invalidation is required after setting
1154 	 * the new entry. A DSB is needed though, to make the write visible.
1155 	 *
1156 	 * For user executable pages it's more complicated. Those pages can
1157 	 * be shared between multiple TA mappings and thus populated by
1158 	 * another TA. The reference manual states that:
1159 	 *
1160 	 * "instruction cache maintenance is required only after writing
1161 	 * new data to a physical address that holds an instruction."
1162 	 *
1163 	 * So for hidden pages we would not need to invalidate i-cache, but
1164 	 * for newly populated pages we do. Since we don't know which, we
1165 	 * have to assume the worst and always invalidate the i-cache. We
1166 	 * don't need to clean the d-cache though, since that has already
1167 	 * been done earlier.
1168 	 *
1169 	 * Additional bookkeeping to tell if the i-cache invalidation is
1170 	 * needed or not is left as a future optimization.
1171 	 */
1172 
1173 	/* If it's not a dirty block, then it should be read only. */
1174 	if (!pmem_is_dirty(pmem))
1175 		a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1176 
1177 	pa = get_pmem_pa(pmem);
1178 	pmem->flags &= ~PMEM_FLAG_HIDDEN;
1179 	if (area->flags & TEE_MATTR_UX) {
1180 		void *va = (void *)area_idx2va(area, tblidx);
1181 
1182 		/* Set a temporary read-only mapping */
1183 		assert(!(a & (TEE_MATTR_UW | TEE_MATTR_PW)));
1184 		area_set_entry(area, tblidx, pa, a & ~TEE_MATTR_UX);
1185 		dsb_ishst();
1186 
1187 		icache_inv_user_range(va, SMALL_PAGE_SIZE);
1188 
1189 		/* Set the final mapping */
1190 		area_set_entry(area, tblidx, pa, a);
1191 		area_tlbi_entry(area, tblidx);
1192 	} else {
1193 		area_set_entry(area, tblidx, pa, a);
1194 		dsb_ishst();
1195 	}
1196 	pgt_inc_used_entries(area->pgt);
1197 
1198 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1199 	TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1200 	incr_hidden_hits();
1201 	return true;
1202 }
1203 
1204 static void tee_pager_hide_pages(void)
1205 {
1206 	struct tee_pager_pmem *pmem = NULL;
1207 	size_t n = 0;
1208 
1209 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1210 		if (n >= TEE_PAGER_NHIDE)
1211 			break;
1212 		n++;
1213 
1214 		/* we cannot hide pages when pmem->fobj is not defined. */
1215 		if (!pmem->fobj)
1216 			continue;
1217 
1218 		if (pmem_is_hidden(pmem))
1219 			continue;
1220 
1221 		pmem->flags |= PMEM_FLAG_HIDDEN;
1222 		pmem_unmap(pmem, NULL);
1223 	}
1224 }
1225 
1226 /*
1227  * Find mapped pmem, unmap it and move it to the pageable pmem list.
1228  * Return false if page was not mapped, and true if page was mapped.
1229  */
1230 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1231 				       vaddr_t page_va)
1232 {
1233 	struct tee_pager_pmem *pmem;
1234 	size_t tblidx = 0;
1235 	size_t pgidx = area_va2idx(area, page_va) + area->fobj_pgoffs -
1236 		       ((area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT);
1237 
1238 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1239 		if (pmem->fobj != area->fobj || pmem->fobj_pgidx != pgidx)
1240 			continue;
1241 
1242 		/*
1243 		 * Locked pages may not be shared, these two asserts check
1244 		 * that there's only a single area recorded with this pmem.
1245 		 */
1246 		assert(TAILQ_FIRST(&pmem->fobj->areas) == area);
1247 		assert(TAILQ_LAST(&pmem->fobj->areas,
1248 				  tee_pager_area_head) == area);
1249 
1250 		tblidx = pmem_get_area_tblidx(pmem, area);
1251 		area_set_entry(area, tblidx, 0, 0);
1252 		pgt_dec_used_entries(area->pgt);
1253 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1254 		pmem->fobj = NULL;
1255 		pmem->fobj_pgidx = INVALID_PGIDX;
1256 		tee_pager_npages++;
1257 		set_npages();
1258 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1259 		incr_zi_released();
1260 		return true;
1261 	}
1262 
1263 	return false;
1264 }
1265 
1266 /* Finds the oldest page and unmaps it from all tables */
1267 static struct tee_pager_pmem *tee_pager_get_page(enum tee_pager_area_type at)
1268 {
1269 	struct tee_pager_pmem *pmem;
1270 
1271 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1272 	if (!pmem) {
1273 		EMSG("No pmem entries");
1274 		return NULL;
1275 	}
1276 
1277 	if (pmem->fobj) {
1278 		pmem_unmap(pmem, NULL);
1279 		tee_pager_save_page(pmem);
1280 	}
1281 
1282 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1283 	pmem->fobj = NULL;
1284 	pmem->fobj_pgidx = INVALID_PGIDX;
1285 	pmem->flags = 0;
1286 	if (at == PAGER_AREA_TYPE_LOCK) {
1287 		/* Move page to lock list */
1288 		if (tee_pager_npages <= 0)
1289 			panic("running out of page");
1290 		tee_pager_npages--;
1291 		set_npages();
1292 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1293 	} else {
1294 		/* move page to back */
1295 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1296 	}
1297 
1298 	return pmem;
1299 }
1300 
1301 static bool pager_update_permissions(struct tee_pager_area *area,
1302 			struct abort_info *ai, bool *handled)
1303 {
1304 	unsigned int pgidx = area_va2idx(area, ai->va);
1305 	struct tee_pager_pmem *pmem = NULL;
1306 	uint32_t attr = 0;
1307 	paddr_t pa = 0;
1308 
1309 	*handled = false;
1310 
1311 	area_get_entry(area, pgidx, &pa, &attr);
1312 
1313 	/* Not mapped */
1314 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1315 		return false;
1316 
1317 	/* Not readable, should not happen */
1318 	if (abort_is_user_exception(ai)) {
1319 		if (!(attr & TEE_MATTR_UR))
1320 			return true;
1321 	} else {
1322 		if (!(attr & TEE_MATTR_PR)) {
1323 			abort_print_error(ai);
1324 			panic();
1325 		}
1326 	}
1327 
1328 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1329 	case CORE_MMU_FAULT_TRANSLATION:
1330 	case CORE_MMU_FAULT_READ_PERMISSION:
1331 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1332 			/* Check attempting to execute from an NOX page */
1333 			if (abort_is_user_exception(ai)) {
1334 				if (!(attr & TEE_MATTR_UX))
1335 					return true;
1336 			} else {
1337 				if (!(attr & TEE_MATTR_PX)) {
1338 					abort_print_error(ai);
1339 					panic();
1340 				}
1341 			}
1342 		}
1343 		/* Since the page is mapped now it's OK */
1344 		break;
1345 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1346 		/* Check attempting to write to an RO page */
1347 		pmem = pmem_find(area, pgidx);
1348 		if (!pmem)
1349 			panic();
1350 		if (abort_is_user_exception(ai)) {
1351 			if (!(area->flags & TEE_MATTR_UW))
1352 				return true;
1353 			if (!(attr & TEE_MATTR_UW)) {
1354 				FMSG("Dirty %p",
1355 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1356 				pmem->flags |= PMEM_FLAG_DIRTY;
1357 				area_set_entry(area, pgidx, pa,
1358 					       get_area_mattr(area->flags));
1359 				area_tlbi_entry(area, pgidx);
1360 			}
1361 
1362 		} else {
1363 			if (!(area->flags & TEE_MATTR_PW)) {
1364 				abort_print_error(ai);
1365 				panic();
1366 			}
1367 			if (!(attr & TEE_MATTR_PW)) {
1368 				FMSG("Dirty %p",
1369 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1370 				pmem->flags |= PMEM_FLAG_DIRTY;
1371 				area_set_entry(area, pgidx, pa,
1372 					       get_area_mattr(area->flags));
1373 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1374 			}
1375 		}
1376 		/* Since permissions have been updated now it's OK */
1377 		break;
1378 	default:
1379 		/* Some fault we can't deal with */
1380 		if (abort_is_user_exception(ai))
1381 			return true;
1382 		abort_print_error(ai);
1383 		panic();
1384 	}
1385 	*handled = true;
1386 	return true;
1387 }
1388 
1389 #ifdef CFG_TEE_CORE_DEBUG
1390 static void stat_handle_fault(void)
1391 {
1392 	static size_t num_faults;
1393 	static size_t min_npages = SIZE_MAX;
1394 	static size_t total_min_npages = SIZE_MAX;
1395 
1396 	num_faults++;
1397 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1398 		DMSG("nfaults %zu npages %zu (min %zu)",
1399 		     num_faults, tee_pager_npages, min_npages);
1400 		min_npages = tee_pager_npages; /* reset */
1401 	}
1402 	if (tee_pager_npages < min_npages)
1403 		min_npages = tee_pager_npages;
1404 	if (tee_pager_npages < total_min_npages)
1405 		total_min_npages = tee_pager_npages;
1406 }
1407 #else
1408 static void stat_handle_fault(void)
1409 {
1410 }
1411 #endif
1412 
1413 bool tee_pager_handle_fault(struct abort_info *ai)
1414 {
1415 	struct tee_pager_area *area;
1416 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1417 	uint32_t exceptions;
1418 	bool ret;
1419 	bool clean_user_cache = false;
1420 
1421 #ifdef TEE_PAGER_DEBUG_PRINT
1422 	if (!abort_is_user_exception(ai))
1423 		abort_print(ai);
1424 #endif
1425 
1426 	/*
1427 	 * We're updating pages that can affect several active CPUs at a
1428 	 * time below. We end up here because a thread tries to access some
1429 	 * memory that isn't available. We have to be careful when making
1430 	 * that memory available as other threads may succeed in accessing
1431 	 * that address the moment after we've made it available.
1432 	 *
1433 	 * That means that we can't just map the memory and populate the
1434 	 * page; instead we use the aliased mapping to populate the page
1435 	 * and once everything is ready we map it.
1436 	 */
1437 	exceptions = pager_lock(ai);
1438 
1439 	stat_handle_fault();
1440 
1441 	/* check if the access is valid */
1442 	if (abort_is_user_exception(ai)) {
1443 		area = find_uta_area(ai->va);
1444 		clean_user_cache = true;
1445 	} else {
1446 		area = find_area(&tee_pager_area_head, ai->va);
1447 		if (!area) {
1448 			area = find_uta_area(ai->va);
1449 			clean_user_cache = true;
1450 		}
1451 	}
1452 	if (!area || !area->pgt) {
1453 		ret = false;
1454 		goto out;
1455 	}
1456 
1457 	if (!tee_pager_unhide_page(area, area_va2idx(area, page_va))) {
1458 		struct tee_pager_pmem *pmem = NULL;
1459 		uint32_t attr = 0;
1460 		paddr_t pa = 0;
1461 		size_t tblidx = 0;
1462 
1463 		/*
1464 		 * The page wasn't hidden, but some other core may have
1465 		 * updated the table entry before we got here or we need
1466 		 * to make a read-only page read-write (dirty).
1467 		 */
1468 		if (pager_update_permissions(area, ai, &ret)) {
1469 			/*
1470 			 * Nothing more to do with the abort. The problem
1471 			 * could already have been dealt with from another
1472 			 * core, or if ret is false the TA will be panicked.
1473 			 */
1474 			goto out;
1475 		}
1476 
1477 		pmem = tee_pager_get_page(area->type);
1478 		if (!pmem) {
1479 			abort_print(ai);
1480 			panic();
1481 		}
1482 
1483 		/* load page code & data */
1484 		tee_pager_load_page(area, page_va, pmem->va_alias);
1485 
1486 
1487 		pmem->fobj = area->fobj;
1488 		pmem->fobj_pgidx = area_va2idx(area, page_va) +
1489 				   area->fobj_pgoffs -
1490 				   ((area->base & CORE_MMU_PGDIR_MASK) >>
1491 					SMALL_PAGE_SHIFT);
1492 		tblidx = pmem_get_area_tblidx(pmem, area);
1493 		attr = get_area_mattr(area->flags);
1494 		/*
1495 		 * Pages from PAGER_AREA_TYPE_RW start read-only to be
1496 		 * able to tell when they are updated and should be tagged
1497 		 * as dirty.
1498 		 */
1499 		if (area->type == PAGER_AREA_TYPE_RW)
1500 			attr &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1501 		pa = get_pmem_pa(pmem);
1502 
1503 		/*
1504 		 * We've updated the page using the aliased mapping and
1505 		 * some cache maintenance is now needed if it's an
1506 		 * executable page.
1507 		 *
1508 		 * Since the d-cache is a Physically-indexed,
1509 		 * physically-tagged (PIPT) cache we can clean either the
1510 		 * aliased address or the real virtual address. In this
1511 		 * case we choose the real virtual address.
1512 		 *
1513 		 * The i-cache can also be PIPT, but may be something else
1514 		 * too like VIPT. The current code requires the caches to
1515 		 * implement the IVIPT extension, that is:
1516 		 * "instruction cache maintenance is required only after
1517 		 * writing new data to a physical address that holds an
1518 		 * instruction."
1519 		 *
1520 		 * To portably invalidate the icache the page has to
1521 		 * be mapped at the final virtual address but not
1522 		 * executable.
1523 		 */
1524 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1525 			uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1526 					TEE_MATTR_PW | TEE_MATTR_UW;
1527 			void *va = (void *)page_va;
1528 
1529 			/* Set a temporary read-only mapping */
1530 			area_set_entry(area, tblidx, pa, attr & ~mask);
1531 			area_tlbi_entry(area, tblidx);
1532 
1533 			dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1534 			if (clean_user_cache)
1535 				icache_inv_user_range(va, SMALL_PAGE_SIZE);
1536 			else
1537 				icache_inv_range(va, SMALL_PAGE_SIZE);
1538 
1539 			/* Set the final mapping */
1540 			area_set_entry(area, tblidx, pa, attr);
1541 			area_tlbi_entry(area, tblidx);
1542 		} else {
1543 			area_set_entry(area, tblidx, pa, attr);
1544 			/*
1545 			 * No need to flush TLB for this entry, it was
1546 			 * invalid. We should use a barrier though, to make
1547 			 * sure that the change is visible.
1548 			 */
1549 			dsb_ishst();
1550 		}
1551 		pgt_inc_used_entries(area->pgt);
1552 
1553 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1554 
1555 	}
1556 
1557 	tee_pager_hide_pages();
1558 	ret = true;
1559 out:
1560 	pager_unlock(exceptions);
1561 	return ret;
1562 }
1563 
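/*
 * Register @npages physical pages, starting at the page mapped at @vaddr,
 * for use by the pager. Each page gets an alias mapping in the alias area.
 * With @unmap set the pages are unmapped at @vaddr and become free pages
 * in the pool; otherwise they remain mapped and are associated with the
 * core area covering @vaddr.
 */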
1564 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1565 {
1566 	size_t n;
1567 
1568 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1569 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1570 
1571 	/* setup memory */
1572 	for (n = 0; n < npages; n++) {
1573 		struct core_mmu_table_info *ti;
1574 		struct tee_pager_pmem *pmem;
1575 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1576 		unsigned int pgidx;
1577 		paddr_t pa;
1578 		uint32_t attr;
1579 
1580 		ti = find_table_info(va);
1581 		pgidx = core_mmu_va2idx(ti, va);
1582 		/*
1583 		 * Note that we can only support adding pages in the
1584 		 * valid range of this table info, currently not a problem.
1585 		 */
1586 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1587 
1588 		/* Ignore unmapped pages/blocks */
1589 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1590 			continue;
1591 
1592 		pmem = calloc(1, sizeof(struct tee_pager_pmem));
1593 		if (!pmem)
1594 			panic("out of mem");
1595 
1596 		pmem->va_alias = pager_add_alias_page(pa);
1597 
1598 		if (unmap) {
1599 			pmem->fobj = NULL;
1600 			pmem->fobj_pgidx = INVALID_PGIDX;
1601 			core_mmu_set_entry(ti, pgidx, 0, 0);
1602 			pgt_dec_used_entries(find_core_pgt(va));
1603 		} else {
1604 			struct tee_pager_area *area = NULL;
1605 
1606 			/*
1607 			 * The page is still mapped, let's assign the area
1608 			 * and update the protection bits accordingly.
1609 			 */
1610 			area = find_area(&tee_pager_area_head, va);
1611 			assert(area && area->pgt == find_core_pgt(va));
1612 			pmem->fobj = area->fobj;
1613 			pmem->fobj_pgidx = pgidx + area->fobj_pgoffs -
1614 					   ((area->base &
1615 							CORE_MMU_PGDIR_MASK) >>
1616 						SMALL_PAGE_SHIFT);
1617 			assert(pgidx == pmem_get_area_tblidx(pmem, area));
1618 			assert(pa == get_pmem_pa(pmem));
1619 			area_set_entry(area, pgidx, pa,
1620 				       get_area_mattr(area->flags));
1621 		}
1622 
1623 		tee_pager_npages++;
1624 		incr_npages_all();
1625 		set_npages();
1626 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1627 	}
1628 
1629 	/*
1630 	 * As this is done at inits, invalidate all TLBs once instead of
1631 	 * targeting only the modified entries.
1632 	 */
1633 	tlbi_all();
1634 }
1635 
1636 #ifdef CFG_PAGED_USER_TA
1637 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1638 {
1639 	struct pgt *p = pgt;
1640 
1641 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1642 		p = SLIST_NEXT(p, link);
1643 	return p;
1644 }
1645 
1646 void tee_pager_assign_um_tables(struct user_mode_ctx *uctx)
1647 {
1648 	struct tee_pager_area *area = NULL;
1649 	struct pgt *pgt = NULL;
1650 
1651 	if (!uctx->areas)
1652 		return;
1653 
1654 	pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1655 	TAILQ_FOREACH(area, uctx->areas, link) {
1656 		if (!area->pgt)
1657 			area->pgt = find_pgt(pgt, area->base);
1658 		else
1659 			assert(area->pgt == find_pgt(pgt, area->base));
1660 		if (!area->pgt)
1661 			panic();
1662 	}
1663 }
1664 
1665 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1666 {
1667 	struct tee_pager_pmem *pmem = NULL;
1668 	struct tee_pager_area *area = NULL;
1669 	struct tee_pager_area_head *areas = NULL;
1670 	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1671 
1672 	if (!pgt->num_used_entries)
1673 		goto out;
1674 
1675 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1676 		if (pmem->fobj)
1677 			pmem_unmap(pmem, pgt);
1678 	}
1679 	assert(!pgt->num_used_entries);
1680 
1681 out:
1682 	areas = to_user_ta_ctx(pgt->ctx)->uctx.areas;
1683 	if (areas) {
1684 		TAILQ_FOREACH(area, areas, link) {
1685 			if (area->pgt == pgt)
1686 				area->pgt = NULL;
1687 		}
1688 	}
1689 
1690 	pager_unlock(exceptions);
1691 }
1692 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1693 #endif /*CFG_PAGED_USER_TA*/
1694 
1695 void tee_pager_release_phys(void *addr, size_t size)
1696 {
1697 	bool unmaped = false;
1698 	vaddr_t va = (vaddr_t)addr;
1699 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1700 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1701 	struct tee_pager_area *area;
1702 	uint32_t exceptions;
1703 
1704 	if (end <= begin)
1705 		return;
1706 
1707 	exceptions = pager_lock_check_stack(128);
1708 
1709 	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
1710 		area = find_area(&tee_pager_area_head, va);
1711 		if (!area)
1712 			panic();
1713 		unmaped |= tee_pager_release_one_phys(area, va);
1714 	}
1715 
1716 	if (unmaped)
1717 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1718 
1719 	pager_unlock(exceptions);
1720 }
1721 KEEP_PAGER(tee_pager_release_phys);
1722 
1723 void *tee_pager_alloc(size_t size)
1724 {
1725 	tee_mm_entry_t *mm = NULL;
1726 	uint8_t *smem = NULL;
1727 	size_t num_pages = 0;
1728 	struct fobj *fobj = NULL;
1729 
1730 	if (!size)
1731 		return NULL;
1732 
1733 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1734 	if (!mm)
1735 		return NULL;
1736 
1737 	smem = (uint8_t *)tee_mm_get_smem(mm);
1738 	num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
1739 	fobj = fobj_locked_paged_alloc(num_pages);
1740 	if (!fobj) {
1741 		tee_mm_free(mm);
1742 		return NULL;
1743 	}
1744 
1745 	tee_pager_add_core_area((vaddr_t)smem, PAGER_AREA_TYPE_LOCK, fobj);
1746 	fobj_put(fobj);
1747 
1748 	asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE);
1749 
1750 	return smem;
1751 }
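/*
 * Usage sketch (illustrative only, not a sequence taken from this file):
 *
 *	void *buf = tee_pager_alloc(4 * SMALL_PAGE_SIZE);
 *
 *	if (!buf)
 *		panic();
 *	memset(buf, 0, 4 * SMALL_PAGE_SIZE);
 *	...
 *	tee_pager_release_phys(buf, 4 * SMALL_PAGE_SIZE);
 *
 * The pages backing the buffer are faulted in and locked on first access;
 * tee_pager_release_phys() later returns the physical pages to the
 * pageable pool while the virtual range stays reserved in tee_mm_vcore.
 */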
1752