xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 9438dbdb59dc3bbf4fb58e0e1c00989abb7713bc)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <io.h>
10 #include <keep.h>
11 #include <kernel/abort.h>
12 #include <kernel/asan.h>
13 #include <kernel/cache_helpers.h>
14 #include <kernel/panic.h>
15 #include <kernel/spinlock.h>
16 #include <kernel/tee_misc.h>
17 #include <kernel/tee_ta_manager.h>
18 #include <kernel/thread.h>
19 #include <kernel/tlb_helpers.h>
20 #include <kernel/user_mode_ctx.h>
21 #include <mm/core_memprot.h>
22 #include <mm/fobj.h>
23 #include <mm/tee_mm.h>
24 #include <mm/tee_pager.h>
25 #include <stdlib.h>
26 #include <sys/queue.h>
27 #include <tee_api_defines.h>
28 #include <trace.h>
29 #include <types_ext.h>
30 #include <utee_defines.h>
31 #include <util.h>
32 
33 
34 static struct tee_pager_area_head tee_pager_area_head =
35 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
36 
37 #define INVALID_PGIDX		UINT_MAX
38 #define PMEM_FLAG_DIRTY		BIT(0)
39 #define PMEM_FLAG_HIDDEN	BIT(1)
40 
41 /*
42  * struct tee_pager_pmem - Represents a physical page used for paging.
43  *
44  * @flags	flags defined by PMEM_FLAG_* above
45  * @fobj_pgidx	index of the page in the @fobj
46  * @fobj	File object of which a page is made visible.
47  * @va_alias	Virtual address where the physical page always is aliased.
48  *		Used during remapping of the page when the content need to
49  *		be updated before it's available at the new location.
50  */
51 struct tee_pager_pmem {
52 	unsigned int flags;
53 	unsigned int fobj_pgidx;
54 	struct fobj *fobj;
55 	void *va_alias;
56 	TAILQ_ENTRY(tee_pager_pmem) link;
57 };
58 
59 /* The list of physical pages. The first page in the list is the oldest */
60 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
61 
62 static struct tee_pager_pmem_head tee_pager_pmem_head =
63 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
64 
65 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
66 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
67 
68 /* number of pages hidden */
69 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
70 
71 /* Number of registered physical pages, used hiding pages. */
72 static size_t tee_pager_npages;
73 
74 #ifdef CFG_WITH_STATS
75 static struct tee_pager_stats pager_stats;
76 
77 static inline void incr_ro_hits(void)
78 {
79 	pager_stats.ro_hits++;
80 }
81 
82 static inline void incr_rw_hits(void)
83 {
84 	pager_stats.rw_hits++;
85 }
86 
87 static inline void incr_hidden_hits(void)
88 {
89 	pager_stats.hidden_hits++;
90 }
91 
92 static inline void incr_zi_released(void)
93 {
94 	pager_stats.zi_released++;
95 }
96 
97 static inline void incr_npages_all(void)
98 {
99 	pager_stats.npages_all++;
100 }
101 
102 static inline void set_npages(void)
103 {
104 	pager_stats.npages = tee_pager_npages;
105 }
106 
107 void tee_pager_get_stats(struct tee_pager_stats *stats)
108 {
109 	*stats = pager_stats;
110 
111 	pager_stats.hidden_hits = 0;
112 	pager_stats.ro_hits = 0;
113 	pager_stats.rw_hits = 0;
114 	pager_stats.zi_released = 0;
115 }
116 
117 #else /* CFG_WITH_STATS */
118 static inline void incr_ro_hits(void) { }
119 static inline void incr_rw_hits(void) { }
120 static inline void incr_hidden_hits(void) { }
121 static inline void incr_zi_released(void) { }
122 static inline void incr_npages_all(void) { }
123 static inline void set_npages(void) { }
124 
125 void tee_pager_get_stats(struct tee_pager_stats *stats)
126 {
127 	memset(stats, 0, sizeof(struct tee_pager_stats));
128 }
129 #endif /* CFG_WITH_STATS */
130 
131 #define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
132 #define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
133 #define TBL_SHIFT	SMALL_PAGE_SHIFT
134 
135 #define EFFECTIVE_VA_SIZE \
136 	(ROUNDUP(VCORE_START_VA + TEE_RAM_VA_SIZE, CORE_MMU_PGDIR_SIZE) - \
137 	 ROUNDDOWN(VCORE_START_VA, CORE_MMU_PGDIR_SIZE))
138 
139 static struct pager_table {
140 	struct pgt pgt;
141 	struct core_mmu_table_info tbl_info;
142 } *pager_tables;
143 static unsigned int num_pager_tables;
144 
145 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
146 
147 /* Defines the range of the alias area */
148 static tee_mm_entry_t *pager_alias_area;
149 /*
150  * Physical pages are added in a stack like fashion to the alias area,
151  * @pager_alias_next_free gives the address of next free entry if
152  * @pager_alias_next_free is != 0
153  */
154 static uintptr_t pager_alias_next_free;
155 
156 #ifdef CFG_TEE_CORE_DEBUG
157 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
158 
159 static uint32_t pager_lock_dldetect(const char *func, const int line,
160 				    struct abort_info *ai)
161 {
162 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
163 	unsigned int retries = 0;
164 	unsigned int reminder = 0;
165 
166 	while (!cpu_spin_trylock(&pager_spinlock)) {
167 		retries++;
168 		if (!retries) {
169 			/* wrapped, time to report */
170 			trace_printf(func, line, TRACE_ERROR, true,
171 				     "possible spinlock deadlock reminder %u",
172 				     reminder);
173 			if (reminder < UINT_MAX)
174 				reminder++;
175 			if (ai)
176 				abort_print(ai);
177 		}
178 	}
179 
180 	return exceptions;
181 }
182 #else
183 static uint32_t pager_lock(struct abort_info __unused *ai)
184 {
185 	return cpu_spin_lock_xsave(&pager_spinlock);
186 }
187 #endif
188 
189 static uint32_t pager_lock_check_stack(size_t stack_size)
190 {
191 	if (stack_size) {
192 		int8_t buf[stack_size];
193 		size_t n;
194 
195 		/*
196 		 * Make sure to touch all pages of the stack that we expect
197 		 * to use with this lock held. We need to take eventual
198 		 * page faults before the lock is taken or we'll deadlock
199 		 * the pager. The pages that are populated in this way will
200 		 * eventually be released at certain save transitions of
201 		 * the thread.
202 		 */
203 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
204 			io_write8((vaddr_t)buf + n, 1);
205 		io_write8((vaddr_t)buf + stack_size - 1, 1);
206 	}
207 
208 	return pager_lock(NULL);
209 }
210 
211 static void pager_unlock(uint32_t exceptions)
212 {
213 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
214 }
215 
216 void *tee_pager_phys_to_virt(paddr_t pa)
217 {
218 	struct core_mmu_table_info ti;
219 	unsigned idx;
220 	uint32_t a;
221 	paddr_t p;
222 	vaddr_t v;
223 	size_t n;
224 
225 	/*
226 	 * Most addresses are mapped lineary, try that first if possible.
227 	 */
228 	if (!tee_pager_get_table_info(pa, &ti))
229 		return NULL; /* impossible pa */
230 	idx = core_mmu_va2idx(&ti, pa);
231 	core_mmu_get_entry(&ti, idx, &p, &a);
232 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
233 		return (void *)core_mmu_idx2va(&ti, idx);
234 
235 	n = 0;
236 	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
237 	while (true) {
238 		while (idx < TBL_NUM_ENTRIES) {
239 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
240 			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
241 				return NULL;
242 
243 			core_mmu_get_entry(&pager_tables[n].tbl_info,
244 					   idx, &p, &a);
245 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
246 				return (void *)v;
247 			idx++;
248 		}
249 
250 		n++;
251 		if (n >= num_pager_tables)
252 			return NULL;
253 		idx = 0;
254 	}
255 
256 	return NULL;
257 }
258 
259 static bool pmem_is_hidden(struct tee_pager_pmem *pmem)
260 {
261 	return pmem->flags & PMEM_FLAG_HIDDEN;
262 }
263 
264 static bool pmem_is_dirty(struct tee_pager_pmem *pmem)
265 {
266 	return pmem->flags & PMEM_FLAG_DIRTY;
267 }
268 
269 static bool pmem_is_covered_by_area(struct tee_pager_pmem *pmem,
270 				    struct tee_pager_area *area)
271 {
272 	if (pmem->fobj != area->fobj)
273 		return false;
274 	if (pmem->fobj_pgidx < area->fobj_pgoffs)
275 		return false;
276 	if ((pmem->fobj_pgidx - area->fobj_pgoffs) >=
277 	    (area->size >> SMALL_PAGE_SHIFT))
278 		return false;
279 
280 	return true;
281 }
282 
283 static size_t pmem_get_area_tblidx(struct tee_pager_pmem *pmem,
284 				   struct tee_pager_area *area)
285 {
286 	size_t tbloffs = (area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT;
287 
288 	return pmem->fobj_pgidx - area->fobj_pgoffs + tbloffs;
289 }
290 
291 static struct pager_table *find_pager_table_may_fail(vaddr_t va)
292 {
293 	size_t n;
294 	const vaddr_t mask = CORE_MMU_PGDIR_MASK;
295 
296 	if (!pager_tables)
297 		return NULL;
298 
299 	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
300 	    CORE_MMU_PGDIR_SHIFT;
301 	if (n >= num_pager_tables)
302 		return NULL;
303 
304 	assert(va >= pager_tables[n].tbl_info.va_base &&
305 	       va <= (pager_tables[n].tbl_info.va_base | mask));
306 
307 	return pager_tables + n;
308 }
309 
310 static struct pager_table *find_pager_table(vaddr_t va)
311 {
312 	struct pager_table *pt = find_pager_table_may_fail(va);
313 
314 	assert(pt);
315 	return pt;
316 }
317 
318 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
319 {
320 	struct pager_table *pt = find_pager_table_may_fail(va);
321 
322 	if (!pt)
323 		return false;
324 
325 	*ti = pt->tbl_info;
326 	return true;
327 }
328 
329 static struct core_mmu_table_info *find_table_info(vaddr_t va)
330 {
331 	return &find_pager_table(va)->tbl_info;
332 }
333 
334 static struct pgt *find_core_pgt(vaddr_t va)
335 {
336 	return &find_pager_table(va)->pgt;
337 }
338 
339 void tee_pager_set_alias_area(tee_mm_entry_t *mm)
340 {
341 	struct pager_table *pt;
342 	unsigned idx;
343 	vaddr_t smem = tee_mm_get_smem(mm);
344 	size_t nbytes = tee_mm_get_bytes(mm);
345 	vaddr_t v;
346 	uint32_t a = 0;
347 
348 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
349 
350 	assert(!pager_alias_area);
351 	pager_alias_area = mm;
352 	pager_alias_next_free = smem;
353 
354 	/* Clear all mapping in the alias area */
355 	pt = find_pager_table(smem);
356 	idx = core_mmu_va2idx(&pt->tbl_info, smem);
357 	while (pt <= (pager_tables + num_pager_tables - 1)) {
358 		while (idx < TBL_NUM_ENTRIES) {
359 			v = core_mmu_idx2va(&pt->tbl_info, idx);
360 			if (v >= (smem + nbytes))
361 				goto out;
362 
363 			core_mmu_get_entry(&pt->tbl_info, idx, NULL, &a);
364 			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
365 			if (a & TEE_MATTR_VALID_BLOCK)
366 				pgt_dec_used_entries(&pt->pgt);
367 			idx++;
368 		}
369 
370 		pt++;
371 		idx = 0;
372 	}
373 
374 out:
375 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
376 }
377 
378 static size_t tbl_usage_count(struct core_mmu_table_info *ti)
379 {
380 	size_t n;
381 	uint32_t a = 0;
382 	size_t usage = 0;
383 
384 	for (n = 0; n < ti->num_entries; n++) {
385 		core_mmu_get_entry(ti, n, NULL, &a);
386 		if (a & TEE_MATTR_VALID_BLOCK)
387 			usage++;
388 	}
389 	return usage;
390 }
391 
392 static void area_get_entry(struct tee_pager_area *area, size_t idx,
393 			   paddr_t *pa, uint32_t *attr)
394 {
395 	assert(area->pgt);
396 	assert(idx < TBL_NUM_ENTRIES);
397 	core_mmu_get_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
398 }
399 
400 static void area_set_entry(struct tee_pager_area *area, size_t idx,
401 			   paddr_t pa, uint32_t attr)
402 {
403 	assert(area->pgt);
404 	assert(idx < TBL_NUM_ENTRIES);
405 	core_mmu_set_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
406 }
407 
408 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
409 {
410 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
411 }
412 
413 static vaddr_t area_idx2va(struct tee_pager_area *area, size_t idx)
414 {
415 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
416 }
417 
418 static void area_tlbi_entry(struct tee_pager_area *area, size_t idx)
419 {
420 	vaddr_t va = area_idx2va(area, idx);
421 
422 #if defined(CFG_PAGED_USER_TA)
423 	assert(area->pgt);
424 	if (area->pgt->ctx) {
425 		uint32_t asid = to_user_mode_ctx(area->pgt->ctx)->vm_info.asid;
426 
427 		tlbi_mva_asid(va, asid);
428 		return;
429 	}
430 #endif
431 	tlbi_mva_allasid(va);
432 }
433 
434 static void pmem_unmap(struct tee_pager_pmem *pmem, struct pgt *only_this_pgt)
435 {
436 	struct tee_pager_area *area = NULL;
437 	size_t tblidx = 0;
438 	uint32_t a = 0;
439 
440 	TAILQ_FOREACH(area, &pmem->fobj->areas, fobj_link) {
441 		/*
442 		 * If only_this_pgt points to a pgt then the pgt of this
443 		 * area has to match or we'll skip over it.
444 		 */
445 		if (only_this_pgt && area->pgt != only_this_pgt)
446 			continue;
447 		if (!area->pgt || !pmem_is_covered_by_area(pmem, area))
448 			continue;
449 		tblidx = pmem_get_area_tblidx(pmem, area);
450 		area_get_entry(area, tblidx, NULL, &a);
451 		if (a & TEE_MATTR_VALID_BLOCK) {
452 			area_set_entry(area, tblidx, 0, 0);
453 			pgt_dec_used_entries(area->pgt);
454 			area_tlbi_entry(area, tblidx);
455 		}
456 	}
457 }
458 
459 void tee_pager_early_init(void)
460 {
461 	size_t n = 0;
462 
463 	num_pager_tables = EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE;
464 	pager_tables = calloc(num_pager_tables, sizeof(*pager_tables));
465 	if (!pager_tables)
466 		panic("Cannot allocate pager_tables");
467 
468 	/*
469 	 * Note that this depends on add_pager_vaspace() adding vaspace
470 	 * after end of memory.
471 	 */
472 	for (n = 0; n < num_pager_tables; n++) {
473 		if (!core_mmu_find_table(NULL, TEE_RAM_VA_START +
474 					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
475 					 &pager_tables[n].tbl_info))
476 			panic("can't find mmu tables");
477 
478 		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
479 			panic("Unsupported page size in translation table");
480 		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
481 		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);
482 
483 		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
484 		pgt_set_used_entries(&pager_tables[n].pgt,
485 				tbl_usage_count(&pager_tables[n].tbl_info));
486 	}
487 }
488 
489 static void *pager_add_alias_page(paddr_t pa)
490 {
491 	unsigned idx;
492 	struct core_mmu_table_info *ti;
493 	/* Alias pages mapped without write permission: runtime will care */
494 	uint32_t attr = TEE_MATTR_VALID_BLOCK |
495 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
496 			TEE_MATTR_SECURE | TEE_MATTR_PR;
497 
498 	DMSG("0x%" PRIxPA, pa);
499 
500 	ti = find_table_info(pager_alias_next_free);
501 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
502 	core_mmu_set_entry(ti, idx, pa, attr);
503 	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
504 	pager_alias_next_free += SMALL_PAGE_SIZE;
505 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
506 				      tee_mm_get_bytes(pager_alias_area)))
507 		pager_alias_next_free = 0;
508 	return (void *)core_mmu_idx2va(ti, idx);
509 }
510 
511 static void area_insert(struct tee_pager_area_head *head,
512 			struct tee_pager_area *area,
513 			struct tee_pager_area *a_prev)
514 {
515 	uint32_t exceptions = pager_lock_check_stack(8);
516 
517 	if (a_prev)
518 		TAILQ_INSERT_AFTER(head, a_prev, area, link);
519 	else
520 		TAILQ_INSERT_HEAD(head, area, link);
521 	TAILQ_INSERT_TAIL(&area->fobj->areas, area, fobj_link);
522 
523 	pager_unlock(exceptions);
524 }
525 KEEP_PAGER(area_insert);
526 
527 void tee_pager_add_core_area(vaddr_t base, enum tee_pager_area_type type,
528 			     struct fobj *fobj)
529 {
530 	struct tee_pager_area *area = NULL;
531 	uint32_t flags = 0;
532 	size_t fobj_pgoffs = 0;
533 	vaddr_t b = base;
534 	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
535 	size_t s2 = 0;
536 
537 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : type %d", base, base + s, type);
538 
539 	if (base & SMALL_PAGE_MASK || !s) {
540 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, s);
541 		panic();
542 	}
543 
544 	switch (type) {
545 	case PAGER_AREA_TYPE_RO:
546 		flags = TEE_MATTR_PRX;
547 		break;
548 	case PAGER_AREA_TYPE_RW:
549 	case PAGER_AREA_TYPE_LOCK:
550 		flags = TEE_MATTR_PRW;
551 		break;
552 	default:
553 		panic();
554 	}
555 
556 	if (!fobj)
557 		panic();
558 
559 	while (s) {
560 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
561 		area = calloc(1, sizeof(*area));
562 		if (!area)
563 			panic("alloc_area");
564 
565 		area->fobj = fobj_get(fobj);
566 		area->fobj_pgoffs = fobj_pgoffs;
567 		area->type = type;
568 		area->pgt = find_core_pgt(b);
569 		area->base = b;
570 		area->size = s2;
571 		area->flags = flags;
572 		area_insert(&tee_pager_area_head, area, NULL);
573 
574 		b += s2;
575 		s -= s2;
576 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
577 	}
578 }
579 
580 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
581 					vaddr_t va)
582 {
583 	struct tee_pager_area *area;
584 
585 	if (!areas)
586 		return NULL;
587 
588 	TAILQ_FOREACH(area, areas, link) {
589 		if (core_is_buffer_inside(va, 1, area->base, area->size))
590 			return area;
591 	}
592 	return NULL;
593 }
594 
595 #ifdef CFG_PAGED_USER_TA
596 static struct tee_pager_area *find_uta_area(vaddr_t va)
597 {
598 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
599 
600 	if (!is_user_mode_ctx(ctx))
601 		return NULL;
602 	return find_area(to_user_mode_ctx(ctx)->areas, va);
603 }
604 #else
605 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
606 {
607 	return NULL;
608 }
609 #endif /*CFG_PAGED_USER_TA*/
610 
611 
612 static uint32_t get_area_mattr(uint32_t area_flags)
613 {
614 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
615 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
616 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
617 
618 	return attr;
619 }
620 
621 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
622 {
623 	struct core_mmu_table_info *ti;
624 	paddr_t pa;
625 	unsigned idx;
626 
627 	ti = find_table_info((vaddr_t)pmem->va_alias);
628 	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
629 	core_mmu_get_entry(ti, idx, &pa, NULL);
630 	return pa;
631 }
632 
633 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
634 			void *va_alias)
635 {
636 	size_t fobj_pgoffs = ((page_va - area->base) >> SMALL_PAGE_SHIFT) +
637 			     area->fobj_pgoffs;
638 	struct core_mmu_table_info *ti;
639 	uint32_t attr_alias;
640 	paddr_t pa_alias;
641 	unsigned int idx_alias;
642 
643 	/* Insure we are allowed to write to aliased virtual page */
644 	ti = find_table_info((vaddr_t)va_alias);
645 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
646 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
647 	if (!(attr_alias & TEE_MATTR_PW)) {
648 		attr_alias |= TEE_MATTR_PW;
649 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
650 		tlbi_mva_allasid((vaddr_t)va_alias);
651 	}
652 
653 	asan_tag_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
654 	if (fobj_load_page(area->fobj, fobj_pgoffs, va_alias)) {
655 		EMSG("PH 0x%" PRIxVA " failed", page_va);
656 		panic();
657 	}
658 	switch (area->type) {
659 	case PAGER_AREA_TYPE_RO:
660 		incr_ro_hits();
661 		/* Forbid write to aliases for read-only (maybe exec) pages */
662 		attr_alias &= ~TEE_MATTR_PW;
663 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
664 		tlbi_mva_allasid((vaddr_t)va_alias);
665 		break;
666 	case PAGER_AREA_TYPE_RW:
667 		incr_rw_hits();
668 		break;
669 	case PAGER_AREA_TYPE_LOCK:
670 		break;
671 	default:
672 		panic();
673 	}
674 	asan_tag_no_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
675 }
676 
677 static void tee_pager_save_page(struct tee_pager_pmem *pmem)
678 {
679 	if (pmem_is_dirty(pmem)) {
680 		asan_tag_access(pmem->va_alias,
681 				(uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
682 		if (fobj_save_page(pmem->fobj, pmem->fobj_pgidx,
683 				   pmem->va_alias))
684 			panic("fobj_save_page");
685 		asan_tag_no_access(pmem->va_alias,
686 				   (uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
687 	}
688 }
689 
690 #ifdef CFG_PAGED_USER_TA
691 static void unlink_area(struct tee_pager_area_head *area_head,
692 			struct tee_pager_area *area)
693 {
694 	uint32_t exceptions = pager_lock_check_stack(64);
695 
696 	TAILQ_REMOVE(area_head, area, link);
697 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
698 
699 	pager_unlock(exceptions);
700 }
701 KEEP_PAGER(unlink_area);
702 
703 static void free_area(struct tee_pager_area *area)
704 {
705 	fobj_put(area->fobj);
706 	free(area);
707 }
708 
709 static TEE_Result pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base,
710 				    struct fobj *fobj, uint32_t prot)
711 {
712 	struct tee_pager_area *a_prev = NULL;
713 	struct tee_pager_area *area = NULL;
714 	vaddr_t b = base;
715 	size_t fobj_pgoffs = 0;
716 	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
717 
718 	if (!uctx->areas) {
719 		uctx->areas = malloc(sizeof(*uctx->areas));
720 		if (!uctx->areas)
721 			return TEE_ERROR_OUT_OF_MEMORY;
722 		TAILQ_INIT(uctx->areas);
723 	}
724 
725 	area = TAILQ_FIRST(uctx->areas);
726 	while (area) {
727 		if (core_is_buffer_intersect(b, s, area->base,
728 					     area->size))
729 			return TEE_ERROR_BAD_PARAMETERS;
730 		if (b < area->base)
731 			break;
732 		a_prev = area;
733 		area = TAILQ_NEXT(area, link);
734 	}
735 
736 	while (s) {
737 		size_t s2;
738 
739 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
740 		area = calloc(1, sizeof(*area));
741 		if (!area)
742 			return TEE_ERROR_OUT_OF_MEMORY;
743 
744 		/* Table info will be set when the context is activated. */
745 		area->fobj = fobj_get(fobj);
746 		area->fobj_pgoffs = fobj_pgoffs;
747 		area->type = PAGER_AREA_TYPE_RW;
748 		area->base = b;
749 		area->size = s2;
750 		area->flags = prot;
751 
752 		area_insert(uctx->areas, area, a_prev);
753 
754 		a_prev = area;
755 		b += s2;
756 		s -= s2;
757 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
758 	}
759 
760 	return TEE_SUCCESS;
761 }
762 
763 TEE_Result tee_pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base,
764 				 struct fobj *fobj, uint32_t prot)
765 {
766 	TEE_Result res = TEE_SUCCESS;
767 	struct thread_specific_data *tsd = thread_get_tsd();
768 	struct tee_pager_area *area = NULL;
769 	struct core_mmu_table_info dir_info = { NULL };
770 
771 	if (&uctx->ctx != tsd->ctx) {
772 		/*
773 		 * Changes are to an utc that isn't active. Just add the
774 		 * areas page tables will be dealt with later.
775 		 */
776 		return pager_add_um_area(uctx, base, fobj, prot);
777 	}
778 
779 	/*
780 	 * Assign page tables before adding areas to be able to tell which
781 	 * are newly added and should be removed in case of failure.
782 	 */
783 	tee_pager_assign_um_tables(uctx);
784 	res = pager_add_um_area(uctx, base, fobj, prot);
785 	if (res) {
786 		struct tee_pager_area *next_a;
787 
788 		/* Remove all added areas */
789 		TAILQ_FOREACH_SAFE(area, uctx->areas, link, next_a) {
790 			if (!area->pgt) {
791 				unlink_area(uctx->areas, area);
792 				free_area(area);
793 			}
794 		}
795 		return res;
796 	}
797 
798 	/*
799 	 * Assign page tables to the new areas and make sure that the page
800 	 * tables are registered in the upper table.
801 	 */
802 	tee_pager_assign_um_tables(uctx);
803 	core_mmu_get_user_pgdir(&dir_info);
804 	TAILQ_FOREACH(area, uctx->areas, link) {
805 		paddr_t pa;
806 		size_t idx;
807 		uint32_t attr;
808 
809 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
810 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
811 
812 		/*
813 		 * Check if the page table already is used, if it is, it's
814 		 * already registered.
815 		 */
816 		if (area->pgt->num_used_entries) {
817 			assert(attr & TEE_MATTR_TABLE);
818 			assert(pa == virt_to_phys(area->pgt->tbl));
819 			continue;
820 		}
821 
822 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
823 		pa = virt_to_phys(area->pgt->tbl);
824 		assert(pa);
825 		/*
826 		 * Note that the update of the table entry is guaranteed to
827 		 * be atomic.
828 		 */
829 		core_mmu_set_entry(&dir_info, idx, pa, attr);
830 	}
831 
832 	return TEE_SUCCESS;
833 }
834 
835 static void split_area(struct tee_pager_area_head *area_head,
836 		       struct tee_pager_area *area, struct tee_pager_area *a2,
837 		       vaddr_t va)
838 {
839 	uint32_t exceptions = pager_lock_check_stack(64);
840 	size_t diff = va - area->base;
841 
842 	a2->fobj = fobj_get(area->fobj);
843 	a2->fobj_pgoffs = area->fobj_pgoffs + diff / SMALL_PAGE_SIZE;
844 	a2->type = area->type;
845 	a2->flags = area->flags;
846 	a2->base = va;
847 	a2->size = area->size - diff;
848 	a2->pgt = area->pgt;
849 	area->size = diff;
850 
851 	TAILQ_INSERT_AFTER(area_head, area, a2, link);
852 	TAILQ_INSERT_AFTER(&area->fobj->areas, area, a2, fobj_link);
853 
854 	pager_unlock(exceptions);
855 }
856 KEEP_PAGER(split_area);
857 
858 TEE_Result tee_pager_split_um_region(struct user_mode_ctx *uctx, vaddr_t va)
859 {
860 	struct tee_pager_area *area = NULL;
861 	struct tee_pager_area *a2 = NULL;
862 
863 	if (va & SMALL_PAGE_MASK)
864 		return TEE_ERROR_BAD_PARAMETERS;
865 
866 	TAILQ_FOREACH(area, uctx->areas, link) {
867 		if (va == area->base || va == area->base + area->size)
868 			return TEE_SUCCESS;
869 		if (va > area->base && va < area->base + area->size) {
870 			a2 = calloc(1, sizeof(*a2));
871 			if (!a2)
872 				return TEE_ERROR_OUT_OF_MEMORY;
873 			split_area(uctx->areas, area, a2, va);
874 			return TEE_SUCCESS;
875 		}
876 	}
877 
878 	return TEE_SUCCESS;
879 }
880 
881 static void merge_area_with_next(struct tee_pager_area_head *area_head,
882 				 struct tee_pager_area *a,
883 				 struct tee_pager_area *a_next)
884 {
885 	uint32_t exceptions = pager_lock_check_stack(64);
886 
887 	TAILQ_REMOVE(area_head, a_next, link);
888 	TAILQ_REMOVE(&a_next->fobj->areas, a_next, fobj_link);
889 	a->size += a_next->size;
890 
891 	pager_unlock(exceptions);
892 }
893 KEEP_PAGER(merge_area_with_next);
894 
895 void tee_pager_merge_um_region(struct user_mode_ctx *uctx, vaddr_t va,
896 			       size_t len)
897 {
898 	struct tee_pager_area *a_next = NULL;
899 	struct tee_pager_area *a = NULL;
900 
901 	if ((va | len) & SMALL_PAGE_MASK)
902 		return;
903 
904 	for (a = TAILQ_FIRST(uctx->areas);; a = a_next) {
905 		a_next = TAILQ_NEXT(a, link);
906 		if (!a_next)
907 			return;
908 
909 		/* Try merging with the area just before va */
910 		if (a->base + a->size < va)
911 			continue;
912 
913 		/*
914 		 * If a->base is well past our range we're done.
915 		 * Note that if it's just the page after our range we'll
916 		 * try to merge.
917 		 */
918 		if (a->base > va + len)
919 			return;
920 
921 		if (a->base + a->size != a_next->base)
922 			continue;
923 		if (a->fobj != a_next->fobj || a->type != a_next->type ||
924 		    a->flags != a_next->flags || a->pgt != a_next->pgt)
925 			continue;
926 		if (a->fobj_pgoffs + a->size / SMALL_PAGE_SIZE !=
927 		    a_next->fobj_pgoffs)
928 			continue;
929 
930 		merge_area_with_next(uctx->areas, a, a_next);
931 		free_area(a_next);
932 		a_next = a;
933 	}
934 }
935 
936 static void rem_area(struct tee_pager_area_head *area_head,
937 		     struct tee_pager_area *area)
938 {
939 	struct tee_pager_pmem *pmem;
940 	size_t last_pgoffs = area->fobj_pgoffs +
941 			     (area->size >> SMALL_PAGE_SHIFT) - 1;
942 	uint32_t exceptions;
943 	size_t idx = 0;
944 	uint32_t a = 0;
945 
946 	exceptions = pager_lock_check_stack(64);
947 
948 	TAILQ_REMOVE(area_head, area, link);
949 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
950 
951 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
952 		if (pmem->fobj != area->fobj ||
953 		    pmem->fobj_pgidx < area->fobj_pgoffs ||
954 		    pmem->fobj_pgidx > last_pgoffs)
955 			continue;
956 
957 		idx = pmem_get_area_tblidx(pmem, area);
958 		area_get_entry(area, idx, NULL, &a);
959 		if (!(a & TEE_MATTR_VALID_BLOCK))
960 			continue;
961 
962 		area_set_entry(area, idx, 0, 0);
963 		area_tlbi_entry(area, idx);
964 		pgt_dec_used_entries(area->pgt);
965 	}
966 
967 	pager_unlock(exceptions);
968 
969 	free_area(area);
970 }
971 KEEP_PAGER(rem_area);
972 
973 void tee_pager_rem_um_region(struct user_mode_ctx *uctx, vaddr_t base,
974 			     size_t size)
975 {
976 	struct tee_pager_area *area;
977 	struct tee_pager_area *next_a;
978 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
979 
980 	TAILQ_FOREACH_SAFE(area, uctx->areas, link, next_a) {
981 		if (core_is_buffer_inside(area->base, area->size, base, s))
982 			rem_area(uctx->areas, area);
983 	}
984 	tlbi_asid(uctx->vm_info.asid);
985 }
986 
987 void tee_pager_rem_um_areas(struct user_mode_ctx *uctx)
988 {
989 	struct tee_pager_area *area = NULL;
990 
991 	if (!uctx->areas)
992 		return;
993 
994 	while (true) {
995 		area = TAILQ_FIRST(uctx->areas);
996 		if (!area)
997 			break;
998 		unlink_area(uctx->areas, area);
999 		free_area(area);
1000 	}
1001 
1002 	free(uctx->areas);
1003 }
1004 
1005 static bool __maybe_unused same_context(struct tee_pager_pmem *pmem)
1006 {
1007 	struct tee_pager_area *a = TAILQ_FIRST(&pmem->fobj->areas);
1008 	void *ctx = a->pgt->ctx;
1009 
1010 	do {
1011 		a = TAILQ_NEXT(a, fobj_link);
1012 		if (!a)
1013 			return true;
1014 	} while (a->pgt->ctx == ctx);
1015 
1016 	return false;
1017 }
1018 
1019 bool tee_pager_set_um_area_attr(struct user_mode_ctx *uctx, vaddr_t base,
1020 				size_t size, uint32_t flags)
1021 {
1022 	bool ret = false;
1023 	vaddr_t b = base;
1024 	size_t s = size;
1025 	size_t s2 = 0;
1026 	struct tee_pager_area *area = find_area(uctx->areas, b);
1027 	uint32_t exceptions = 0;
1028 	struct tee_pager_pmem *pmem = NULL;
1029 	uint32_t a = 0;
1030 	uint32_t f = 0;
1031 	uint32_t mattr = 0;
1032 	uint32_t f2 = 0;
1033 	size_t tblidx = 0;
1034 
1035 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
1036 	if (f & TEE_MATTR_UW)
1037 		f |= TEE_MATTR_PW;
1038 	mattr = get_area_mattr(f);
1039 
1040 	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1041 
1042 	while (s) {
1043 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
1044 		if (!area || area->base != b || area->size != s2) {
1045 			ret = false;
1046 			goto out;
1047 		}
1048 		b += s2;
1049 		s -= s2;
1050 
1051 		if (area->flags == f)
1052 			goto next_area;
1053 
1054 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1055 			if (!pmem_is_covered_by_area(pmem, area))
1056 				continue;
1057 
1058 			tblidx = pmem_get_area_tblidx(pmem, area);
1059 			area_get_entry(area, tblidx, NULL, &a);
1060 			if (a == f)
1061 				continue;
1062 			area_set_entry(area, tblidx, 0, 0);
1063 			area_tlbi_entry(area, tblidx);
1064 
1065 			pmem->flags &= ~PMEM_FLAG_HIDDEN;
1066 			if (pmem_is_dirty(pmem))
1067 				f2 = mattr;
1068 			else
1069 				f2 = mattr & ~(TEE_MATTR_UW | TEE_MATTR_PW);
1070 			area_set_entry(area, tblidx, get_pmem_pa(pmem), f2);
1071 			if (!(a & TEE_MATTR_VALID_BLOCK))
1072 				pgt_inc_used_entries(area->pgt);
1073 			/*
1074 			 * Make sure the table update is visible before
1075 			 * continuing.
1076 			 */
1077 			dsb_ishst();
1078 
1079 			/*
1080 			 * Here's a problem if this page already is shared.
1081 			 * We need do icache invalidate for each context
1082 			 * in which it is shared. In practice this will
1083 			 * never happen.
1084 			 */
1085 			if (flags & TEE_MATTR_UX) {
1086 				void *va = (void *)area_idx2va(area, tblidx);
1087 
1088 				/* Assert that the pmem isn't shared. */
1089 				assert(same_context(pmem));
1090 
1091 				dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1092 				icache_inv_user_range(va, SMALL_PAGE_SIZE);
1093 			}
1094 		}
1095 
1096 		area->flags = f;
1097 next_area:
1098 		area = TAILQ_NEXT(area, link);
1099 	}
1100 
1101 	ret = true;
1102 out:
1103 	pager_unlock(exceptions);
1104 	return ret;
1105 }
1106 
1107 KEEP_PAGER(tee_pager_set_um_area_attr);
1108 #endif /*CFG_PAGED_USER_TA*/
1109 
1110 void tee_pager_invalidate_fobj(struct fobj *fobj)
1111 {
1112 	struct tee_pager_pmem *pmem;
1113 	uint32_t exceptions;
1114 
1115 	exceptions = pager_lock_check_stack(64);
1116 
1117 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1118 		if (pmem->fobj == fobj) {
1119 			pmem->fobj = NULL;
1120 			pmem->fobj_pgidx = INVALID_PGIDX;
1121 		}
1122 	}
1123 
1124 	pager_unlock(exceptions);
1125 }
1126 KEEP_PAGER(tee_pager_invalidate_fobj);
1127 
1128 static struct tee_pager_pmem *pmem_find(struct tee_pager_area *area,
1129 					unsigned int tblidx)
1130 {
1131 	struct tee_pager_pmem *pmem = NULL;
1132 
1133 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
1134 		if (pmem->fobj == area->fobj &&
1135 		    pmem_get_area_tblidx(pmem, area) == tblidx)
1136 			return pmem;
1137 
1138 	return NULL;
1139 }
1140 
1141 static bool tee_pager_unhide_page(struct tee_pager_area *area,
1142 				  unsigned int tblidx)
1143 {
1144 	struct tee_pager_pmem *pmem = pmem_find(area, tblidx);
1145 	uint32_t a = get_area_mattr(area->flags);
1146 	uint32_t attr = 0;
1147 	paddr_t pa = 0;
1148 
1149 	if (!pmem)
1150 		return false;
1151 
1152 	area_get_entry(area, tblidx, NULL, &attr);
1153 	if (attr & TEE_MATTR_VALID_BLOCK)
1154 		return false;
1155 
1156 	/*
1157 	 * The page is hidden, or not not mapped yet. Unhide the page and
1158 	 * move it to the tail.
1159 	 *
1160 	 * Since the page isn't mapped there doesn't exist a valid TLB entry
1161 	 * for this address, so no TLB invalidation is required after setting
1162 	 * the new entry. A DSB is needed though, to make the write visible.
1163 	 *
1164 	 * For user executable pages it's more complicated. Those pages can
1165 	 * be shared between multiple TA mappings and thus populated by
1166 	 * another TA. The reference manual states that:
1167 	 *
1168 	 * "instruction cache maintenance is required only after writing
1169 	 * new data to a physical address that holds an instruction."
1170 	 *
1171 	 * So for hidden pages we would not need to invalidate i-cache, but
1172 	 * for newly populated pages we do. Since we don't know which we
1173 	 * have to assume the worst and always invalidate the i-cache. We
1174 	 * don't need to clean the d-cache though, since that has already
1175 	 * been done earlier.
1176 	 *
1177 	 * Additional bookkeeping to tell if the i-cache invalidation is
1178 	 * needed or not is left as a future optimization.
1179 	 */
1180 
1181 	/* If it's not a dirty block, then it should be read only. */
1182 	if (!pmem_is_dirty(pmem))
1183 		a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1184 
1185 	pa = get_pmem_pa(pmem);
1186 	pmem->flags &= ~PMEM_FLAG_HIDDEN;
1187 	if (area->flags & TEE_MATTR_UX) {
1188 		void *va = (void *)area_idx2va(area, tblidx);
1189 
1190 		/* Set a temporary read-only mapping */
1191 		assert(!(a & (TEE_MATTR_UW | TEE_MATTR_PW)));
1192 		area_set_entry(area, tblidx, pa, a & ~TEE_MATTR_UX);
1193 		dsb_ishst();
1194 
1195 		icache_inv_user_range(va, SMALL_PAGE_SIZE);
1196 
1197 		/* Set the final mapping */
1198 		area_set_entry(area, tblidx, pa, a);
1199 		area_tlbi_entry(area, tblidx);
1200 	} else {
1201 		area_set_entry(area, tblidx, pa, a);
1202 		dsb_ishst();
1203 	}
1204 	pgt_inc_used_entries(area->pgt);
1205 
1206 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1207 	TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1208 	incr_hidden_hits();
1209 	return true;
1210 }
1211 
1212 static void tee_pager_hide_pages(void)
1213 {
1214 	struct tee_pager_pmem *pmem = NULL;
1215 	size_t n = 0;
1216 
1217 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1218 		if (n >= TEE_PAGER_NHIDE)
1219 			break;
1220 		n++;
1221 
1222 		/* we cannot hide pages when pmem->fobj is not defined. */
1223 		if (!pmem->fobj)
1224 			continue;
1225 
1226 		if (pmem_is_hidden(pmem))
1227 			continue;
1228 
1229 		pmem->flags |= PMEM_FLAG_HIDDEN;
1230 		pmem_unmap(pmem, NULL);
1231 	}
1232 }
1233 
1234 static unsigned int __maybe_unused
1235 num_areas_with_pmem(struct tee_pager_pmem *pmem)
1236 {
1237 	struct tee_pager_area *a = NULL;
1238 	unsigned int num_matches = 0;
1239 
1240 	TAILQ_FOREACH(a, &pmem->fobj->areas, fobj_link)
1241 		if (pmem_is_covered_by_area(pmem, a))
1242 			num_matches++;
1243 
1244 	return num_matches;
1245 }
1246 
1247 /*
1248  * Find mapped pmem, hide and move to pageble pmem.
1249  * Return false if page was not mapped, and true if page was mapped.
1250  */
1251 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1252 				       vaddr_t page_va)
1253 {
1254 	struct tee_pager_pmem *pmem;
1255 	size_t tblidx = 0;
1256 	size_t pgidx = area_va2idx(area, page_va) + area->fobj_pgoffs -
1257 		       ((area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT);
1258 
1259 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1260 		if (pmem->fobj != area->fobj || pmem->fobj_pgidx != pgidx)
1261 			continue;
1262 
1263 		/*
1264 		 * Locked pages may not be shared. We're asserting that the
1265 		 * number of areas using this pmem is one and only one as
1266 		 * we're about to unmap it.
1267 		 */
1268 		assert(num_areas_with_pmem(pmem) == 1);
1269 
1270 		tblidx = pmem_get_area_tblidx(pmem, area);
1271 		area_set_entry(area, tblidx, 0, 0);
1272 		pgt_dec_used_entries(area->pgt);
1273 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1274 		pmem->fobj = NULL;
1275 		pmem->fobj_pgidx = INVALID_PGIDX;
1276 		tee_pager_npages++;
1277 		set_npages();
1278 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1279 		incr_zi_released();
1280 		return true;
1281 	}
1282 
1283 	return false;
1284 }
1285 
1286 /* Finds the oldest page and unmaps it from all tables */
1287 static struct tee_pager_pmem *tee_pager_get_page(enum tee_pager_area_type at)
1288 {
1289 	struct tee_pager_pmem *pmem;
1290 
1291 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1292 	if (!pmem) {
1293 		EMSG("No pmem entries");
1294 		return NULL;
1295 	}
1296 
1297 	if (pmem->fobj) {
1298 		pmem_unmap(pmem, NULL);
1299 		tee_pager_save_page(pmem);
1300 	}
1301 
1302 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1303 	pmem->fobj = NULL;
1304 	pmem->fobj_pgidx = INVALID_PGIDX;
1305 	pmem->flags = 0;
1306 	if (at == PAGER_AREA_TYPE_LOCK) {
1307 		/* Move page to lock list */
1308 		if (tee_pager_npages <= 0)
1309 			panic("running out of page");
1310 		tee_pager_npages--;
1311 		set_npages();
1312 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1313 	} else {
1314 		/* move page to back */
1315 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1316 	}
1317 
1318 	return pmem;
1319 }
1320 
1321 static bool pager_update_permissions(struct tee_pager_area *area,
1322 			struct abort_info *ai, bool *handled)
1323 {
1324 	unsigned int pgidx = area_va2idx(area, ai->va);
1325 	struct tee_pager_pmem *pmem = NULL;
1326 	uint32_t attr = 0;
1327 	paddr_t pa = 0;
1328 
1329 	*handled = false;
1330 
1331 	area_get_entry(area, pgidx, &pa, &attr);
1332 
1333 	/* Not mapped */
1334 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1335 		return false;
1336 
1337 	/* Not readable, should not happen */
1338 	if (abort_is_user_exception(ai)) {
1339 		if (!(attr & TEE_MATTR_UR))
1340 			return true;
1341 	} else {
1342 		if (!(attr & TEE_MATTR_PR)) {
1343 			abort_print_error(ai);
1344 			panic();
1345 		}
1346 	}
1347 
1348 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1349 	case CORE_MMU_FAULT_TRANSLATION:
1350 	case CORE_MMU_FAULT_READ_PERMISSION:
1351 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1352 			/* Check attempting to execute from an NOX page */
1353 			if (abort_is_user_exception(ai)) {
1354 				if (!(attr & TEE_MATTR_UX))
1355 					return true;
1356 			} else {
1357 				if (!(attr & TEE_MATTR_PX)) {
1358 					abort_print_error(ai);
1359 					panic();
1360 				}
1361 			}
1362 		}
1363 		/* Since the page is mapped now it's OK */
1364 		break;
1365 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1366 		/* Check attempting to write to an RO page */
1367 		pmem = pmem_find(area, pgidx);
1368 		if (!pmem)
1369 			panic();
1370 		if (abort_is_user_exception(ai)) {
1371 			if (!(area->flags & TEE_MATTR_UW))
1372 				return true;
1373 			if (!(attr & TEE_MATTR_UW)) {
1374 				FMSG("Dirty %p",
1375 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1376 				pmem->flags |= PMEM_FLAG_DIRTY;
1377 				area_set_entry(area, pgidx, pa,
1378 					       get_area_mattr(area->flags));
1379 				area_tlbi_entry(area, pgidx);
1380 			}
1381 
1382 		} else {
1383 			if (!(area->flags & TEE_MATTR_PW)) {
1384 				abort_print_error(ai);
1385 				panic();
1386 			}
1387 			if (!(attr & TEE_MATTR_PW)) {
1388 				FMSG("Dirty %p",
1389 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1390 				pmem->flags |= PMEM_FLAG_DIRTY;
1391 				area_set_entry(area, pgidx, pa,
1392 					       get_area_mattr(area->flags));
1393 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1394 			}
1395 		}
1396 		/* Since permissions has been updated now it's OK */
1397 		break;
1398 	default:
1399 		/* Some fault we can't deal with */
1400 		if (abort_is_user_exception(ai))
1401 			return true;
1402 		abort_print_error(ai);
1403 		panic();
1404 	}
1405 	*handled = true;
1406 	return true;
1407 }
1408 
1409 #ifdef CFG_TEE_CORE_DEBUG
1410 static void stat_handle_fault(void)
1411 {
1412 	static size_t num_faults;
1413 	static size_t min_npages = SIZE_MAX;
1414 	static size_t total_min_npages = SIZE_MAX;
1415 
1416 	num_faults++;
1417 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1418 		DMSG("nfaults %zu npages %zu (min %zu)",
1419 		     num_faults, tee_pager_npages, min_npages);
1420 		min_npages = tee_pager_npages; /* reset */
1421 	}
1422 	if (tee_pager_npages < min_npages)
1423 		min_npages = tee_pager_npages;
1424 	if (tee_pager_npages < total_min_npages)
1425 		total_min_npages = tee_pager_npages;
1426 }
1427 #else
1428 static void stat_handle_fault(void)
1429 {
1430 }
1431 #endif
1432 
1433 bool tee_pager_handle_fault(struct abort_info *ai)
1434 {
1435 	struct tee_pager_area *area;
1436 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1437 	uint32_t exceptions;
1438 	bool ret;
1439 	bool clean_user_cache = false;
1440 
1441 #ifdef TEE_PAGER_DEBUG_PRINT
1442 	if (!abort_is_user_exception(ai))
1443 		abort_print(ai);
1444 #endif
1445 
1446 	/*
1447 	 * We're updating pages that can affect several active CPUs at a
1448 	 * time below. We end up here because a thread tries to access some
1449 	 * memory that isn't available. We have to be careful when making
1450 	 * that memory available as other threads may succeed in accessing
1451 	 * that address the moment after we've made it available.
1452 	 *
1453 	 * That means that we can't just map the memory and populate the
1454 	 * page, instead we use the aliased mapping to populate the page
1455 	 * and once everything is ready we map it.
1456 	 */
1457 	exceptions = pager_lock(ai);
1458 
1459 	stat_handle_fault();
1460 
1461 	/* check if the access is valid */
1462 	if (abort_is_user_exception(ai)) {
1463 		area = find_uta_area(ai->va);
1464 		clean_user_cache = true;
1465 	} else {
1466 		area = find_area(&tee_pager_area_head, ai->va);
1467 		if (!area) {
1468 			area = find_uta_area(ai->va);
1469 			clean_user_cache = true;
1470 		}
1471 	}
1472 	if (!area || !area->pgt) {
1473 		ret = false;
1474 		goto out;
1475 	}
1476 
1477 	if (!tee_pager_unhide_page(area, area_va2idx(area, page_va))) {
1478 		struct tee_pager_pmem *pmem = NULL;
1479 		uint32_t attr = 0;
1480 		paddr_t pa = 0;
1481 		size_t tblidx = 0;
1482 
1483 		/*
1484 		 * The page wasn't hidden, but some other core may have
1485 		 * updated the table entry before we got here or we need
1486 		 * to make a read-only page read-write (dirty).
1487 		 */
1488 		if (pager_update_permissions(area, ai, &ret)) {
1489 			/*
1490 			 * Nothing more to do with the abort. The problem
1491 			 * could already have been dealt with from another
1492 			 * core or if ret is false the TA will be paniced.
1493 			 */
1494 			goto out;
1495 		}
1496 
1497 		pmem = tee_pager_get_page(area->type);
1498 		if (!pmem) {
1499 			abort_print(ai);
1500 			panic();
1501 		}
1502 
1503 		/* load page code & data */
1504 		tee_pager_load_page(area, page_va, pmem->va_alias);
1505 
1506 
1507 		pmem->fobj = area->fobj;
1508 		pmem->fobj_pgidx = area_va2idx(area, page_va) +
1509 				   area->fobj_pgoffs -
1510 				   ((area->base & CORE_MMU_PGDIR_MASK) >>
1511 					SMALL_PAGE_SHIFT);
1512 		tblidx = pmem_get_area_tblidx(pmem, area);
1513 		attr = get_area_mattr(area->flags);
1514 		/*
1515 		 * Pages from PAGER_AREA_TYPE_RW starts read-only to be
1516 		 * able to tell when they are updated and should be tagged
1517 		 * as dirty.
1518 		 */
1519 		if (area->type == PAGER_AREA_TYPE_RW)
1520 			attr &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1521 		pa = get_pmem_pa(pmem);
1522 
1523 		/*
1524 		 * We've updated the page using the aliased mapping and
1525 		 * some cache maintenence is now needed if it's an
1526 		 * executable page.
1527 		 *
1528 		 * Since the d-cache is a Physically-indexed,
1529 		 * physically-tagged (PIPT) cache we can clean either the
1530 		 * aliased address or the real virtual address. In this
1531 		 * case we choose the real virtual address.
1532 		 *
1533 		 * The i-cache can also be PIPT, but may be something else
1534 		 * too like VIPT. The current code requires the caches to
1535 		 * implement the IVIPT extension, that is:
1536 		 * "instruction cache maintenance is required only after
1537 		 * writing new data to a physical address that holds an
1538 		 * instruction."
1539 		 *
1540 		 * To portably invalidate the icache the page has to
1541 		 * be mapped at the final virtual address but not
1542 		 * executable.
1543 		 */
1544 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1545 			uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1546 					TEE_MATTR_PW | TEE_MATTR_UW;
1547 			void *va = (void *)page_va;
1548 
1549 			/* Set a temporary read-only mapping */
1550 			area_set_entry(area, tblidx, pa, attr & ~mask);
1551 			area_tlbi_entry(area, tblidx);
1552 
1553 			dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1554 			if (clean_user_cache)
1555 				icache_inv_user_range(va, SMALL_PAGE_SIZE);
1556 			else
1557 				icache_inv_range(va, SMALL_PAGE_SIZE);
1558 
1559 			/* Set the final mapping */
1560 			area_set_entry(area, tblidx, pa, attr);
1561 			area_tlbi_entry(area, tblidx);
1562 		} else {
1563 			area_set_entry(area, tblidx, pa, attr);
1564 			/*
1565 			 * No need to flush TLB for this entry, it was
1566 			 * invalid. We should use a barrier though, to make
1567 			 * sure that the change is visible.
1568 			 */
1569 			dsb_ishst();
1570 		}
1571 		pgt_inc_used_entries(area->pgt);
1572 
1573 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1574 
1575 	}
1576 
1577 	tee_pager_hide_pages();
1578 	ret = true;
1579 out:
1580 	pager_unlock(exceptions);
1581 	return ret;
1582 }
1583 
1584 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1585 {
1586 	size_t n;
1587 
1588 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1589 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1590 
1591 	/* setup memory */
1592 	for (n = 0; n < npages; n++) {
1593 		struct core_mmu_table_info *ti;
1594 		struct tee_pager_pmem *pmem;
1595 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1596 		unsigned int pgidx;
1597 		paddr_t pa;
1598 		uint32_t attr;
1599 
1600 		ti = find_table_info(va);
1601 		pgidx = core_mmu_va2idx(ti, va);
1602 		/*
1603 		 * Note that we can only support adding pages in the
1604 		 * valid range of this table info, currently not a problem.
1605 		 */
1606 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1607 
1608 		/* Ignore unmapped pages/blocks */
1609 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1610 			continue;
1611 
1612 		pmem = calloc(1, sizeof(struct tee_pager_pmem));
1613 		if (!pmem)
1614 			panic("out of mem");
1615 
1616 		pmem->va_alias = pager_add_alias_page(pa);
1617 
1618 		if (unmap) {
1619 			pmem->fobj = NULL;
1620 			pmem->fobj_pgidx = INVALID_PGIDX;
1621 			core_mmu_set_entry(ti, pgidx, 0, 0);
1622 			pgt_dec_used_entries(find_core_pgt(va));
1623 		} else {
1624 			struct tee_pager_area *area = NULL;
1625 
1626 			/*
1627 			 * The page is still mapped, let's assign the area
1628 			 * and update the protection bits accordingly.
1629 			 */
1630 			area = find_area(&tee_pager_area_head, va);
1631 			assert(area && area->pgt == find_core_pgt(va));
1632 			pmem->fobj = area->fobj;
1633 			pmem->fobj_pgidx = pgidx + area->fobj_pgoffs -
1634 					   ((area->base &
1635 							CORE_MMU_PGDIR_MASK) >>
1636 						SMALL_PAGE_SHIFT);
1637 			assert(pgidx == pmem_get_area_tblidx(pmem, area));
1638 			assert(pa == get_pmem_pa(pmem));
1639 			area_set_entry(area, pgidx, pa,
1640 				       get_area_mattr(area->flags));
1641 		}
1642 
1643 		tee_pager_npages++;
1644 		incr_npages_all();
1645 		set_npages();
1646 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1647 	}
1648 
1649 	/*
1650 	 * As this is done at inits, invalidate all TLBs once instead of
1651 	 * targeting only the modified entries.
1652 	 */
1653 	tlbi_all();
1654 }
1655 
1656 #ifdef CFG_PAGED_USER_TA
1657 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1658 {
1659 	struct pgt *p = pgt;
1660 
1661 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1662 		p = SLIST_NEXT(p, link);
1663 	return p;
1664 }
1665 
1666 void tee_pager_assign_um_tables(struct user_mode_ctx *uctx)
1667 {
1668 	struct tee_pager_area *area = NULL;
1669 	struct pgt *pgt = NULL;
1670 
1671 	if (!uctx->areas)
1672 		return;
1673 
1674 	pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1675 	TAILQ_FOREACH(area, uctx->areas, link) {
1676 		if (!area->pgt)
1677 			area->pgt = find_pgt(pgt, area->base);
1678 		else
1679 			assert(area->pgt == find_pgt(pgt, area->base));
1680 		if (!area->pgt)
1681 			panic();
1682 	}
1683 }
1684 
1685 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1686 {
1687 	struct tee_pager_pmem *pmem = NULL;
1688 	struct tee_pager_area *area = NULL;
1689 	struct tee_pager_area_head *areas = NULL;
1690 	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1691 
1692 	if (!pgt->num_used_entries)
1693 		goto out;
1694 
1695 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1696 		if (pmem->fobj)
1697 			pmem_unmap(pmem, pgt);
1698 	}
1699 	assert(!pgt->num_used_entries);
1700 
1701 out:
1702 	areas = to_user_ta_ctx(pgt->ctx)->uctx.areas;
1703 	if (areas) {
1704 		TAILQ_FOREACH(area, areas, link) {
1705 			if (area->pgt == pgt)
1706 				area->pgt = NULL;
1707 		}
1708 	}
1709 
1710 	pager_unlock(exceptions);
1711 }
1712 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1713 #endif /*CFG_PAGED_USER_TA*/
1714 
1715 void tee_pager_release_phys(void *addr, size_t size)
1716 {
1717 	bool unmaped = false;
1718 	vaddr_t va = (vaddr_t)addr;
1719 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1720 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1721 	struct tee_pager_area *area;
1722 	uint32_t exceptions;
1723 
1724 	if (end <= begin)
1725 		return;
1726 
1727 	exceptions = pager_lock_check_stack(128);
1728 
1729 	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
1730 		area = find_area(&tee_pager_area_head, va);
1731 		if (!area)
1732 			panic();
1733 		unmaped |= tee_pager_release_one_phys(area, va);
1734 	}
1735 
1736 	if (unmaped)
1737 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1738 
1739 	pager_unlock(exceptions);
1740 }
1741 KEEP_PAGER(tee_pager_release_phys);
1742 
1743 void *tee_pager_alloc(size_t size)
1744 {
1745 	tee_mm_entry_t *mm = NULL;
1746 	uint8_t *smem = NULL;
1747 	size_t num_pages = 0;
1748 	struct fobj *fobj = NULL;
1749 
1750 	if (!size)
1751 		return NULL;
1752 
1753 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1754 	if (!mm)
1755 		return NULL;
1756 
1757 	smem = (uint8_t *)tee_mm_get_smem(mm);
1758 	num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
1759 	fobj = fobj_locked_paged_alloc(num_pages);
1760 	if (!fobj) {
1761 		tee_mm_free(mm);
1762 		return NULL;
1763 	}
1764 
1765 	tee_pager_add_core_area((vaddr_t)smem, PAGER_AREA_TYPE_LOCK, fobj);
1766 	fobj_put(fobj);
1767 
1768 	asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE);
1769 
1770 	return smem;
1771 }
1772