xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 5b25c76ac40f830867e3d60800120ffd7874e8dc)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <io.h>
10 #include <keep.h>
11 #include <kernel/abort.h>
12 #include <kernel/asan.h>
13 #include <kernel/cache_helpers.h>
14 #include <kernel/linker.h>
15 #include <kernel/panic.h>
16 #include <kernel/spinlock.h>
17 #include <kernel/tee_misc.h>
18 #include <kernel/tee_ta_manager.h>
19 #include <kernel/thread.h>
20 #include <kernel/tlb_helpers.h>
21 #include <kernel/user_mode_ctx.h>
22 #include <mm/core_memprot.h>
23 #include <mm/fobj.h>
24 #include <mm/tee_mm.h>
25 #include <mm/tee_pager.h>
26 #include <stdlib.h>
27 #include <sys/queue.h>
28 #include <tee_api_defines.h>
29 #include <trace.h>
30 #include <types_ext.h>
31 #include <utee_defines.h>
32 #include <util.h>
33 
34 
35 static struct tee_pager_area_head tee_pager_area_head =
36 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
37 
38 #define INVALID_PGIDX		UINT_MAX
39 #define PMEM_FLAG_DIRTY		BIT(0)
40 #define PMEM_FLAG_HIDDEN	BIT(1)
41 
42 /*
43  * struct tee_pager_pmem - Represents a physical page used for paging.
44  *
45  * @flags	flags defined by PMEM_FLAG_* above
46  * @fobj_pgidx	index of the page in the @fobj
47  * @fobj	File object from which a page is made visible.
48  * @va_alias	Virtual address where the physical page is always aliased.
49  *		Used during remapping of the page when the content needs to
50  *		be updated before it's available at the new location.
51  */
52 struct tee_pager_pmem {
53 	unsigned int flags;
54 	unsigned int fobj_pgidx;
55 	struct fobj *fobj;
56 	void *va_alias;
57 	TAILQ_ENTRY(tee_pager_pmem) link;
58 };
59 
60 /* The list of physical pages. The first page in the list is the oldest */
61 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
62 
63 static struct tee_pager_pmem_head tee_pager_pmem_head =
64 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
65 
66 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
67 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
68 
69 /* Maximum number of pages hidden at each call to tee_pager_hide_pages() */
70 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
71 
72 /* Number of registered physical pages, used when hiding pages. */
73 static size_t tee_pager_npages;
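/*
 * Example of the eviction heuristic above (descriptive only, the numbers
 * are made up): with 90 registered pageable pages TEE_PAGER_NHIDE is 30,
 * so after each handled fault tee_pager_hide_pages() walks the 30 oldest
 * entries of tee_pager_pmem_head and unmaps ("hides") those still mapped.
 * A page that is touched again soon takes a cheap fault in
 * tee_pager_unhide_page() and is moved to the tail of the list, which is
 * how the FIFO list approximates LRU when tee_pager_get_page() later
 * evicts from the head.
 */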
74 
75 #ifdef CFG_WITH_STATS
76 static struct tee_pager_stats pager_stats;
77 
78 static inline void incr_ro_hits(void)
79 {
80 	pager_stats.ro_hits++;
81 }
82 
83 static inline void incr_rw_hits(void)
84 {
85 	pager_stats.rw_hits++;
86 }
87 
88 static inline void incr_hidden_hits(void)
89 {
90 	pager_stats.hidden_hits++;
91 }
92 
93 static inline void incr_zi_released(void)
94 {
95 	pager_stats.zi_released++;
96 }
97 
98 static inline void incr_npages_all(void)
99 {
100 	pager_stats.npages_all++;
101 }
102 
103 static inline void set_npages(void)
104 {
105 	pager_stats.npages = tee_pager_npages;
106 }
107 
108 void tee_pager_get_stats(struct tee_pager_stats *stats)
109 {
110 	*stats = pager_stats;
111 
112 	pager_stats.hidden_hits = 0;
113 	pager_stats.ro_hits = 0;
114 	pager_stats.rw_hits = 0;
115 	pager_stats.zi_released = 0;
116 }
117 
118 #else /* CFG_WITH_STATS */
119 static inline void incr_ro_hits(void) { }
120 static inline void incr_rw_hits(void) { }
121 static inline void incr_hidden_hits(void) { }
122 static inline void incr_zi_released(void) { }
123 static inline void incr_npages_all(void) { }
124 static inline void set_npages(void) { }
125 
126 void tee_pager_get_stats(struct tee_pager_stats *stats)
127 {
128 	memset(stats, 0, sizeof(struct tee_pager_stats));
129 }
130 #endif /* CFG_WITH_STATS */
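/*
 * Illustrative sketch, not part of the pager: how a caller might read and
 * report the statistics above. The field names follow the assignments in
 * this file; the %zu specifiers assume the size_t fields declared in
 * <mm/tee_pager.h>. Note that the CFG_WITH_STATS variant of
 * tee_pager_get_stats() resets the hit counters, so each call reports the
 * deltas since the previous call.
 *
 *	static void __maybe_unused print_pager_stats(void)
 *	{
 *		struct tee_pager_stats s = { };
 *
 *		tee_pager_get_stats(&s);
 *		DMSG("ro %zu rw %zu hidden %zu zi %zu npages %zu/%zu",
 *		     s.ro_hits, s.rw_hits, s.hidden_hits, s.zi_released,
 *		     s.npages, s.npages_all);
 *	}
 */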
131 
132 #define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
133 #define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
134 #define TBL_SHIFT	SMALL_PAGE_SHIFT
135 
136 #define EFFECTIVE_VA_SIZE \
137 	(ROUNDUP(VCORE_START_VA + TEE_RAM_VA_SIZE, CORE_MMU_PGDIR_SIZE) - \
138 	 ROUNDDOWN(VCORE_START_VA, CORE_MMU_PGDIR_SIZE))
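/*
 * Worked example with made-up numbers: if CORE_MMU_PGDIR_SIZE is 2 MiB,
 * VCORE_START_VA is 0x0e100000 and TEE_RAM_VA_SIZE is 0x00300000, then
 * ROUNDDOWN(0x0e100000, 2M) = 0x0e000000 and
 * ROUNDUP(0x0e400000, 2M) = 0x0e400000, giving EFFECTIVE_VA_SIZE = 4 MiB,
 * i.e. two pager_tables in tee_pager_early_init() below.
 */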
139 
140 static struct pager_table {
141 	struct pgt pgt;
142 	struct core_mmu_table_info tbl_info;
143 } *pager_tables;
144 static unsigned int num_pager_tables;
145 
146 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
147 
148 /* Defines the range of the alias area */
149 static tee_mm_entry_t *pager_alias_area;
150 /*
151  * Physical pages are added in a stack-like fashion to the alias area.
152  * @pager_alias_next_free gives the address of the next free entry if
153  * @pager_alias_next_free != 0, 0 means the alias area is exhausted.
154  */
155 static uintptr_t pager_alias_next_free;
156 
157 #ifdef CFG_TEE_CORE_DEBUG
158 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
159 
160 static uint32_t pager_lock_dldetect(const char *func, const int line,
161 				    struct abort_info *ai)
162 {
163 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
164 	unsigned int retries = 0;
165 	unsigned int reminder = 0;
166 
167 	while (!cpu_spin_trylock(&pager_spinlock)) {
168 		retries++;
169 		if (!retries) {
170 			/* wrapped, time to report */
171 			trace_printf(func, line, TRACE_ERROR, true,
172 				     "possible spinlock deadlock reminder %u",
173 				     reminder);
174 			if (reminder < UINT_MAX)
175 				reminder++;
176 			if (ai)
177 				abort_print(ai);
178 		}
179 	}
180 
181 	return exceptions;
182 }
183 #else
184 static uint32_t pager_lock(struct abort_info __unused *ai)
185 {
186 	return cpu_spin_lock_xsave(&pager_spinlock);
187 }
188 #endif
189 
190 static uint32_t pager_lock_check_stack(size_t stack_size)
191 {
192 	if (stack_size) {
193 		int8_t buf[stack_size];
194 		size_t n;
195 
196 		/*
197 		 * Make sure to touch all pages of the stack that we expect
198 		 * to use with this lock held. We need to take any potential
199 		 * page faults before the lock is taken or we'll deadlock
200 		 * the pager. The pages that are populated in this way will
201 		 * eventually be released at certain save transitions of
202 		 * the thread.
203 		 */
204 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
205 			io_write8((vaddr_t)buf + n, 1);
206 		io_write8((vaddr_t)buf + stack_size - 1, 1);
207 	}
208 
209 	return pager_lock(NULL);
210 }
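/*
 * Illustrative usage (the 64 below is just an example estimate, as used
 * by several callers in this file): the argument is the amount of stack
 * the critical section is expected to use, so that those stack pages are
 * faulted in before the pager spinlock is taken.
 *
 *	uint32_t excs = pager_lock_check_stack(64);
 *
 *	... short critical section touching at most ~64 bytes of stack ...
 *
 *	pager_unlock(excs);
 */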
211 
212 static void pager_unlock(uint32_t exceptions)
213 {
214 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
215 }
216 
217 void *tee_pager_phys_to_virt(paddr_t pa)
218 {
219 	struct core_mmu_table_info ti;
220 	unsigned idx;
221 	uint32_t a;
222 	paddr_t p;
223 	vaddr_t v;
224 	size_t n;
225 
226 	/*
227 	 * Most addresses are mapped linearly, try that first if possible.
228 	 */
229 	if (!tee_pager_get_table_info(pa, &ti))
230 		return NULL; /* impossible pa */
231 	idx = core_mmu_va2idx(&ti, pa);
232 	core_mmu_get_entry(&ti, idx, &p, &a);
233 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
234 		return (void *)core_mmu_idx2va(&ti, idx);
235 
236 	n = 0;
237 	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
238 	while (true) {
239 		while (idx < TBL_NUM_ENTRIES) {
240 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
241 			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
242 				return NULL;
243 
244 			core_mmu_get_entry(&pager_tables[n].tbl_info,
245 					   idx, &p, &a);
246 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
247 				return (void *)v;
248 			idx++;
249 		}
250 
251 		n++;
252 		if (n >= num_pager_tables)
253 			return NULL;
254 		idx = 0;
255 	}
256 
257 	return NULL;
258 }
259 
260 static bool pmem_is_hidden(struct tee_pager_pmem *pmem)
261 {
262 	return pmem->flags & PMEM_FLAG_HIDDEN;
263 }
264 
265 static bool pmem_is_dirty(struct tee_pager_pmem *pmem)
266 {
267 	return pmem->flags & PMEM_FLAG_DIRTY;
268 }
269 
270 static bool pmem_is_covered_by_area(struct tee_pager_pmem *pmem,
271 				    struct tee_pager_area *area)
272 {
273 	if (pmem->fobj != area->fobj)
274 		return false;
275 	if (pmem->fobj_pgidx < area->fobj_pgoffs)
276 		return false;
277 	if ((pmem->fobj_pgidx - area->fobj_pgoffs) >=
278 	    (area->size >> SMALL_PAGE_SHIFT))
279 		return false;
280 
281 	return true;
282 }
283 
284 static size_t pmem_get_area_tblidx(struct tee_pager_pmem *pmem,
285 				   struct tee_pager_area *area)
286 {
287 	size_t tbloffs = (area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT;
288 
289 	return pmem->fobj_pgidx - area->fobj_pgoffs + tbloffs;
290 }
291 
292 static struct pager_table *find_pager_table_may_fail(vaddr_t va)
293 {
294 	size_t n;
295 	const vaddr_t mask = CORE_MMU_PGDIR_MASK;
296 
297 	if (!pager_tables)
298 		return NULL;
299 
300 	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
301 	    CORE_MMU_PGDIR_SHIFT;
302 	if (n >= num_pager_tables)
303 		return NULL;
304 
305 	assert(va >= pager_tables[n].tbl_info.va_base &&
306 	       va <= (pager_tables[n].tbl_info.va_base | mask));
307 
308 	return pager_tables + n;
309 }
310 
311 static struct pager_table *find_pager_table(vaddr_t va)
312 {
313 	struct pager_table *pt = find_pager_table_may_fail(va);
314 
315 	assert(pt);
316 	return pt;
317 }
318 
319 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
320 {
321 	struct pager_table *pt = find_pager_table_may_fail(va);
322 
323 	if (!pt)
324 		return false;
325 
326 	*ti = pt->tbl_info;
327 	return true;
328 }
329 
330 static struct core_mmu_table_info *find_table_info(vaddr_t va)
331 {
332 	return &find_pager_table(va)->tbl_info;
333 }
334 
335 static struct pgt *find_core_pgt(vaddr_t va)
336 {
337 	return &find_pager_table(va)->pgt;
338 }
339 
340 void tee_pager_set_alias_area(tee_mm_entry_t *mm)
341 {
342 	struct pager_table *pt;
343 	unsigned idx;
344 	vaddr_t smem = tee_mm_get_smem(mm);
345 	size_t nbytes = tee_mm_get_bytes(mm);
346 	vaddr_t v;
347 	uint32_t a = 0;
348 
349 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
350 
351 	assert(!pager_alias_area);
352 	pager_alias_area = mm;
353 	pager_alias_next_free = smem;
354 
355 	/* Clear all mappings in the alias area */
356 	pt = find_pager_table(smem);
357 	idx = core_mmu_va2idx(&pt->tbl_info, smem);
358 	while (pt <= (pager_tables + num_pager_tables - 1)) {
359 		while (idx < TBL_NUM_ENTRIES) {
360 			v = core_mmu_idx2va(&pt->tbl_info, idx);
361 			if (v >= (smem + nbytes))
362 				goto out;
363 
364 			core_mmu_get_entry(&pt->tbl_info, idx, NULL, &a);
365 			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
366 			if (a & TEE_MATTR_VALID_BLOCK)
367 				pgt_dec_used_entries(&pt->pgt);
368 			idx++;
369 		}
370 
371 		pt++;
372 		idx = 0;
373 	}
374 
375 out:
376 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
377 }
378 
379 static size_t tbl_usage_count(struct core_mmu_table_info *ti)
380 {
381 	size_t n;
382 	uint32_t a = 0;
383 	size_t usage = 0;
384 
385 	for (n = 0; n < ti->num_entries; n++) {
386 		core_mmu_get_entry(ti, n, NULL, &a);
387 		if (a & TEE_MATTR_VALID_BLOCK)
388 			usage++;
389 	}
390 	return usage;
391 }
392 
393 static void area_get_entry(struct tee_pager_area *area, size_t idx,
394 			   paddr_t *pa, uint32_t *attr)
395 {
396 	assert(area->pgt);
397 	assert(idx < TBL_NUM_ENTRIES);
398 	core_mmu_get_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
399 }
400 
401 static void area_set_entry(struct tee_pager_area *area, size_t idx,
402 			   paddr_t pa, uint32_t attr)
403 {
404 	assert(area->pgt);
405 	assert(idx < TBL_NUM_ENTRIES);
406 	core_mmu_set_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
407 }
408 
409 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
410 {
411 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
412 }
413 
414 static vaddr_t area_idx2va(struct tee_pager_area *area, size_t idx)
415 {
416 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
417 }
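/*
 * Worked example of the index arithmetic above (illustrative addresses,
 * assuming 4 KiB pages and 2 MiB translation tables): for an area with
 * base 0x40123000 the table offset of the first page is
 * (0x40123000 & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT = 0x123. A pmem
 * holding the third page of the area (fobj_pgidx == area->fobj_pgoffs + 2)
 * therefore gets tblidx 0x125 from pmem_get_area_tblidx(), and
 * area_idx2va(area, 0x125) gives back 0x40125000.
 */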
418 
419 static void area_tlbi_entry(struct tee_pager_area *area, size_t idx)
420 {
421 	vaddr_t va = area_idx2va(area, idx);
422 
423 #if defined(CFG_PAGED_USER_TA)
424 	assert(area->pgt);
425 	if (area->pgt->ctx) {
426 		uint32_t asid = to_user_mode_ctx(area->pgt->ctx)->vm_info.asid;
427 
428 		tlbi_mva_asid(va, asid);
429 		return;
430 	}
431 #endif
432 	tlbi_mva_allasid(va);
433 }
434 
435 static void pmem_unmap(struct tee_pager_pmem *pmem, struct pgt *only_this_pgt)
436 {
437 	struct tee_pager_area *area = NULL;
438 	size_t tblidx = 0;
439 	uint32_t a = 0;
440 
441 	TAILQ_FOREACH(area, &pmem->fobj->areas, fobj_link) {
442 		/*
443 		 * If only_this_pgt points to a pgt then the pgt of this
444 		 * area has to match or we'll skip over it.
445 		 */
446 		if (only_this_pgt && area->pgt != only_this_pgt)
447 			continue;
448 		if (!area->pgt || !pmem_is_covered_by_area(pmem, area))
449 			continue;
450 		tblidx = pmem_get_area_tblidx(pmem, area);
451 		area_get_entry(area, tblidx, NULL, &a);
452 		if (a & TEE_MATTR_VALID_BLOCK) {
453 			area_set_entry(area, tblidx, 0, 0);
454 			pgt_dec_used_entries(area->pgt);
455 			area_tlbi_entry(area, tblidx);
456 		}
457 	}
458 }
459 
460 void tee_pager_early_init(void)
461 {
462 	size_t n = 0;
463 
464 	num_pager_tables = EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE;
465 	pager_tables = calloc(num_pager_tables, sizeof(*pager_tables));
466 	if (!pager_tables)
467 		panic("Cannot allocate pager_tables");
468 
469 	/*
470 	 * Note that this depends on add_pager_vaspace() adding vaspace
471 	 * after end of memory.
472 	 */
473 	for (n = 0; n < num_pager_tables; n++) {
474 		if (!core_mmu_find_table(NULL, VCORE_START_VA +
475 					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
476 					 &pager_tables[n].tbl_info))
477 			panic("can't find mmu tables");
478 
479 		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
480 			panic("Unsupported page size in translation table");
481 		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
482 		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);
483 
484 		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
485 		pgt_set_used_entries(&pager_tables[n].pgt,
486 				tbl_usage_count(&pager_tables[n].tbl_info));
487 	}
488 }
489 
490 static void *pager_add_alias_page(paddr_t pa)
491 {
492 	unsigned idx;
493 	struct core_mmu_table_info *ti;
494 	/* Alias pages are mapped read-only; write access is enabled at runtime when needed */
495 	uint32_t attr = TEE_MATTR_VALID_BLOCK |
496 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
497 			TEE_MATTR_SECURE | TEE_MATTR_PR;
498 
499 	DMSG("0x%" PRIxPA, pa);
500 
501 	ti = find_table_info(pager_alias_next_free);
502 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
503 	core_mmu_set_entry(ti, idx, pa, attr);
504 	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
505 	pager_alias_next_free += SMALL_PAGE_SIZE;
506 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
507 				      tee_mm_get_bytes(pager_alias_area)))
508 		pager_alias_next_free = 0;
509 	return (void *)core_mmu_idx2va(ti, idx);
510 }
511 
512 static void area_insert(struct tee_pager_area_head *head,
513 			struct tee_pager_area *area,
514 			struct tee_pager_area *a_prev)
515 {
516 	uint32_t exceptions = pager_lock_check_stack(8);
517 
518 	if (a_prev)
519 		TAILQ_INSERT_AFTER(head, a_prev, area, link);
520 	else
521 		TAILQ_INSERT_HEAD(head, area, link);
522 	TAILQ_INSERT_TAIL(&area->fobj->areas, area, fobj_link);
523 
524 	pager_unlock(exceptions);
525 }
526 KEEP_PAGER(area_insert);
527 
528 void tee_pager_add_core_area(vaddr_t base, enum tee_pager_area_type type,
529 			     struct fobj *fobj)
530 {
531 	struct tee_pager_area *area = NULL;
532 	uint32_t flags = 0;
533 	size_t fobj_pgoffs = 0;
534 	vaddr_t b = base;
535 	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
536 	size_t s2 = 0;
537 
538 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : type %d", base, base + s, type);
539 
540 	if (base & SMALL_PAGE_MASK || !s) {
541 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, s);
542 		panic();
543 	}
544 
545 	switch (type) {
546 	case PAGER_AREA_TYPE_RO:
547 		flags = TEE_MATTR_PRX;
548 		break;
549 	case PAGER_AREA_TYPE_RW:
550 	case PAGER_AREA_TYPE_LOCK:
551 		flags = TEE_MATTR_PRW;
552 		break;
553 	default:
554 		panic();
555 	}
556 
557 	if (!fobj)
558 		panic();
559 
560 	while (s) {
561 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
562 		area = calloc(1, sizeof(*area));
563 		if (!area)
564 			panic("alloc_area");
565 
566 		area->fobj = fobj_get(fobj);
567 		area->fobj_pgoffs = fobj_pgoffs;
568 		area->type = type;
569 		area->pgt = find_core_pgt(b);
570 		area->base = b;
571 		area->size = s2;
572 		area->flags = flags;
573 		area_insert(&tee_pager_area_head, area, NULL);
574 
575 		b += s2;
576 		s -= s2;
577 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
578 	}
579 }
580 
581 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
582 					vaddr_t va)
583 {
584 	struct tee_pager_area *area;
585 
586 	if (!areas)
587 		return NULL;
588 
589 	TAILQ_FOREACH(area, areas, link) {
590 		if (core_is_buffer_inside(va, 1, area->base, area->size))
591 			return area;
592 	}
593 	return NULL;
594 }
595 
596 #ifdef CFG_PAGED_USER_TA
597 static struct tee_pager_area *find_uta_area(vaddr_t va)
598 {
599 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
600 
601 	if (!is_user_mode_ctx(ctx))
602 		return NULL;
603 	return find_area(to_user_mode_ctx(ctx)->areas, va);
604 }
605 #else
606 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
607 {
608 	return NULL;
609 }
610 #endif /*CFG_PAGED_USER_TA*/
611 
612 
613 static uint32_t get_area_mattr(uint32_t area_flags)
614 {
615 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
616 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
617 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
618 
619 	return attr;
620 }
621 
622 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
623 {
624 	struct core_mmu_table_info *ti;
625 	paddr_t pa;
626 	unsigned idx;
627 
628 	ti = find_table_info((vaddr_t)pmem->va_alias);
629 	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
630 	core_mmu_get_entry(ti, idx, &pa, NULL);
631 	return pa;
632 }
633 
634 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
635 			void *va_alias)
636 {
637 	size_t fobj_pgoffs = ((page_va - area->base) >> SMALL_PAGE_SHIFT) +
638 			     area->fobj_pgoffs;
639 	struct core_mmu_table_info *ti;
640 	uint32_t attr_alias;
641 	paddr_t pa_alias;
642 	unsigned int idx_alias;
643 
644 	/* Ensure we are allowed to write to the aliased virtual page */
645 	ti = find_table_info((vaddr_t)va_alias);
646 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
647 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
648 	if (!(attr_alias & TEE_MATTR_PW)) {
649 		attr_alias |= TEE_MATTR_PW;
650 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
651 		tlbi_mva_allasid((vaddr_t)va_alias);
652 	}
653 
654 	asan_tag_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
655 	if (fobj_load_page(area->fobj, fobj_pgoffs, va_alias)) {
656 		EMSG("PH 0x%" PRIxVA " failed", page_va);
657 		panic();
658 	}
659 	switch (area->type) {
660 	case PAGER_AREA_TYPE_RO:
661 		incr_ro_hits();
662 		/* Forbid write to aliases for read-only (maybe exec) pages */
663 		attr_alias &= ~TEE_MATTR_PW;
664 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
665 		tlbi_mva_allasid((vaddr_t)va_alias);
666 		break;
667 	case PAGER_AREA_TYPE_RW:
668 		incr_rw_hits();
669 		break;
670 	case PAGER_AREA_TYPE_LOCK:
671 		break;
672 	default:
673 		panic();
674 	}
675 	asan_tag_no_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
676 }
677 
678 static void tee_pager_save_page(struct tee_pager_pmem *pmem)
679 {
680 	if (pmem_is_dirty(pmem)) {
681 		asan_tag_access(pmem->va_alias,
682 				(uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
683 		if (fobj_save_page(pmem->fobj, pmem->fobj_pgidx,
684 				   pmem->va_alias))
685 			panic("fobj_save_page");
686 		asan_tag_no_access(pmem->va_alias,
687 				   (uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
688 	}
689 }
690 
691 #ifdef CFG_PAGED_USER_TA
692 static void unlink_area(struct tee_pager_area_head *area_head,
693 			struct tee_pager_area *area)
694 {
695 	uint32_t exceptions = pager_lock_check_stack(64);
696 
697 	TAILQ_REMOVE(area_head, area, link);
698 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
699 
700 	pager_unlock(exceptions);
701 }
702 KEEP_PAGER(unlink_area);
703 
704 static void free_area(struct tee_pager_area *area)
705 {
706 	fobj_put(area->fobj);
707 	free(area);
708 }
709 
710 static TEE_Result pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base,
711 				    struct fobj *fobj, uint32_t prot)
712 {
713 	struct tee_pager_area *a_prev = NULL;
714 	struct tee_pager_area *area = NULL;
715 	vaddr_t b = base;
716 	size_t fobj_pgoffs = 0;
717 	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
718 
719 	if (!uctx->areas) {
720 		uctx->areas = malloc(sizeof(*uctx->areas));
721 		if (!uctx->areas)
722 			return TEE_ERROR_OUT_OF_MEMORY;
723 		TAILQ_INIT(uctx->areas);
724 	}
725 
726 	area = TAILQ_FIRST(uctx->areas);
727 	while (area) {
728 		if (core_is_buffer_intersect(b, s, area->base,
729 					     area->size))
730 			return TEE_ERROR_BAD_PARAMETERS;
731 		if (b < area->base)
732 			break;
733 		a_prev = area;
734 		area = TAILQ_NEXT(area, link);
735 	}
736 
737 	while (s) {
738 		size_t s2;
739 
740 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
741 		area = calloc(1, sizeof(*area));
742 		if (!area)
743 			return TEE_ERROR_OUT_OF_MEMORY;
744 
745 		/* Table info will be set when the context is activated. */
746 		area->fobj = fobj_get(fobj);
747 		area->fobj_pgoffs = fobj_pgoffs;
748 		area->type = PAGER_AREA_TYPE_RW;
749 		area->base = b;
750 		area->size = s2;
751 		area->flags = prot;
752 
753 		area_insert(uctx->areas, area, a_prev);
754 
755 		a_prev = area;
756 		b += s2;
757 		s -= s2;
758 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
759 	}
760 
761 	return TEE_SUCCESS;
762 }
763 
764 TEE_Result tee_pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base,
765 				 struct fobj *fobj, uint32_t prot)
766 {
767 	TEE_Result res = TEE_SUCCESS;
768 	struct thread_specific_data *tsd = thread_get_tsd();
769 	struct tee_pager_area *area = NULL;
770 	struct core_mmu_table_info dir_info = { NULL };
771 
772 	if (&uctx->ctx != tsd->ctx) {
773 		/*
774 		 * Changes are to a user TA context that isn't active. Just
775 		 * add the areas, page tables will be dealt with later.
776 		 */
777 		return pager_add_um_area(uctx, base, fobj, prot);
778 	}
779 
780 	/*
781 	 * Assign page tables before adding areas to be able to tell which
782 	 * are newly added and should be removed in case of failure.
783 	 */
784 	tee_pager_assign_um_tables(uctx);
785 	res = pager_add_um_area(uctx, base, fobj, prot);
786 	if (res) {
787 		struct tee_pager_area *next_a;
788 
789 		/* Remove all added areas */
790 		TAILQ_FOREACH_SAFE(area, uctx->areas, link, next_a) {
791 			if (!area->pgt) {
792 				unlink_area(uctx->areas, area);
793 				free_area(area);
794 			}
795 		}
796 		return res;
797 	}
798 
799 	/*
800 	 * Assign page tables to the new areas and make sure that the page
801 	 * tables are registered in the upper table.
802 	 */
803 	tee_pager_assign_um_tables(uctx);
804 	core_mmu_get_user_pgdir(&dir_info);
805 	TAILQ_FOREACH(area, uctx->areas, link) {
806 		paddr_t pa;
807 		size_t idx;
808 		uint32_t attr;
809 
810 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
811 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
812 
813 		/*
814 		 * Check if the page table is already in use, if it is, it's
815 		 * already registered.
816 		 */
817 		if (area->pgt->num_used_entries) {
818 			assert(attr & TEE_MATTR_TABLE);
819 			assert(pa == virt_to_phys(area->pgt->tbl));
820 			continue;
821 		}
822 
823 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
824 		pa = virt_to_phys(area->pgt->tbl);
825 		assert(pa);
826 		/*
827 		 * Note that the update of the table entry is guaranteed to
828 		 * be atomic.
829 		 */
830 		core_mmu_set_entry(&dir_info, idx, pa, attr);
831 	}
832 
833 	return TEE_SUCCESS;
834 }
835 
836 static void split_area(struct tee_pager_area_head *area_head,
837 		       struct tee_pager_area *area, struct tee_pager_area *a2,
838 		       vaddr_t va)
839 {
840 	uint32_t exceptions = pager_lock_check_stack(64);
841 	size_t diff = va - area->base;
842 
843 	a2->fobj = fobj_get(area->fobj);
844 	a2->fobj_pgoffs = area->fobj_pgoffs + diff / SMALL_PAGE_SIZE;
845 	a2->type = area->type;
846 	a2->flags = area->flags;
847 	a2->base = va;
848 	a2->size = area->size - diff;
849 	a2->pgt = area->pgt;
850 	area->size = diff;
851 
852 	TAILQ_INSERT_AFTER(area_head, area, a2, link);
853 	TAILQ_INSERT_AFTER(&area->fobj->areas, area, a2, fobj_link);
854 
855 	pager_unlock(exceptions);
856 }
857 KEEP_PAGER(split_area);
858 
859 TEE_Result tee_pager_split_um_region(struct user_mode_ctx *uctx, vaddr_t va)
860 {
861 	struct tee_pager_area *area = NULL;
862 	struct tee_pager_area *a2 = NULL;
863 
864 	if (va & SMALL_PAGE_MASK)
865 		return TEE_ERROR_BAD_PARAMETERS;
866 
867 	TAILQ_FOREACH(area, uctx->areas, link) {
868 		if (va == area->base || va == area->base + area->size)
869 			return TEE_SUCCESS;
870 		if (va > area->base && va < area->base + area->size) {
871 			a2 = calloc(1, sizeof(*a2));
872 			if (!a2)
873 				return TEE_ERROR_OUT_OF_MEMORY;
874 			split_area(uctx->areas, area, a2, va);
875 			return TEE_SUCCESS;
876 		}
877 	}
878 
879 	return TEE_SUCCESS;
880 }
881 
882 static void merge_area_with_next(struct tee_pager_area_head *area_head,
883 				 struct tee_pager_area *a,
884 				 struct tee_pager_area *a_next)
885 {
886 	uint32_t exceptions = pager_lock_check_stack(64);
887 
888 	TAILQ_REMOVE(area_head, a_next, link);
889 	TAILQ_REMOVE(&a_next->fobj->areas, a_next, fobj_link);
890 	a->size += a_next->size;
891 
892 	pager_unlock(exceptions);
893 }
894 KEEP_PAGER(merge_area_with_next);
895 
896 void tee_pager_merge_um_region(struct user_mode_ctx *uctx, vaddr_t va,
897 			       size_t len)
898 {
899 	struct tee_pager_area *a_next = NULL;
900 	struct tee_pager_area *a = NULL;
901 	vaddr_t end_va = 0;
902 
903 	if ((va | len) & SMALL_PAGE_MASK)
904 		return;
905 	if (ADD_OVERFLOW(va, len, &end_va))
906 		return;
907 
908 	for (a = TAILQ_FIRST(uctx->areas);; a = a_next) {
909 		a_next = TAILQ_NEXT(a, link);
910 		if (!a_next)
911 			return;
912 
913 		/* Try merging with the area just before va */
914 		if (a->base + a->size < va)
915 			continue;
916 
917 		/*
918 		 * If a->base is well past our range we're done.
919 		 * Note that if it's just the page after our range we'll
920 		 * try to merge.
921 		 */
922 		if (a->base > end_va)
923 			return;
924 
925 		if (a->base + a->size != a_next->base)
926 			continue;
927 		if (a->fobj != a_next->fobj || a->type != a_next->type ||
928 		    a->flags != a_next->flags || a->pgt != a_next->pgt)
929 			continue;
930 		if (a->fobj_pgoffs + a->size / SMALL_PAGE_SIZE !=
931 		    a_next->fobj_pgoffs)
932 			continue;
933 
934 		merge_area_with_next(uctx->areas, a, a_next);
935 		free_area(a_next);
936 		a_next = a;
937 	}
938 }
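/*
 * Illustrative example with made-up addresses: splitting a 16 KiB region
 * based at 0x100000 at va 0x102000 leaves the original area as
 * [0x100000, 0x102000) and creates a2 as [0x102000, 0x104000) with
 * fobj_pgoffs increased by 2. A later
 * tee_pager_merge_um_region(uctx, 0x100000, 0x4000) merges them back,
 * provided both halves still share the same fobj, type, flags and pgt.
 */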
939 
940 static void rem_area(struct tee_pager_area_head *area_head,
941 		     struct tee_pager_area *area)
942 {
943 	struct tee_pager_pmem *pmem;
944 	size_t last_pgoffs = area->fobj_pgoffs +
945 			     (area->size >> SMALL_PAGE_SHIFT) - 1;
946 	uint32_t exceptions;
947 	size_t idx = 0;
948 	uint32_t a = 0;
949 
950 	exceptions = pager_lock_check_stack(64);
951 
952 	TAILQ_REMOVE(area_head, area, link);
953 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
954 
955 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
956 		if (pmem->fobj != area->fobj ||
957 		    pmem->fobj_pgidx < area->fobj_pgoffs ||
958 		    pmem->fobj_pgidx > last_pgoffs)
959 			continue;
960 
961 		idx = pmem_get_area_tblidx(pmem, area);
962 		area_get_entry(area, idx, NULL, &a);
963 		if (!(a & TEE_MATTR_VALID_BLOCK))
964 			continue;
965 
966 		area_set_entry(area, idx, 0, 0);
967 		area_tlbi_entry(area, idx);
968 		pgt_dec_used_entries(area->pgt);
969 	}
970 
971 	pager_unlock(exceptions);
972 
973 	free_area(area);
974 }
975 KEEP_PAGER(rem_area);
976 
977 void tee_pager_rem_um_region(struct user_mode_ctx *uctx, vaddr_t base,
978 			     size_t size)
979 {
980 	struct tee_pager_area *area;
981 	struct tee_pager_area *next_a;
982 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
983 
984 	TAILQ_FOREACH_SAFE(area, uctx->areas, link, next_a) {
985 		if (core_is_buffer_inside(area->base, area->size, base, s))
986 			rem_area(uctx->areas, area);
987 	}
988 	tlbi_asid(uctx->vm_info.asid);
989 }
990 
991 void tee_pager_rem_um_areas(struct user_mode_ctx *uctx)
992 {
993 	struct tee_pager_area *area = NULL;
994 
995 	if (!uctx->areas)
996 		return;
997 
998 	while (true) {
999 		area = TAILQ_FIRST(uctx->areas);
1000 		if (!area)
1001 			break;
1002 		unlink_area(uctx->areas, area);
1003 		free_area(area);
1004 	}
1005 
1006 	free(uctx->areas);
1007 }
1008 
1009 static bool __maybe_unused same_context(struct tee_pager_pmem *pmem)
1010 {
1011 	struct tee_pager_area *a = TAILQ_FIRST(&pmem->fobj->areas);
1012 	void *ctx = a->pgt->ctx;
1013 
1014 	do {
1015 		a = TAILQ_NEXT(a, fobj_link);
1016 		if (!a)
1017 			return true;
1018 	} while (a->pgt->ctx == ctx);
1019 
1020 	return false;
1021 }
1022 
1023 bool tee_pager_set_um_area_attr(struct user_mode_ctx *uctx, vaddr_t base,
1024 				size_t size, uint32_t flags)
1025 {
1026 	bool ret = false;
1027 	vaddr_t b = base;
1028 	size_t s = size;
1029 	size_t s2 = 0;
1030 	struct tee_pager_area *area = find_area(uctx->areas, b);
1031 	uint32_t exceptions = 0;
1032 	struct tee_pager_pmem *pmem = NULL;
1033 	uint32_t a = 0;
1034 	uint32_t f = 0;
1035 	uint32_t mattr = 0;
1036 	uint32_t f2 = 0;
1037 	size_t tblidx = 0;
1038 
1039 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
1040 	if (f & TEE_MATTR_UW)
1041 		f |= TEE_MATTR_PW;
1042 	mattr = get_area_mattr(f);
1043 
1044 	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1045 
1046 	while (s) {
1047 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
1048 		if (!area || area->base != b || area->size != s2) {
1049 			ret = false;
1050 			goto out;
1051 		}
1052 		b += s2;
1053 		s -= s2;
1054 
1055 		if (area->flags == f)
1056 			goto next_area;
1057 
1058 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1059 			if (!pmem_is_covered_by_area(pmem, area))
1060 				continue;
1061 
1062 			tblidx = pmem_get_area_tblidx(pmem, area);
1063 			area_get_entry(area, tblidx, NULL, &a);
1064 			if (a == f)
1065 				continue;
1066 			area_set_entry(area, tblidx, 0, 0);
1067 			area_tlbi_entry(area, tblidx);
1068 
1069 			pmem->flags &= ~PMEM_FLAG_HIDDEN;
1070 			if (pmem_is_dirty(pmem))
1071 				f2 = mattr;
1072 			else
1073 				f2 = mattr & ~(TEE_MATTR_UW | TEE_MATTR_PW);
1074 			area_set_entry(area, tblidx, get_pmem_pa(pmem), f2);
1075 			if (!(a & TEE_MATTR_VALID_BLOCK))
1076 				pgt_inc_used_entries(area->pgt);
1077 			/*
1078 			 * Make sure the table update is visible before
1079 			 * continuing.
1080 			 */
1081 			dsb_ishst();
1082 
1083 			/*
1084 			 * There's a problem if this page is already shared:
1085 			 * we'd need to do an icache invalidate for each
1086 			 * context in which it is shared. In practice this
1087 			 * will never happen.
1088 			 */
1089 			if (flags & TEE_MATTR_UX) {
1090 				void *va = (void *)area_idx2va(area, tblidx);
1091 
1092 				/* Assert that the pmem isn't shared. */
1093 				assert(same_context(pmem));
1094 
1095 				dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1096 				icache_inv_user_range(va, SMALL_PAGE_SIZE);
1097 			}
1098 		}
1099 
1100 		area->flags = f;
1101 next_area:
1102 		area = TAILQ_NEXT(area, link);
1103 	}
1104 
1105 	ret = true;
1106 out:
1107 	pager_unlock(exceptions);
1108 	return ret;
1109 }
1110 
1111 KEEP_PAGER(tee_pager_set_um_area_attr);
1112 #endif /*CFG_PAGED_USER_TA*/
1113 
1114 void tee_pager_invalidate_fobj(struct fobj *fobj)
1115 {
1116 	struct tee_pager_pmem *pmem;
1117 	uint32_t exceptions;
1118 
1119 	exceptions = pager_lock_check_stack(64);
1120 
1121 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1122 		if (pmem->fobj == fobj) {
1123 			pmem->fobj = NULL;
1124 			pmem->fobj_pgidx = INVALID_PGIDX;
1125 		}
1126 	}
1127 
1128 	pager_unlock(exceptions);
1129 }
1130 KEEP_PAGER(tee_pager_invalidate_fobj);
1131 
1132 static struct tee_pager_pmem *pmem_find(struct tee_pager_area *area,
1133 					unsigned int tblidx)
1134 {
1135 	struct tee_pager_pmem *pmem = NULL;
1136 
1137 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
1138 		if (pmem->fobj == area->fobj &&
1139 		    pmem_get_area_tblidx(pmem, area) == tblidx)
1140 			return pmem;
1141 
1142 	return NULL;
1143 }
1144 
1145 static bool tee_pager_unhide_page(struct tee_pager_area *area,
1146 				  unsigned int tblidx)
1147 {
1148 	struct tee_pager_pmem *pmem = pmem_find(area, tblidx);
1149 	uint32_t a = get_area_mattr(area->flags);
1150 	uint32_t attr = 0;
1151 	paddr_t pa = 0;
1152 
1153 	if (!pmem)
1154 		return false;
1155 
1156 	area_get_entry(area, tblidx, NULL, &attr);
1157 	if (attr & TEE_MATTR_VALID_BLOCK)
1158 		return false;
1159 
1160 	/*
1161 	 * The page is hidden, or not mapped yet. Unhide the page and
1162 	 * move it to the tail.
1163 	 *
1164 	 * Since the page isn't mapped there doesn't exist a valid TLB entry
1165 	 * for this address, so no TLB invalidation is required after setting
1166 	 * the new entry. A DSB is needed though, to make the write visible.
1167 	 *
1168 	 * For user executable pages it's more complicated. Those pages can
1169 	 * be shared between multiple TA mappings and thus populated by
1170 	 * another TA. The reference manual states that:
1171 	 *
1172 	 * "instruction cache maintenance is required only after writing
1173 	 * new data to a physical address that holds an instruction."
1174 	 *
1175 	 * So for hidden pages we would not need to invalidate i-cache, but
1176 	 * for newly populated pages we do. Since we don't know which we
1177 	 * have to assume the worst and always invalidate the i-cache. We
1178 	 * don't need to clean the d-cache though, since that has already
1179 	 * been done earlier.
1180 	 *
1181 	 * Additional bookkeeping to tell if the i-cache invalidation is
1182 	 * needed or not is left as a future optimization.
1183 	 */
1184 
1185 	/* If it's not a dirty block, then it should be read only. */
1186 	if (!pmem_is_dirty(pmem))
1187 		a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1188 
1189 	pa = get_pmem_pa(pmem);
1190 	pmem->flags &= ~PMEM_FLAG_HIDDEN;
1191 	if (area->flags & TEE_MATTR_UX) {
1192 		void *va = (void *)area_idx2va(area, tblidx);
1193 
1194 		/* Set a temporary read-only mapping */
1195 		assert(!(a & (TEE_MATTR_UW | TEE_MATTR_PW)));
1196 		area_set_entry(area, tblidx, pa, a & ~TEE_MATTR_UX);
1197 		dsb_ishst();
1198 
1199 		icache_inv_user_range(va, SMALL_PAGE_SIZE);
1200 
1201 		/* Set the final mapping */
1202 		area_set_entry(area, tblidx, pa, a);
1203 		area_tlbi_entry(area, tblidx);
1204 	} else {
1205 		area_set_entry(area, tblidx, pa, a);
1206 		dsb_ishst();
1207 	}
1208 	pgt_inc_used_entries(area->pgt);
1209 
1210 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1211 	TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1212 	incr_hidden_hits();
1213 	return true;
1214 }
1215 
1216 static void tee_pager_hide_pages(void)
1217 {
1218 	struct tee_pager_pmem *pmem = NULL;
1219 	size_t n = 0;
1220 
1221 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1222 		if (n >= TEE_PAGER_NHIDE)
1223 			break;
1224 		n++;
1225 
1226 		/* we cannot hide pages when pmem->fobj is not defined. */
1227 		if (!pmem->fobj)
1228 			continue;
1229 
1230 		if (pmem_is_hidden(pmem))
1231 			continue;
1232 
1233 		pmem->flags |= PMEM_FLAG_HIDDEN;
1234 		pmem_unmap(pmem, NULL);
1235 	}
1236 }
1237 
1238 static unsigned int __maybe_unused
1239 num_areas_with_pmem(struct tee_pager_pmem *pmem)
1240 {
1241 	struct tee_pager_area *a = NULL;
1242 	unsigned int num_matches = 0;
1243 
1244 	TAILQ_FOREACH(a, &pmem->fobj->areas, fobj_link)
1245 		if (pmem_is_covered_by_area(pmem, a))
1246 			num_matches++;
1247 
1248 	return num_matches;
1249 }
1250 
1251 /*
1252  * Find the mapped pmem, unmap it and move it back to the pageable pmem
1253  * list. Return false if the page was not mapped, and true if it was.
1254  */
1255 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1256 				       vaddr_t page_va)
1257 {
1258 	struct tee_pager_pmem *pmem;
1259 	size_t tblidx = 0;
1260 	size_t pgidx = area_va2idx(area, page_va) + area->fobj_pgoffs -
1261 		       ((area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT);
1262 
1263 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1264 		if (pmem->fobj != area->fobj || pmem->fobj_pgidx != pgidx)
1265 			continue;
1266 
1267 		/*
1268 		 * Locked pages may not be shared. We're asserting that the
1269 		 * number of areas using this pmem is one and only one as
1270 		 * we're about to unmap it.
1271 		 */
1272 		assert(num_areas_with_pmem(pmem) == 1);
1273 
1274 		tblidx = pmem_get_area_tblidx(pmem, area);
1275 		area_set_entry(area, tblidx, 0, 0);
1276 		pgt_dec_used_entries(area->pgt);
1277 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1278 		pmem->fobj = NULL;
1279 		pmem->fobj_pgidx = INVALID_PGIDX;
1280 		tee_pager_npages++;
1281 		set_npages();
1282 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1283 		incr_zi_released();
1284 		return true;
1285 	}
1286 
1287 	return false;
1288 }
1289 
1290 /* Finds the oldest page and unmaps it from all tables */
1291 static struct tee_pager_pmem *tee_pager_get_page(enum tee_pager_area_type at)
1292 {
1293 	struct tee_pager_pmem *pmem;
1294 
1295 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1296 	if (!pmem) {
1297 		EMSG("No pmem entries");
1298 		return NULL;
1299 	}
1300 
1301 	if (pmem->fobj) {
1302 		pmem_unmap(pmem, NULL);
1303 		tee_pager_save_page(pmem);
1304 	}
1305 
1306 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1307 	pmem->fobj = NULL;
1308 	pmem->fobj_pgidx = INVALID_PGIDX;
1309 	pmem->flags = 0;
1310 	if (at == PAGER_AREA_TYPE_LOCK) {
1311 		/* Move page to lock list */
1312 		if (tee_pager_npages <= 0)
1313 			panic("running out of page");
1314 		tee_pager_npages--;
1315 		set_npages();
1316 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1317 	} else {
1318 		/* move page to back */
1319 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1320 	}
1321 
1322 	return pmem;
1323 }
1324 
1325 static bool pager_update_permissions(struct tee_pager_area *area,
1326 			struct abort_info *ai, bool *handled)
1327 {
1328 	unsigned int pgidx = area_va2idx(area, ai->va);
1329 	struct tee_pager_pmem *pmem = NULL;
1330 	uint32_t attr = 0;
1331 	paddr_t pa = 0;
1332 
1333 	*handled = false;
1334 
1335 	area_get_entry(area, pgidx, &pa, &attr);
1336 
1337 	/* Not mapped */
1338 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1339 		return false;
1340 
1341 	/* Not readable, should not happen */
1342 	if (abort_is_user_exception(ai)) {
1343 		if (!(attr & TEE_MATTR_UR))
1344 			return true;
1345 	} else {
1346 		if (!(attr & TEE_MATTR_PR)) {
1347 			abort_print_error(ai);
1348 			panic();
1349 		}
1350 	}
1351 
1352 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1353 	case CORE_MMU_FAULT_TRANSLATION:
1354 	case CORE_MMU_FAULT_READ_PERMISSION:
1355 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1356 			/* Check for an attempt to execute from a non-executable page */
1357 			if (abort_is_user_exception(ai)) {
1358 				if (!(attr & TEE_MATTR_UX))
1359 					return true;
1360 			} else {
1361 				if (!(attr & TEE_MATTR_PX)) {
1362 					abort_print_error(ai);
1363 					panic();
1364 				}
1365 			}
1366 		}
1367 		/* Since the page is mapped now it's OK */
1368 		break;
1369 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1370 		/* Check for an attempt to write to an RO page */
1371 		pmem = pmem_find(area, pgidx);
1372 		if (!pmem)
1373 			panic();
1374 		if (abort_is_user_exception(ai)) {
1375 			if (!(area->flags & TEE_MATTR_UW))
1376 				return true;
1377 			if (!(attr & TEE_MATTR_UW)) {
1378 				FMSG("Dirty %p",
1379 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1380 				pmem->flags |= PMEM_FLAG_DIRTY;
1381 				area_set_entry(area, pgidx, pa,
1382 					       get_area_mattr(area->flags));
1383 				area_tlbi_entry(area, pgidx);
1384 			}
1385 
1386 		} else {
1387 			if (!(area->flags & TEE_MATTR_PW)) {
1388 				abort_print_error(ai);
1389 				panic();
1390 			}
1391 			if (!(attr & TEE_MATTR_PW)) {
1392 				FMSG("Dirty %p",
1393 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1394 				pmem->flags |= PMEM_FLAG_DIRTY;
1395 				area_set_entry(area, pgidx, pa,
1396 					       get_area_mattr(area->flags));
1397 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1398 			}
1399 		}
1400 		/* Since the permissions have been updated it's now OK */
1401 		break;
1402 	default:
1403 		/* Some fault we can't deal with */
1404 		if (abort_is_user_exception(ai))
1405 			return true;
1406 		abort_print_error(ai);
1407 		panic();
1408 	}
1409 	*handled = true;
1410 	return true;
1411 }
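/*
 * Illustrative scenario for the dirty tracking above: a page in a
 * PAGER_AREA_TYPE_RW area is first mapped read-only. The first store to
 * it traps with CORE_MMU_FAULT_WRITE_PERMISSION, pager_update_permissions()
 * sets PMEM_FLAG_DIRTY and remaps the page with the full area permissions,
 * so subsequent stores fault no more. Only pages marked dirty are written
 * back by tee_pager_save_page() when they are evicted.
 */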
1412 
1413 #ifdef CFG_TEE_CORE_DEBUG
1414 static void stat_handle_fault(void)
1415 {
1416 	static size_t num_faults;
1417 	static size_t min_npages = SIZE_MAX;
1418 	static size_t total_min_npages = SIZE_MAX;
1419 
1420 	num_faults++;
1421 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1422 		DMSG("nfaults %zu npages %zu (min %zu)",
1423 		     num_faults, tee_pager_npages, min_npages);
1424 		min_npages = tee_pager_npages; /* reset */
1425 	}
1426 	if (tee_pager_npages < min_npages)
1427 		min_npages = tee_pager_npages;
1428 	if (tee_pager_npages < total_min_npages)
1429 		total_min_npages = tee_pager_npages;
1430 }
1431 #else
1432 static void stat_handle_fault(void)
1433 {
1434 }
1435 #endif
1436 
1437 bool tee_pager_handle_fault(struct abort_info *ai)
1438 {
1439 	struct tee_pager_area *area;
1440 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1441 	uint32_t exceptions;
1442 	bool ret;
1443 	bool clean_user_cache = false;
1444 
1445 #ifdef TEE_PAGER_DEBUG_PRINT
1446 	if (!abort_is_user_exception(ai))
1447 		abort_print(ai);
1448 #endif
1449 
1450 	/*
1451 	 * We're updating pages that can affect several active CPUs at a
1452 	 * time below. We end up here because a thread tries to access some
1453 	 * memory that isn't available. We have to be careful when making
1454 	 * that memory available as other threads may succeed in accessing
1455 	 * that address the moment after we've made it available.
1456 	 *
1457 	 * That means that we can't just map the memory and populate the
1458 	 * page, instead we use the aliased mapping to populate the page
1459 	 * and once everything is ready we map it.
1460 	 */
1461 	exceptions = pager_lock(ai);
1462 
1463 	stat_handle_fault();
1464 
1465 	/* check if the access is valid */
1466 	if (abort_is_user_exception(ai)) {
1467 		area = find_uta_area(ai->va);
1468 		clean_user_cache = true;
1469 	} else {
1470 		area = find_area(&tee_pager_area_head, ai->va);
1471 		if (!area) {
1472 			area = find_uta_area(ai->va);
1473 			clean_user_cache = true;
1474 		}
1475 	}
1476 	if (!area || !area->pgt) {
1477 		ret = false;
1478 		goto out;
1479 	}
1480 
1481 	if (!tee_pager_unhide_page(area, area_va2idx(area, page_va))) {
1482 		struct tee_pager_pmem *pmem = NULL;
1483 		uint32_t attr = 0;
1484 		paddr_t pa = 0;
1485 		size_t tblidx = 0;
1486 
1487 		/*
1488 		 * The page wasn't hidden, but some other core may have
1489 		 * updated the table entry before we got here or we need
1490 		 * to make a read-only page read-write (dirty).
1491 		 */
1492 		if (pager_update_permissions(area, ai, &ret)) {
1493 			/*
1494 			 * Nothing more to do with the abort. The problem
1495 			 * could already have been dealt with by another
1496 			 * core, or if ret is false the TA will be panicked.
1497 			 */
1498 			goto out;
1499 		}
1500 
1501 		pmem = tee_pager_get_page(area->type);
1502 		if (!pmem) {
1503 			abort_print(ai);
1504 			panic();
1505 		}
1506 
1507 		/* load page code & data */
1508 		tee_pager_load_page(area, page_va, pmem->va_alias);
1509 
1510 
1511 		pmem->fobj = area->fobj;
1512 		pmem->fobj_pgidx = area_va2idx(area, page_va) +
1513 				   area->fobj_pgoffs -
1514 				   ((area->base & CORE_MMU_PGDIR_MASK) >>
1515 					SMALL_PAGE_SHIFT);
1516 		tblidx = pmem_get_area_tblidx(pmem, area);
1517 		attr = get_area_mattr(area->flags);
1518 		/*
1519 		 * Pages from PAGER_AREA_TYPE_RW start read-only to make it
1520 		 * possible to tell when they are updated and should be
1521 		 * tagged as dirty.
1522 		 */
1523 		if (area->type == PAGER_AREA_TYPE_RW)
1524 			attr &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1525 		pa = get_pmem_pa(pmem);
1526 
1527 		/*
1528 		 * We've updated the page using the aliased mapping and
1529 		 * some cache maintenance is now needed if it's an
1530 		 * executable page.
1531 		 *
1532 		 * Since the d-cache is a Physically-indexed,
1533 		 * physically-tagged (PIPT) cache we can clean either the
1534 		 * aliased address or the real virtual address. In this
1535 		 * case we choose the real virtual address.
1536 		 *
1537 		 * The i-cache can also be PIPT, but may be something else
1538 		 * too like VIPT. The current code requires the caches to
1539 		 * implement the IVIPT extension, that is:
1540 		 * "instruction cache maintenance is required only after
1541 		 * writing new data to a physical address that holds an
1542 		 * instruction."
1543 		 *
1544 		 * To portably invalidate the icache the page has to
1545 		 * be mapped at the final virtual address but not
1546 		 * executable.
1547 		 */
1548 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1549 			uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1550 					TEE_MATTR_PW | TEE_MATTR_UW;
1551 			void *va = (void *)page_va;
1552 
1553 			/* Set a temporary read-only mapping */
1554 			area_set_entry(area, tblidx, pa, attr & ~mask);
1555 			area_tlbi_entry(area, tblidx);
1556 
1557 			dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1558 			if (clean_user_cache)
1559 				icache_inv_user_range(va, SMALL_PAGE_SIZE);
1560 			else
1561 				icache_inv_range(va, SMALL_PAGE_SIZE);
1562 
1563 			/* Set the final mapping */
1564 			area_set_entry(area, tblidx, pa, attr);
1565 			area_tlbi_entry(area, tblidx);
1566 		} else {
1567 			area_set_entry(area, tblidx, pa, attr);
1568 			/*
1569 			 * No need to flush TLB for this entry, it was
1570 			 * invalid. We should use a barrier though, to make
1571 			 * sure that the change is visible.
1572 			 */
1573 			dsb_ishst();
1574 		}
1575 		pgt_inc_used_entries(area->pgt);
1576 
1577 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1578 
1579 	}
1580 
1581 	tee_pager_hide_pages();
1582 	ret = true;
1583 out:
1584 	pager_unlock(exceptions);
1585 	return ret;
1586 }
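/*
 * Summary of the flow above (descriptive only):
 *
 *	1. Take the pager lock with exceptions masked.
 *	2. Find the area covering the faulting VA (core or user TA).
 *	3. If the page is merely hidden, remap it (tee_pager_unhide_page()).
 *	4. Else, if it's a permission/dirty fault, fix up the entry
 *	   (pager_update_permissions()).
 *	5. Else, evict the oldest pmem (tee_pager_get_page()), load the new
 *	   page through the write-enabled alias mapping, do the required
 *	   cache maintenance and map it at its final VA.
 *	6. Hide a fraction of the oldest pages (tee_pager_hide_pages()) to
 *	   keep refreshing the LRU approximation.
 *	7. Drop the lock and restore exceptions.
 */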
1587 
1588 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1589 {
1590 	size_t n;
1591 
1592 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1593 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1594 
1595 	/* setup memory */
1596 	for (n = 0; n < npages; n++) {
1597 		struct core_mmu_table_info *ti;
1598 		struct tee_pager_pmem *pmem;
1599 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1600 		unsigned int pgidx;
1601 		paddr_t pa;
1602 		uint32_t attr;
1603 
1604 		ti = find_table_info(va);
1605 		pgidx = core_mmu_va2idx(ti, va);
1606 		/*
1607 		 * Note that we can only support adding pages in the
1608 		 * valid range of this table info; currently that's not a problem.
1609 		 */
1610 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1611 
1612 		/* Ignore unmapped pages/blocks */
1613 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1614 			continue;
1615 
1616 		pmem = calloc(1, sizeof(struct tee_pager_pmem));
1617 		if (!pmem)
1618 			panic("out of mem");
1619 
1620 		pmem->va_alias = pager_add_alias_page(pa);
1621 
1622 		if (unmap) {
1623 			pmem->fobj = NULL;
1624 			pmem->fobj_pgidx = INVALID_PGIDX;
1625 			core_mmu_set_entry(ti, pgidx, 0, 0);
1626 			pgt_dec_used_entries(find_core_pgt(va));
1627 		} else {
1628 			struct tee_pager_area *area = NULL;
1629 
1630 			/*
1631 			 * The page is still mapped, let's assign the area
1632 			 * and update the protection bits accordingly.
1633 			 */
1634 			area = find_area(&tee_pager_area_head, va);
1635 			assert(area && area->pgt == find_core_pgt(va));
1636 			pmem->fobj = area->fobj;
1637 			pmem->fobj_pgidx = pgidx + area->fobj_pgoffs -
1638 					   ((area->base &
1639 							CORE_MMU_PGDIR_MASK) >>
1640 						SMALL_PAGE_SHIFT);
1641 			assert(pgidx == pmem_get_area_tblidx(pmem, area));
1642 			assert(pa == get_pmem_pa(pmem));
1643 			area_set_entry(area, pgidx, pa,
1644 				       get_area_mattr(area->flags));
1645 		}
1646 
1647 		tee_pager_npages++;
1648 		incr_npages_all();
1649 		set_npages();
1650 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1651 	}
1652 
1653 	/*
1654 	 * As this is done at init, invalidate all TLBs once instead of
1655 	 * targeting only the modified entries.
1656 	 */
1657 	tlbi_all();
1658 }
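/*
 * Illustrative usage at boot (the variable names below are hypothetical,
 * the exact ranges depend on the boot code): pages backing the part of
 * the pageable area that is already initialized and mapped are registered
 * with unmap == false so they stay mapped and become regular pageable
 * pmems, while pages that only donate physical memory to the pager are
 * added with unmap == true and go straight to the free pool.
 *
 *	tee_pager_add_pages(init_va, init_npages, false);
 *	tee_pager_add_pages(spare_va, spare_npages, true);
 */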
1659 
1660 #ifdef CFG_PAGED_USER_TA
1661 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1662 {
1663 	struct pgt *p = pgt;
1664 
1665 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1666 		p = SLIST_NEXT(p, link);
1667 	return p;
1668 }
1669 
1670 void tee_pager_assign_um_tables(struct user_mode_ctx *uctx)
1671 {
1672 	struct tee_pager_area *area = NULL;
1673 	struct pgt *pgt = NULL;
1674 
1675 	if (!uctx->areas)
1676 		return;
1677 
1678 	pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1679 	TAILQ_FOREACH(area, uctx->areas, link) {
1680 		if (!area->pgt)
1681 			area->pgt = find_pgt(pgt, area->base);
1682 		else
1683 			assert(area->pgt == find_pgt(pgt, area->base));
1684 		if (!area->pgt)
1685 			panic();
1686 	}
1687 }
1688 
1689 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1690 {
1691 	struct tee_pager_pmem *pmem = NULL;
1692 	struct tee_pager_area *area = NULL;
1693 	struct tee_pager_area_head *areas = NULL;
1694 	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1695 
1696 	if (!pgt->num_used_entries)
1697 		goto out;
1698 
1699 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1700 		if (pmem->fobj)
1701 			pmem_unmap(pmem, pgt);
1702 	}
1703 	assert(!pgt->num_used_entries);
1704 
1705 out:
1706 	areas = to_user_ta_ctx(pgt->ctx)->uctx.areas;
1707 	if (areas) {
1708 		TAILQ_FOREACH(area, areas, link) {
1709 			if (area->pgt == pgt)
1710 				area->pgt = NULL;
1711 		}
1712 	}
1713 
1714 	pager_unlock(exceptions);
1715 }
1716 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1717 #endif /*CFG_PAGED_USER_TA*/
1718 
1719 void tee_pager_release_phys(void *addr, size_t size)
1720 {
1721 	bool unmapped = false;
1722 	vaddr_t va = (vaddr_t)addr;
1723 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1724 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1725 	struct tee_pager_area *area;
1726 	uint32_t exceptions;
1727 
1728 	if (end <= begin)
1729 		return;
1730 
1731 	exceptions = pager_lock_check_stack(128);
1732 
1733 	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
1734 		area = find_area(&tee_pager_area_head, va);
1735 		if (!area)
1736 			panic();
1737 		unmapped |= tee_pager_release_one_phys(area, va);
1738 	}
1739 
1740 	if (unmapped)
1741 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1742 
1743 	pager_unlock(exceptions);
1744 }
1745 KEEP_PAGER(tee_pager_release_phys);
1746 
1747 void *tee_pager_alloc(size_t size)
1748 {
1749 	tee_mm_entry_t *mm = NULL;
1750 	uint8_t *smem = NULL;
1751 	size_t num_pages = 0;
1752 	struct fobj *fobj = NULL;
1753 
1754 	if (!size)
1755 		return NULL;
1756 
1757 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1758 	if (!mm)
1759 		return NULL;
1760 
1761 	smem = (uint8_t *)tee_mm_get_smem(mm);
1762 	num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
1763 	fobj = fobj_locked_paged_alloc(num_pages);
1764 	if (!fobj) {
1765 		tee_mm_free(mm);
1766 		return NULL;
1767 	}
1768 
1769 	tee_pager_add_core_area((vaddr_t)smem, PAGER_AREA_TYPE_LOCK, fobj);
1770 	fobj_put(fobj);
1771 
1772 	asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE);
1773 
1774 	return smem;
1775 }
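/*
 * Illustrative usage (the size is made up): allocating pager-backed,
 * lock-on-use memory from the tee_mm_vcore pool. The returned range is
 * covered by a PAGER_AREA_TYPE_LOCK area, so its pages are faulted in on
 * first access and stay resident until they are handed back with
 * tee_pager_release_phys().
 *
 *	void *buf = tee_pager_alloc(4 * SMALL_PAGE_SIZE);
 *
 *	if (!buf)
 *		panic();
 *	...use buf...
 *	tee_pager_release_phys(buf, 4 * SMALL_PAGE_SIZE);
 */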
1776