xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 817466cb476de705a8e3dabe1ef165fe27a18c2f)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <crypto/crypto.h>
10 #include <crypto/internal_aes-gcm.h>
11 #include <io.h>
12 #include <keep.h>
13 #include <kernel/abort.h>
14 #include <kernel/asan.h>
15 #include <kernel/panic.h>
16 #include <kernel/spinlock.h>
17 #include <kernel/tee_misc.h>
18 #include <kernel/tee_ta_manager.h>
19 #include <kernel/thread.h>
20 #include <kernel/tlb_helpers.h>
21 #include <mm/core_memprot.h>
22 #include <mm/tee_mm.h>
23 #include <mm/tee_pager.h>
24 #include <stdlib.h>
25 #include <sys/queue.h>
26 #include <tee_api_defines.h>
27 #include <trace.h>
28 #include <types_ext.h>
29 #include <utee_defines.h>
30 #include <util.h>
31 
32 #define PAGER_AE_KEY_BITS	256
33 
34 struct pager_aes_gcm_iv {
35 	uint32_t iv[3];
36 };
37 
38 #define PAGER_AES_GCM_TAG_LEN	16
39 
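/*
 * struct pager_rw_pstate - Crypto state of a paged read/write page
 * @iv		Counter used to construct the AES-GCM IV, incremented each
 *		time the page is encrypted. An @iv of 0 means the page has
 *		never been saved and is zero-filled on first use.
 * @tag		AES-GCM authentication tag of the encrypted page in the store
 */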
40 struct pager_rw_pstate {
41 	uint64_t iv;
42 	uint8_t tag[PAGER_AES_GCM_TAG_LEN];
43 };
44 
45 enum area_type {
46 	AREA_TYPE_RO,
47 	AREA_TYPE_RW,
48 	AREA_TYPE_LOCK,
49 };
50 
51 struct tee_pager_area {
52 	union {
53 		const uint8_t *hashes;
54 		struct pager_rw_pstate *rwp;
55 	} u;
56 	uint8_t *store;
57 	enum area_type type;
58 	uint32_t flags;
59 	vaddr_t base;
60 	size_t size;
61 	struct pgt *pgt;
62 	TAILQ_ENTRY(tee_pager_area) link;
63 };
64 
65 TAILQ_HEAD(tee_pager_area_head, tee_pager_area);
66 
67 static struct tee_pager_area_head tee_pager_area_head =
68 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
69 
70 #define INVALID_PGIDX	UINT_MAX
71 
72 /*
73  * struct tee_pager_pmem - Represents a physical page used for paging.
74  *
75  * @pgidx	Index of the page's entry in the area's translation table
76  * @va_alias	Virtual address where the physical page is always aliased.
77  *		Used during remapping of the page when the content needs to
78  *		be updated before it's available at the new location.
79  * @area	Pointer to the pager area the page currently belongs to
80  */
81 struct tee_pager_pmem {
82 	unsigned pgidx;
83 	void *va_alias;
84 	struct tee_pager_area *area;
85 	TAILQ_ENTRY(tee_pager_pmem) link;
86 };
87 
88 /* The list of physical pages. The first page in the list is the oldest */
89 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
90 
91 static struct tee_pager_pmem_head tee_pager_pmem_head =
92 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
93 
94 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
95 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
96 
97 static struct internal_aes_gcm_key pager_ae_key;
98 
99 /* Maximum number of pages hidden at each page fault */
100 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
101 
102 /* Number of registered physical pages, used when hiding pages */
103 static size_t tee_pager_npages;
104 
105 #ifdef CFG_WITH_STATS
106 static struct tee_pager_stats pager_stats;
107 
108 static inline void incr_ro_hits(void)
109 {
110 	pager_stats.ro_hits++;
111 }
112 
113 static inline void incr_rw_hits(void)
114 {
115 	pager_stats.rw_hits++;
116 }
117 
118 static inline void incr_hidden_hits(void)
119 {
120 	pager_stats.hidden_hits++;
121 }
122 
123 static inline void incr_zi_released(void)
124 {
125 	pager_stats.zi_released++;
126 }
127 
128 static inline void incr_npages_all(void)
129 {
130 	pager_stats.npages_all++;
131 }
132 
133 static inline void set_npages(void)
134 {
135 	pager_stats.npages = tee_pager_npages;
136 }
137 
138 void tee_pager_get_stats(struct tee_pager_stats *stats)
139 {
140 	*stats = pager_stats;
141 
142 	pager_stats.hidden_hits = 0;
143 	pager_stats.ro_hits = 0;
144 	pager_stats.rw_hits = 0;
145 	pager_stats.zi_released = 0;
146 }
147 
148 #else /* CFG_WITH_STATS */
149 static inline void incr_ro_hits(void) { }
150 static inline void incr_rw_hits(void) { }
151 static inline void incr_hidden_hits(void) { }
152 static inline void incr_zi_released(void) { }
153 static inline void incr_npages_all(void) { }
154 static inline void set_npages(void) { }
155 
156 void tee_pager_get_stats(struct tee_pager_stats *stats)
157 {
158 	memset(stats, 0, sizeof(struct tee_pager_stats));
159 }
160 #endif /* CFG_WITH_STATS */
161 
162 #define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
163 #define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
164 #define TBL_SHIFT	SMALL_PAGE_SHIFT
165 
166 #define EFFECTIVE_VA_SIZE \
167 	(ROUNDUP(TEE_RAM_VA_START + TEE_RAM_VA_SIZE, \
168 		 CORE_MMU_PGDIR_SIZE) - \
169 	 ROUNDDOWN(TEE_RAM_VA_START, CORE_MMU_PGDIR_SIZE))
170 
171 static struct pager_table {
172 	struct pgt pgt;
173 	struct core_mmu_table_info tbl_info;
174 } pager_tables[EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE];
175 
176 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
177 
178 /* Defines the range of the alias area */
179 static tee_mm_entry_t *pager_alias_area;
180 /*
181  * Physical pages are added in a stack-like fashion to the alias area.
182  * @pager_alias_next_free gives the address of the next free entry, or 0
183  * when the alias area is exhausted.
184  */
185 static uintptr_t pager_alias_next_free;
186 
187 #ifdef CFG_TEE_CORE_DEBUG
188 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
189 
190 static uint32_t pager_lock_dldetect(const char *func, const int line,
191 				    struct abort_info *ai)
192 {
193 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
194 	unsigned int retries = 0;
195 	unsigned int reminder = 0;
196 
197 	while (!cpu_spin_trylock(&pager_spinlock)) {
198 		retries++;
199 		if (!retries) {
200 			/* wrapped, time to report */
201 			trace_printf(func, line, TRACE_ERROR, true,
202 				     "possible spinlock deadlock reminder %u",
203 				     reminder);
204 			if (reminder < UINT_MAX)
205 				reminder++;
206 			if (ai)
207 				abort_print(ai);
208 		}
209 	}
210 
211 	return exceptions;
212 }
213 #else
214 static uint32_t pager_lock(struct abort_info __unused *ai)
215 {
216 	return cpu_spin_lock_xsave(&pager_spinlock);
217 }
218 #endif
219 
220 static uint32_t pager_lock_check_stack(size_t stack_size)
221 {
222 	if (stack_size) {
223 		int8_t buf[stack_size];
224 		size_t n;
225 
226 		/*
227 		 * Make sure to touch all pages of the stack that we expect
228 		 * to use with this lock held. We need to take any
229 		 * page faults before the lock is taken or we'll deadlock
230 		 * the pager. The pages that are populated in this way will
231 		 * eventually be released at certain save transitions of
232 		 * the thread.
233 		 */
234 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
235 			write8(1, (vaddr_t)buf + n);
236 		write8(1, (vaddr_t)buf + stack_size - 1);
237 	}
238 
239 	return pager_lock(NULL);
240 }
241 
242 static void pager_unlock(uint32_t exceptions)
243 {
244 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
245 }
246 
247 void *tee_pager_phys_to_virt(paddr_t pa)
248 {
249 	struct core_mmu_table_info ti;
250 	unsigned idx;
251 	uint32_t a;
252 	paddr_t p;
253 	vaddr_t v;
254 	size_t n;
255 
256 	/*
257 	 * Most addresses are mapped linearly, try that first if possible.
258 	 */
259 	if (!tee_pager_get_table_info(pa, &ti))
260 		return NULL; /* impossible pa */
261 	idx = core_mmu_va2idx(&ti, pa);
262 	core_mmu_get_entry(&ti, idx, &p, &a);
263 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
264 		return (void *)core_mmu_idx2va(&ti, idx);
265 
266 	n = 0;
267 	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
268 	while (true) {
269 		while (idx < TBL_NUM_ENTRIES) {
270 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
271 			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
272 				return NULL;
273 
274 			core_mmu_get_entry(&pager_tables[n].tbl_info,
275 					   idx, &p, &a);
276 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
277 				return (void *)v;
278 			idx++;
279 		}
280 
281 		n++;
282 		if (n >= ARRAY_SIZE(pager_tables))
283 			return NULL;
284 		idx = 0;
285 	}
286 
287 	return NULL;
288 }
289 
290 static struct pager_table *find_pager_table_may_fail(vaddr_t va)
291 {
292 	size_t n;
293 	const vaddr_t mask = CORE_MMU_PGDIR_MASK;
294 
295 	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
296 	    CORE_MMU_PGDIR_SHIFT;
297 	if (n >= ARRAY_SIZE(pager_tables))
298 		return NULL;
299 
300 	assert(va >= pager_tables[n].tbl_info.va_base &&
301 	       va <= (pager_tables[n].tbl_info.va_base | mask));
302 
303 	return pager_tables + n;
304 }
305 
306 static struct pager_table *find_pager_table(vaddr_t va)
307 {
308 	struct pager_table *pt = find_pager_table_may_fail(va);
309 
310 	assert(pt);
311 	return pt;
312 }
313 
314 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
315 {
316 	struct pager_table *pt = find_pager_table_may_fail(va);
317 
318 	if (!pt)
319 		return false;
320 
321 	*ti = pt->tbl_info;
322 	return true;
323 }
324 
325 static struct core_mmu_table_info *find_table_info(vaddr_t va)
326 {
327 	return &find_pager_table(va)->tbl_info;
328 }
329 
330 static struct pgt *find_core_pgt(vaddr_t va)
331 {
332 	return &find_pager_table(va)->pgt;
333 }
334 
335 static void set_alias_area(tee_mm_entry_t *mm)
336 {
337 	struct pager_table *pt;
338 	unsigned idx;
339 	vaddr_t smem = tee_mm_get_smem(mm);
340 	size_t nbytes = tee_mm_get_bytes(mm);
341 	vaddr_t v;
342 
343 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
344 
345 	assert(!pager_alias_area);
346 	pager_alias_area = mm;
347 	pager_alias_next_free = smem;
348 
349 	/* Clear all mappings in the alias area */
350 	pt = find_pager_table(smem);
351 	idx = core_mmu_va2idx(&pt->tbl_info, smem);
352 	while (pt <= (pager_tables + ARRAY_SIZE(pager_tables) - 1)) {
353 		while (idx < TBL_NUM_ENTRIES) {
354 			v = core_mmu_idx2va(&pt->tbl_info, idx);
355 			if (v >= (smem + nbytes))
356 				goto out;
357 
358 			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
359 			idx++;
360 		}
361 
362 		pt++;
363 		idx = 0;
364 	}
365 
366 out:
367 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
368 }
369 
370 static void generate_ae_key(void)
371 {
372 	uint8_t key[PAGER_AE_KEY_BITS / 8];
373 
374 	if (rng_generate(key, sizeof(key)) != TEE_SUCCESS)
375 		panic("failed to generate random");
376 	if (internal_aes_gcm_expand_enc_key(key, sizeof(key),
377 					    &pager_ae_key))
378 		panic("failed to expand key");
379 }
380 
381 static size_t tbl_usage_count(struct core_mmu_table_info *ti)
382 {
383 	size_t n;
384 	paddr_t pa;
385 	size_t usage = 0;
386 
387 	for (n = 0; n < ti->num_entries; n++) {
388 		core_mmu_get_entry(ti, n, &pa, NULL);
389 		if (pa)
390 			usage++;
391 	}
392 	return usage;
393 }
394 
395 static void area_get_entry(struct tee_pager_area *area, size_t idx,
396 			   paddr_t *pa, uint32_t *attr)
397 {
398 	assert(area->pgt);
399 	assert(idx < TBL_NUM_ENTRIES);
400 	core_mmu_get_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
401 }
402 
403 static void area_set_entry(struct tee_pager_area *area, size_t idx,
404 			   paddr_t pa, uint32_t attr)
405 {
406 	assert(area->pgt);
407 	assert(idx < TBL_NUM_ENTRIES);
408 	core_mmu_set_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
409 }
410 
411 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
412 {
413 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
414 }
415 
416 static vaddr_t area_idx2va(struct tee_pager_area *area, size_t idx)
417 {
418 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
419 }
420 
421 void tee_pager_early_init(void)
422 {
423 	size_t n;
424 
425 	/*
426 	 * Note that this depends on add_pager_vaspace() adding vaspace
427 	 * after end of memory.
428 	 */
429 	for (n = 0; n < ARRAY_SIZE(pager_tables); n++) {
430 		if (!core_mmu_find_table(TEE_RAM_VA_START +
431 					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
432 					 &pager_tables[n].tbl_info))
433 			panic("can't find mmu tables");
434 
435 		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
436 			panic("Unsupported page size in translation table");
437 		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
438 		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);
439 
440 		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
441 		pgt_set_used_entries(&pager_tables[n].pgt,
442 				tbl_usage_count(&pager_tables[n].tbl_info));
443 	}
444 }
445 
446 void tee_pager_init(tee_mm_entry_t *mm_alias)
447 {
448 	set_alias_area(mm_alias);
449 	generate_ae_key();
450 }
451 
452 static void *pager_add_alias_page(paddr_t pa)
453 {
454 	unsigned idx;
455 	struct core_mmu_table_info *ti;
456 	/* Alias pages mapped read-only; write permission is set at runtime when needed */
457 	uint32_t attr = TEE_MATTR_VALID_BLOCK |
458 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
459 			TEE_MATTR_SECURE | TEE_MATTR_PR;
460 
461 	DMSG("0x%" PRIxPA, pa);
462 
463 	ti = find_table_info(pager_alias_next_free);
464 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
465 	core_mmu_set_entry(ti, idx, pa, attr);
466 	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
467 	pager_alias_next_free += SMALL_PAGE_SIZE;
468 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
469 				      tee_mm_get_bytes(pager_alias_area)))
470 		pager_alias_next_free = 0;
471 	return (void *)core_mmu_idx2va(ti, idx);
472 }
473 
474 static struct tee_pager_area *alloc_area(struct pgt *pgt,
475 					 vaddr_t base, size_t size,
476 					 uint32_t flags, const void *store,
477 					 const void *hashes)
478 {
479 	struct tee_pager_area *area = calloc(1, sizeof(*area));
480 	enum area_type at;
481 	tee_mm_entry_t *mm_store = NULL;
482 
483 	if (!area)
484 		return NULL;
485 
486 	if (flags & (TEE_MATTR_PW | TEE_MATTR_UW)) {
487 		if (flags & TEE_MATTR_LOCKED) {
488 			at = AREA_TYPE_LOCK;
489 			goto out;
490 		}
491 		mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
492 		if (!mm_store)
493 			goto bad;
494 		area->store = phys_to_virt(tee_mm_get_smem(mm_store),
495 					   MEM_AREA_TA_RAM);
496 		if (!area->store)
497 			goto bad;
498 		area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
499 				     sizeof(struct pager_rw_pstate));
500 		if (!area->u.rwp)
501 			goto bad;
502 		at = AREA_TYPE_RW;
503 	} else {
504 		area->store = (void *)store;
505 		area->u.hashes = hashes;
506 		at = AREA_TYPE_RO;
507 	}
508 out:
509 	area->pgt = pgt;
510 	area->base = base;
511 	area->size = size;
512 	area->flags = flags;
513 	area->type = at;
514 	return area;
515 bad:
516 	tee_mm_free(mm_store);
517 	free(area->u.rwp);
518 	free(area);
519 	return NULL;
520 }
521 
522 static void area_insert_tail(struct tee_pager_area *area)
523 {
524 	uint32_t exceptions = pager_lock_check_stack(8);
525 
526 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
527 
528 	pager_unlock(exceptions);
529 }
530 KEEP_PAGER(area_insert_tail);
531 
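/*
 * Register a range of core (TEE) memory with the pager. Read-only ranges
 * are backed by @store and verified against @hashes when paged in; writable
 * ranges get a backing store in secure DDR and are encrypted when paged out.
 */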
532 void tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
533 			     const void *store, const void *hashes)
534 {
535 	struct tee_pager_area *area;
536 	vaddr_t b = base;
537 	size_t s = size;
538 	size_t s2;
539 
540 
541 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
542 		base, base + size, flags, store, hashes);
543 
544 	if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) {
545 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);
546 		panic();
547 	}
548 
549 	if (!(flags & TEE_MATTR_PW) && (!store || !hashes))
550 		panic("non-writable pages must provide store and hashes");
551 
552 	if ((flags & TEE_MATTR_PW) && (store || hashes))
553 		panic("writable pages must not provide store or hashes");
554 
555 	while (s) {
556 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
557 		area = alloc_area(find_core_pgt(b), b, s2, flags,
558 				  (const uint8_t *)store + b - base,
559 				  (const uint8_t *)hashes + (b - base) /
560 							SMALL_PAGE_SIZE *
561 							TEE_SHA256_HASH_SIZE);
562 		if (!area)
563 			panic("alloc_area");
564 		area_insert_tail(area);
565 		b += s2;
566 		s -= s2;
567 	}
568 }
569 
570 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
571 					vaddr_t va)
572 {
573 	struct tee_pager_area *area;
574 
575 	if (!areas)
576 		return NULL;
577 
578 	TAILQ_FOREACH(area, areas, link) {
579 		if (core_is_buffer_inside(va, 1, area->base, area->size))
580 			return area;
581 	}
582 	return NULL;
583 }
584 
585 #ifdef CFG_PAGED_USER_TA
586 static struct tee_pager_area *find_uta_area(vaddr_t va)
587 {
588 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
589 
590 	if (!ctx || !is_user_ta_ctx(ctx))
591 		return NULL;
592 	return find_area(to_user_ta_ctx(ctx)->areas, va);
593 }
594 #else
595 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
596 {
597 	return NULL;
598 }
599 #endif /*CFG_PAGED_USER_TA*/
600 
601 
602 static uint32_t get_area_mattr(uint32_t area_flags)
603 {
604 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
605 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
606 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
607 
608 	return attr;
609 }
610 
611 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
612 {
613 	struct core_mmu_table_info *ti;
614 	paddr_t pa;
615 	unsigned idx;
616 
617 	ti = find_table_info((vaddr_t)pmem->va_alias);
618 	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
619 	core_mmu_get_entry(ti, idx, &pa, NULL);
620 	return pa;
621 }
622 
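/*
 * Decrypt and authenticate one stored page into @dst, using the IV derived
 * from @rwp. Returns false if the AES-GCM tag check fails.
 */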
623 static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
624 			void *dst)
625 {
626 	struct pager_aes_gcm_iv iv = {
627 		{ (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
628 	};
629 	size_t tag_len = sizeof(rwp->tag);
630 
631 	return !internal_aes_gcm_dec(&pager_ae_key, &iv, sizeof(iv),
632 				     NULL, 0, src, SMALL_PAGE_SIZE, dst,
633 				     rwp->tag, tag_len);
634 }
635 
636 static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
637 {
638 	struct pager_aes_gcm_iv iv;
639 	size_t tag_len = sizeof(rwp->tag);
640 
641 	assert((rwp->iv + 1) > rwp->iv);
642 	rwp->iv++;
643 	/*
644 	 * IV is constructed as recommended in section "8.2.1 Deterministic
645 	 * Construction" of "Recommendation for Block Cipher Modes of
646 	 * Operation: Galois/Counter Mode (GCM) and GMAC",
647 	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
648 	 */
649 	iv.iv[0] = (vaddr_t)rwp;
650 	iv.iv[1] = rwp->iv >> 32;
651 	iv.iv[2] = rwp->iv;
652 
653 	if (internal_aes_gcm_enc(&pager_ae_key, &iv, sizeof(iv), NULL, 0,
654 				 src, SMALL_PAGE_SIZE, dst, rwp->tag, &tag_len))
655 		panic("gcm failed");
656 }
657 
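/*
 * Populate the aliased physical page @va_alias with the content belonging
 * to @page_va in @area: copy and hash-check for read-only areas, decrypt
 * (or zero-fill if never saved) for read/write areas, zero-fill for locked
 * areas.
 */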
658 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
659 			void *va_alias)
660 {
661 	size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
662 	const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;
663 	struct core_mmu_table_info *ti;
664 	uint32_t attr_alias;
665 	paddr_t pa_alias;
666 	unsigned int idx_alias;
667 
668 	/* Ensure we are allowed to write to the aliased virtual page */
669 	ti = find_table_info((vaddr_t)va_alias);
670 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
671 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
672 	if (!(attr_alias & TEE_MATTR_PW)) {
673 		attr_alias |= TEE_MATTR_PW;
674 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
675 		tlbi_mva_allasid((vaddr_t)va_alias);
676 	}
677 
678 	asan_tag_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
679 	switch (area->type) {
680 	case AREA_TYPE_RO:
681 		{
682 			const void *hash = area->u.hashes +
683 					   idx * TEE_SHA256_HASH_SIZE;
684 
685 			memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
686 			incr_ro_hits();
687 
688 			if (hash_sha256_check(hash, va_alias,
689 					      SMALL_PAGE_SIZE) != TEE_SUCCESS) {
690 				EMSG("PH 0x%" PRIxVA " failed", page_va);
691 				panic();
692 			}
693 		}
694 		/* Forbid write to aliases for read-only (maybe exec) pages */
695 		attr_alias &= ~TEE_MATTR_PW;
696 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
697 		tlbi_mva_allasid((vaddr_t)va_alias);
698 		break;
699 	case AREA_TYPE_RW:
700 		FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
701 			va_alias, page_va, area->u.rwp[idx].iv);
702 		if (!area->u.rwp[idx].iv)
703 			memset(va_alias, 0, SMALL_PAGE_SIZE);
704 		else if (!decrypt_page(&area->u.rwp[idx], stored_page,
705 				       va_alias)) {
706 			EMSG("PH 0x%" PRIxVA " failed", page_va);
707 			panic();
708 		}
709 		incr_rw_hits();
710 		break;
711 	case AREA_TYPE_LOCK:
712 		FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
713 		memset(va_alias, 0, SMALL_PAGE_SIZE);
714 		break;
715 	default:
716 		panic();
717 	}
718 	asan_tag_no_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
719 }
720 
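/*
 * Save one physical page before it is reused: dirty pages of a read/write
 * area are encrypted into the backing store, anything else is simply
 * dropped since it can be restored from its store or zero-filled again.
 */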
721 static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
722 {
723 	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
724 				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;
725 
726 	if (pmem->area->type == AREA_TYPE_RW && (attr & dirty_bits)) {
727 		size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
728 		size_t idx = pmem->pgidx - (offs >> SMALL_PAGE_SHIFT);
729 		void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;
730 
731 		assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW));
732 		asan_tag_access(pmem->va_alias,
733 				(uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
734 		encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
735 			     stored_page);
736 		asan_tag_no_access(pmem->va_alias,
737 				   (uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
738 		FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
739 			pmem->area->base + idx * SMALL_PAGE_SIZE,
740 			pmem->area->u.rwp[idx].iv);
741 	}
742 }
743 
744 #ifdef CFG_PAGED_USER_TA
745 static void free_area(struct tee_pager_area *area)
746 {
747 	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr,
748 				virt_to_phys(area->store)));
749 	if (area->type == AREA_TYPE_RW)
750 		free(area->u.rwp);
751 	free(area);
752 }
753 
754 static bool pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
755 			       size_t size)
756 {
757 	struct tee_pager_area *area;
758 	uint32_t flags;
759 	vaddr_t b = base;
760 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
761 
762 	if (!utc->areas) {
763 		utc->areas = malloc(sizeof(*utc->areas));
764 		if (!utc->areas)
765 			return false;
766 		TAILQ_INIT(utc->areas);
767 	}
768 
769 	flags = TEE_MATTR_PRW | TEE_MATTR_URWX;
770 
771 	while (s) {
772 		size_t s2;
773 
774 		if (find_area(utc->areas, b))
775 			return false;
776 
777 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
778 
779 		/* Table info will be set when the context is activated. */
780 		area = alloc_area(NULL, b, s2, flags, NULL, NULL);
781 		if (!area)
782 			return false;
783 		TAILQ_INSERT_TAIL(utc->areas, area, link);
784 		b += s2;
785 		s -= s2;
786 	}
787 
788 	return true;
789 }
790 
791 bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size)
792 {
793 	struct thread_specific_data *tsd = thread_get_tsd();
794 	struct tee_pager_area *area;
795 	struct core_mmu_table_info dir_info = { NULL };
796 
797 	if (&utc->ctx != tsd->ctx) {
798 		/*
799 		 * Changes are to a utc that isn't active. Just add the
800 		 * areas; page tables will be dealt with later.
801 		 */
802 		return pager_add_uta_area(utc, base, size);
803 	}
804 
805 	/*
806 	 * Assign page tables before adding areas to be able to tell which
807 	 * are newly added and should be removed in case of failure.
808 	 */
809 	tee_pager_assign_uta_tables(utc);
810 	if (!pager_add_uta_area(utc, base, size)) {
811 		struct tee_pager_area *next_a;
812 
813 		/* Remove all added areas */
814 		TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
815 			if (!area->pgt) {
816 				TAILQ_REMOVE(utc->areas, area, link);
817 				free_area(area);
818 			}
819 		}
820 		return false;
821 	}
822 
823 	/*
824 	 * Assign page tables to the new areas and make sure that the page
825 	 * tables are registered in the upper table.
826 	 */
827 	tee_pager_assign_uta_tables(utc);
828 	core_mmu_get_user_pgdir(&dir_info);
829 	TAILQ_FOREACH(area, utc->areas, link) {
830 		paddr_t pa;
831 		size_t idx;
832 		uint32_t attr;
833 
834 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
835 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
836 
837 		/*
838 		 * Check if the page table is already used; if it is, it's
839 		 * already registered.
840 		 */
841 		if (area->pgt->num_used_entries) {
842 			assert(attr & TEE_MATTR_TABLE);
843 			assert(pa == virt_to_phys(area->pgt->tbl));
844 			continue;
845 		}
846 
847 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
848 		pa = virt_to_phys(area->pgt->tbl);
849 		assert(pa);
850 		/*
851 		 * Note that the update of the table entry is guaranteed to
852 		 * be atomic.
853 		 */
854 		core_mmu_set_entry(&dir_info, idx, pa, attr);
855 	}
856 
857 	return true;
858 }
859 
860 static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
861 				   struct pgt *pgt)
862 {
863 	assert(pgt);
864 	ti->table = pgt->tbl;
865 	ti->va_base = pgt->vabase;
866 	ti->level = TBL_LEVEL;
867 	ti->shift = TBL_SHIFT;
868 	ti->num_entries = TBL_NUM_ENTRIES;
869 }
870 
871 static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
872 			   vaddr_t new_base)
873 {
874 	uint32_t exceptions = pager_lock_check_stack(64);
875 
876 	/*
877 	 * If there's no pgt assigned to the old area there's no pages to
878 	 * deal with either, just update with a new pgt and base.
879 	 */
880 	if (area->pgt) {
881 		struct core_mmu_table_info old_ti;
882 		struct core_mmu_table_info new_ti;
883 		struct tee_pager_pmem *pmem;
884 
885 		init_tbl_info_from_pgt(&old_ti, area->pgt);
886 		init_tbl_info_from_pgt(&new_ti, new_pgt);
887 
888 
889 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
890 			vaddr_t va;
891 			paddr_t pa;
892 			uint32_t attr;
893 
894 			if (pmem->area != area)
895 				continue;
896 			core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr);
897 			core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0);
898 
899 			assert(pa == get_pmem_pa(pmem));
900 			assert(attr);
901 			assert(area->pgt->num_used_entries);
902 			area->pgt->num_used_entries--;
903 
904 			va = core_mmu_idx2va(&old_ti, pmem->pgidx);
905 			va = va - area->base + new_base;
906 			pmem->pgidx = core_mmu_va2idx(&new_ti, va);
907 			core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr);
908 			new_pgt->num_used_entries++;
909 		}
910 	}
911 
912 	area->pgt = new_pgt;
913 	area->base = new_base;
914 	pager_unlock(exceptions);
915 }
916 KEEP_PAGER(transpose_area);
917 
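/*
 * Move all pager areas fully inside [src_base, src_base + size) from
 * @src_utc to @dst_utc, rebasing them to the corresponding address at
 * @dst_base and assigning page tables from @dst_pgt.
 */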
918 void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
919 				   vaddr_t src_base,
920 				   struct user_ta_ctx *dst_utc,
921 				   vaddr_t dst_base, struct pgt **dst_pgt,
922 				   size_t size)
923 {
924 	struct tee_pager_area *area;
925 	struct tee_pager_area *next_a;
926 
927 	TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
928 		vaddr_t new_area_base;
929 		size_t new_idx;
930 
931 		if (!core_is_buffer_inside(area->base, area->size,
932 					  src_base, size))
933 			continue;
934 
935 		TAILQ_REMOVE(src_utc->areas, area, link);
936 
937 		new_area_base = dst_base + (area->base - src_base);
938 		new_idx = (new_area_base - dst_pgt[0]->vabase) /
939 			  CORE_MMU_PGDIR_SIZE;
940 		assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
941 		       dst_pgt[new_idx]->vabase);
942 		transpose_area(area, dst_pgt[new_idx], new_area_base);
943 
944 		/*
945 		 * Assert that this will not cause any conflicts in the new
946 		 * utc.  This should already be guaranteed, but a bug here
947 		 * could be tricky to find.
948 		 */
949 		assert(!find_area(dst_utc->areas, area->base));
950 		TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
951 	}
952 }
953 
954 static void rem_area(struct tee_pager_area_head *area_head,
955 		     struct tee_pager_area *area)
956 {
957 	struct tee_pager_pmem *pmem;
958 	uint32_t exceptions;
959 
960 	exceptions = pager_lock_check_stack(64);
961 
962 	TAILQ_REMOVE(area_head, area, link);
963 
964 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
965 		if (pmem->area == area) {
966 			area_set_entry(area, pmem->pgidx, 0, 0);
967 			tlbi_mva_allasid(area_idx2va(area, pmem->pgidx));
968 			pgt_dec_used_entries(area->pgt);
969 			pmem->area = NULL;
970 			pmem->pgidx = INVALID_PGIDX;
971 		}
972 	}
973 
974 	pager_unlock(exceptions);
975 	free_area(area);
976 }
977 KEEP_PAGER(rem_area);
978 
979 void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
980 			      size_t size)
981 {
982 	struct tee_pager_area *area;
983 	struct tee_pager_area *next_a;
984 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
985 
986 	TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
987 		if (core_is_buffer_inside(area->base, area->size, base, s))
988 			rem_area(utc->areas, area);
989 	}
990 }
991 
992 void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
993 {
994 	struct tee_pager_area *area;
995 
996 	if (!utc->areas)
997 		return;
998 
999 	while (true) {
1000 		area = TAILQ_FIRST(utc->areas);
1001 		if (!area)
1002 			break;
1003 		TAILQ_REMOVE(utc->areas, area, link);
1004 		free_area(area);
1005 	}
1006 
1007 	free(utc->areas);
1008 }
1009 
1010 bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
1011 				 size_t size, uint32_t flags)
1012 {
1013 	bool ret;
1014 	vaddr_t b = base;
1015 	size_t s = size;
1016 	size_t s2;
1017 	struct tee_pager_area *area = find_area(utc->areas, b);
1018 	uint32_t exceptions;
1019 	struct tee_pager_pmem *pmem;
1020 	paddr_t pa;
1021 	uint32_t a;
1022 	uint32_t f;
1023 
1024 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
1025 	if (f & TEE_MATTR_UW)
1026 		f |= TEE_MATTR_PW;
1027 	f = get_area_mattr(f);
1028 
1029 	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1030 
1031 	while (s) {
1032 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
1033 		if (!area || area->base != b || area->size != s2) {
1034 			ret = false;
1035 			goto out;
1036 		}
1037 		b += s2;
1038 		s -= s2;
1039 
1040 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1041 			if (pmem->area != area)
1042 				continue;
1043 			area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
1044 			if (a & TEE_MATTR_VALID_BLOCK)
1045 				assert(pa == get_pmem_pa(pmem));
1046 			else
1047 				pa = get_pmem_pa(pmem);
1048 			if (a == f)
1049 				continue;
1050 			area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1051 			tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1052 			if (!(flags & TEE_MATTR_UW))
1053 				tee_pager_save_page(pmem, a);
1054 
1055 			area_set_entry(pmem->area, pmem->pgidx, pa, f);
1056 			/*
1057 			 * Make sure the table update is visible before
1058 			 * continuing.
1059 			 */
1060 			dsb_ishst();
1061 
1062 			if (flags & TEE_MATTR_UX) {
1063 				void *va = (void *)area_idx2va(pmem->area,
1064 							       pmem->pgidx);
1065 
1066 				cache_op_inner(DCACHE_AREA_CLEAN, va,
1067 						SMALL_PAGE_SIZE);
1068 				cache_op_inner(ICACHE_AREA_INVALIDATE, va,
1069 						SMALL_PAGE_SIZE);
1070 			}
1071 		}
1072 
1073 		area->flags = f;
1074 		area = TAILQ_NEXT(area, link);
1075 	}
1076 
1077 	ret = true;
1078 out:
1079 	pager_unlock(exceptions);
1080 	return ret;
1081 }
1082 KEEP_PAGER(tee_pager_set_uta_area_attr);
1083 #endif /*CFG_PAGED_USER_TA*/
1084 
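/*
 * If @page_va refers to a hidden page, make it accessible again (read-only
 * unless it was hidden dirty) and move it to the back of the pmem list.
 * Returns true if a hidden page was found and unhidden.
 */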
1085 static bool tee_pager_unhide_page(vaddr_t page_va)
1086 {
1087 	struct tee_pager_pmem *pmem;
1088 
1089 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1090 		paddr_t pa;
1091 		uint32_t attr;
1092 
1093 		if (pmem->pgidx == INVALID_PGIDX)
1094 			continue;
1095 
1096 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
1097 
1098 		if (!(attr &
1099 		     (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
1100 			continue;
1101 
1102 		if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
1103 			uint32_t a = get_area_mattr(pmem->area->flags);
1104 
1105 			/* page is hidden, show and move to back */
1106 			if (pa != get_pmem_pa(pmem))
1107 				panic("unexpected pa");
1108 
1109 			/*
1110 			 * If it's not a dirty block, then it should be
1111 			 * read only.
1112 			 */
1113 			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
1114 				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1115 			else
1116 				FMSG("Unhide %#" PRIxVA, page_va);
1117 
1118 			if (page_va == 0x8000a000)
1119 				FMSG("unhide %#" PRIxVA " a %#" PRIX32,
1120 					page_va, a);
1121 			area_set_entry(pmem->area, pmem->pgidx, pa, a);
1122 			/*
1123 			 * Note that TLB invalidation isn't needed since
1124 			 * there wasn't a valid mapping before. We should
1125 			 * use a barrier though, to make sure that the
1126 			 * change is visible.
1127 			 */
1128 			dsb_ishst();
1129 
1130 			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1131 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1132 			incr_hidden_hits();
1133 			return true;
1134 		}
1135 	}
1136 
1137 	return false;
1138 }
1139 
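/*
 * Hide up to TEE_PAGER_NHIDE of the oldest mapped pages so that the next
 * access to them faults; this is roughly how the pager tracks which pages
 * are in active use.
 */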
1140 static void tee_pager_hide_pages(void)
1141 {
1142 	struct tee_pager_pmem *pmem;
1143 	size_t n = 0;
1144 
1145 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1146 		paddr_t pa;
1147 		uint32_t attr;
1148 		uint32_t a;
1149 
1150 		if (n >= TEE_PAGER_NHIDE)
1151 			break;
1152 		n++;
1153 
1154 		/* we cannot hide pages when pmem->area is not defined. */
1155 		if (!pmem->area)
1156 			continue;
1157 
1158 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
1159 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1160 			continue;
1161 
1162 		assert(pa == get_pmem_pa(pmem));
1163 		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)) {
1164 			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
1165 			FMSG("Hide %#" PRIxVA,
1166 			     area_idx2va(pmem->area, pmem->pgidx));
1167 		} else
1168 			a = TEE_MATTR_HIDDEN_BLOCK;
1169 
1170 		area_set_entry(pmem->area, pmem->pgidx, pa, a);
1171 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1172 	}
1173 }
1174 
1175 /*
1176  * Find the mapped pmem for a page, unmap it and move it back to the
1177  * pageable pmem list. Return false if the page was not mapped, true if it was.
1178  */
1179 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1180 				       vaddr_t page_va)
1181 {
1182 	struct tee_pager_pmem *pmem;
1183 	unsigned pgidx;
1184 	paddr_t pa;
1185 	uint32_t attr;
1186 
1187 	pgidx = area_va2idx(area, page_va);
1188 	area_get_entry(area, pgidx, &pa, &attr);
1189 
1190 	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
1191 
1192 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1193 		if (pmem->area != area || pmem->pgidx != pgidx)
1194 			continue;
1195 
1196 		assert(pa == get_pmem_pa(pmem));
1197 		area_set_entry(area, pgidx, 0, 0);
1198 		pgt_dec_used_entries(area->pgt);
1199 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1200 		pmem->area = NULL;
1201 		pmem->pgidx = INVALID_PGIDX;
1202 		tee_pager_npages++;
1203 		set_npages();
1204 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1205 		incr_zi_released();
1206 		return true;
1207 	}
1208 
1209 	return false;
1210 }
1211 
1212 /* Finds the oldest page and unmaps it from its old virtual address */
1213 static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
1214 {
1215 	struct tee_pager_pmem *pmem;
1216 
1217 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1218 	if (!pmem) {
1219 		EMSG("No pmem entries");
1220 		return NULL;
1221 	}
1222 	if (pmem->pgidx != INVALID_PGIDX) {
1223 		uint32_t a;
1224 
1225 		assert(pmem->area && pmem->area->pgt);
1226 		area_get_entry(pmem->area, pmem->pgidx, NULL, &a);
1227 		area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1228 		pgt_dec_used_entries(pmem->area->pgt);
1229 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1230 		tee_pager_save_page(pmem, a);
1231 	}
1232 
1233 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1234 	pmem->pgidx = INVALID_PGIDX;
1235 	pmem->area = NULL;
1236 	if (area->type == AREA_TYPE_LOCK) {
1237 		/* Move page to lock list */
1238 		if (tee_pager_npages <= 0)
1239 			panic("running out of page");
1240 		tee_pager_npages--;
1241 		set_npages();
1242 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1243 	} else {
1244 		/* move page to back */
1245 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1246 	}
1247 
1248 	return pmem;
1249 }
1250 
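/*
 * Handle aborts caused by permissions on a page that is already mapped.
 * Returns false if the entry isn't mapped at all, in which case the caller
 * should page it in. Returns true otherwise, with *handled telling whether
 * the fault was resolved here (for instance a clean page made writable) or
 * the access was an invalid user access.
 */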
1251 static bool pager_update_permissions(struct tee_pager_area *area,
1252 			struct abort_info *ai, bool *handled)
1253 {
1254 	unsigned int pgidx = area_va2idx(area, ai->va);
1255 	uint32_t attr;
1256 	paddr_t pa;
1257 
1258 	*handled = false;
1259 
1260 	area_get_entry(area, pgidx, &pa, &attr);
1261 
1262 	/* Not mapped */
1263 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1264 		return false;
1265 
1266 	/* Not readable, should not happen */
1267 	if (abort_is_user_exception(ai)) {
1268 		if (!(attr & TEE_MATTR_UR))
1269 			return true;
1270 	} else {
1271 		if (!(attr & TEE_MATTR_PR)) {
1272 			abort_print_error(ai);
1273 			panic();
1274 		}
1275 	}
1276 
1277 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1278 	case CORE_MMU_FAULT_TRANSLATION:
1279 	case CORE_MMU_FAULT_READ_PERMISSION:
1280 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1281 			/* Check for an attempt to execute from a non-executable (NOX) page */
1282 			if (abort_is_user_exception(ai)) {
1283 				if (!(attr & TEE_MATTR_UX))
1284 					return true;
1285 			} else {
1286 				if (!(attr & TEE_MATTR_PX)) {
1287 					abort_print_error(ai);
1288 					panic();
1289 				}
1290 			}
1291 		}
1292 		/* Since the page is mapped now it's OK */
1293 		break;
1294 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1295 		/* Check for an attempt to write to a read-only page */
1296 		if (abort_is_user_exception(ai)) {
1297 			if (!(area->flags & TEE_MATTR_UW))
1298 				return true;
1299 			if (!(attr & TEE_MATTR_UW)) {
1300 				FMSG("Dirty %p",
1301 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1302 				area_set_entry(area, pgidx, pa,
1303 					       get_area_mattr(area->flags));
1304 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1305 			}
1306 
1307 		} else {
1308 			if (!(area->flags & TEE_MATTR_PW)) {
1309 				abort_print_error(ai);
1310 				panic();
1311 			}
1312 			if (!(attr & TEE_MATTR_PW)) {
1313 				FMSG("Dirty %p",
1314 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1315 				area_set_entry(area, pgidx, pa,
1316 					       get_area_mattr(area->flags));
1317 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1318 			}
1319 		}
1320 		/* Since the permissions have been updated it's OK now */
1321 		break;
1322 	default:
1323 		/* Some fault we can't deal with */
1324 		if (abort_is_user_exception(ai))
1325 			return true;
1326 		abort_print_error(ai);
1327 		panic();
1328 	}
1329 	*handled = true;
1330 	return true;
1331 }
1332 
1333 #ifdef CFG_TEE_CORE_DEBUG
1334 static void stat_handle_fault(void)
1335 {
1336 	static size_t num_faults;
1337 	static size_t min_npages = SIZE_MAX;
1338 	static size_t total_min_npages = SIZE_MAX;
1339 
1340 	num_faults++;
1341 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1342 		DMSG("nfaults %zu npages %zu (min %zu)",
1343 		     num_faults, tee_pager_npages, min_npages);
1344 		min_npages = tee_pager_npages; /* reset */
1345 	}
1346 	if (tee_pager_npages < min_npages)
1347 		min_npages = tee_pager_npages;
1348 	if (tee_pager_npages < total_min_npages)
1349 		total_min_npages = tee_pager_npages;
1350 }
1351 #else
1352 static void stat_handle_fault(void)
1353 {
1354 }
1355 #endif
1356 
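/*
 * Pager entry point from the abort handler. Returns true if the abort was
 * caused by paged memory and has been resolved, false if it must be handled
 * elsewhere.
 */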
1357 bool tee_pager_handle_fault(struct abort_info *ai)
1358 {
1359 	struct tee_pager_area *area;
1360 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1361 	uint32_t exceptions;
1362 	bool ret;
1363 
1364 #ifdef TEE_PAGER_DEBUG_PRINT
1365 	abort_print(ai);
1366 #endif
1367 
1368 	/*
1369 	 * We're updating pages that can affect several active CPUs at a
1370 	 * time below. We end up here because a thread tries to access some
1371 	 * memory that isn't available. We have to be careful when making
1372 	 * that memory available as other threads may succeed in accessing
1373 	 * that address the moment after we've made it available.
1374 	 *
1375 	 * That means that we can't just map the memory and populate the
1376 	 * page, instead we use the aliased mapping to populate the page
1377 	 * and once everything is ready we map it.
1378 	 */
1379 	exceptions = pager_lock(ai);
1380 
1381 	stat_handle_fault();
1382 
1383 	/* check if the access is valid */
1384 	if (abort_is_user_exception(ai)) {
1385 		area = find_uta_area(ai->va);
1386 
1387 	} else {
1388 		area = find_area(&tee_pager_area_head, ai->va);
1389 		if (!area)
1390 			area = find_uta_area(ai->va);
1391 	}
1392 	if (!area || !area->pgt) {
1393 		ret = false;
1394 		goto out;
1395 	}
1396 
1397 	if (!tee_pager_unhide_page(page_va)) {
1398 		struct tee_pager_pmem *pmem = NULL;
1399 		uint32_t attr;
1400 		paddr_t pa;
1401 
1402 		/*
1403 		 * The page wasn't hidden, but some other core may have
1404 		 * updated the table entry before we got here or we need
1405 		 * to make a read-only page read-write (dirty).
1406 		 */
1407 		if (pager_update_permissions(area, ai, &ret)) {
1408 			/*
1409 			 * Nothing more to do with the abort. The problem
1410 			 * could already have been dealt with by another
1411 			 * core, or if ret is false the TA will be panicked.
1412 			 */
1413 			goto out;
1414 		}
1415 
1416 		pmem = tee_pager_get_page(area);
1417 		if (!pmem) {
1418 			abort_print(ai);
1419 			panic();
1420 		}
1421 
1422 		/* load page code & data */
1423 		tee_pager_load_page(area, page_va, pmem->va_alias);
1424 
1425 
1426 		pmem->area = area;
1427 		pmem->pgidx = area_va2idx(area, ai->va);
1428 		attr = get_area_mattr(area->flags) &
1429 			~(TEE_MATTR_PW | TEE_MATTR_UW);
1430 		pa = get_pmem_pa(pmem);
1431 
1432 		/*
1433 		 * We've updated the page using the aliased mapping and
1434 		 * some cache maintenance is now needed if it's an
1435 		 * executable page.
1436 		 *
1437 		 * Since the d-cache is a Physically-indexed,
1438 		 * physically-tagged (PIPT) cache we can clean either the
1439 		 * aliased address or the real virtual address. In this
1440 		 * case we choose the real virtual address.
1441 		 *
1442 		 * The i-cache can also be PIPT, but may be something else
1443 		 * too like VIPT. The current code requires the caches to
1444 		 * implement the IVIPT extension, that is:
1445 		 * "instruction cache maintenance is required only after
1446 		 * writing new data to a physical address that holds an
1447 		 * instruction."
1448 		 *
1449 		 * To portably invalidate the icache the page has to
1450 		 * be mapped at the final virtual address but not
1451 		 * executable.
1452 		 */
1453 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1454 			uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1455 					TEE_MATTR_PW | TEE_MATTR_UW;
1456 
1457 			/* Set a temporary read-only mapping */
1458 			area_set_entry(pmem->area, pmem->pgidx, pa,
1459 				       attr & ~mask);
1460 			tlbi_mva_allasid(page_va);
1461 
1462 			/*
1463 			 * Doing these operations to LoUIS (Level of
1464 			 * unification, Inner Shareable) would be enough
1465 			 */
1466 			cache_op_inner(DCACHE_AREA_CLEAN, (void *)page_va,
1467 				       SMALL_PAGE_SIZE);
1468 			cache_op_inner(ICACHE_AREA_INVALIDATE, (void *)page_va,
1469 				       SMALL_PAGE_SIZE);
1470 
1471 			/* Set the final mapping */
1472 			area_set_entry(area, pmem->pgidx, pa, attr);
1473 			tlbi_mva_allasid(page_va);
1474 		} else {
1475 			area_set_entry(area, pmem->pgidx, pa, attr);
1476 			/*
1477 			 * No need to flush TLB for this entry, it was
1478 			 * invalid. We should use a barrier though, to make
1479 			 * sure that the change is visible.
1480 			 */
1481 			dsb_ishst();
1482 		}
1483 		pgt_inc_used_entries(area->pgt);
1484 
1485 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1486 
1487 	}
1488 
1489 	tee_pager_hide_pages();
1490 	ret = true;
1491 out:
1492 	pager_unlock(exceptions);
1493 	return ret;
1494 }
1495 
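/*
 * Register @npages physical pages starting at @vaddr with the pager. With
 * @unmap set the pages are unmapped and only added to the pool of pageable
 * pages, otherwise they keep their current mapping and are tracked as
 * already paged-in pages of the covering area.
 */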
1496 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1497 {
1498 	size_t n;
1499 
1500 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1501 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1502 
1503 	/* setup memory */
1504 	for (n = 0; n < npages; n++) {
1505 		struct core_mmu_table_info *ti;
1506 		struct tee_pager_pmem *pmem;
1507 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1508 		unsigned int pgidx;
1509 		paddr_t pa;
1510 		uint32_t attr;
1511 
1512 		ti = find_table_info(va);
1513 		pgidx = core_mmu_va2idx(ti, va);
1514 		/*
1515 		 * Note that we can only support adding pages in the
1516 		 * valid range of this table info; currently that is not a problem.
1517 		 */
1518 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1519 
1520 		/* Ignore unmapped pages/blocks */
1521 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1522 			continue;
1523 
1524 		pmem = malloc(sizeof(struct tee_pager_pmem));
1525 		if (!pmem)
1526 			panic("out of mem");
1527 
1528 		pmem->va_alias = pager_add_alias_page(pa);
1529 
1530 		if (unmap) {
1531 			pmem->area = NULL;
1532 			pmem->pgidx = INVALID_PGIDX;
1533 			core_mmu_set_entry(ti, pgidx, 0, 0);
1534 			pgt_dec_used_entries(find_core_pgt(va));
1535 		} else {
1536 			/*
1537 			 * The page is still mapped, let's assign the area
1538 			 * and update the protection bits accordingly.
1539 			 */
1540 			pmem->area = find_area(&tee_pager_area_head, va);
1541 			assert(pmem->area->pgt == find_core_pgt(va));
1542 			pmem->pgidx = pgidx;
1543 			assert(pa == get_pmem_pa(pmem));
1544 			area_set_entry(pmem->area, pgidx, pa,
1545 				       get_area_mattr(pmem->area->flags));
1546 		}
1547 
1548 		tee_pager_npages++;
1549 		incr_npages_all();
1550 		set_npages();
1551 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1552 	}
1553 
1554 	/*
1555 	 * As this is done at init, invalidate all TLBs once instead of
1556 	 * targeting only the modified entries.
1557 	 */
1558 	tlbi_all();
1559 }
1560 
1561 #ifdef CFG_PAGED_USER_TA
1562 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1563 {
1564 	struct pgt *p = pgt;
1565 
1566 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1567 		p = SLIST_NEXT(p, link);
1568 	return p;
1569 }
1570 
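/*
 * Assign page tables from the thread's pgt cache to any area of @utc that
 * doesn't have one yet.
 */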
1571 void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
1572 {
1573 	struct tee_pager_area *area;
1574 	struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1575 
1576 	TAILQ_FOREACH(area, utc->areas, link) {
1577 		if (!area->pgt)
1578 			area->pgt = find_pgt(pgt, area->base);
1579 		else
1580 			assert(area->pgt == find_pgt(pgt, area->base));
1581 		if (!area->pgt)
1582 			panic();
1583 	}
1584 }
1585 
1586 static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
1587 {
1588 	uint32_t attr;
1589 
1590 	assert(pmem->area && pmem->area->pgt);
1591 
1592 	area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
1593 	area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1594 	tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1595 	tee_pager_save_page(pmem, attr);
1596 	assert(pmem->area->pgt->num_used_entries);
1597 	pmem->area->pgt->num_used_entries--;
1598 	pmem->pgidx = INVALID_PGIDX;
1599 	pmem->area = NULL;
1600 }
1601 
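/*
 * Save and unmap all paged-in pages using @pgt and detach @pgt from its
 * areas, typically before the page table is recycled for another context.
 */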
1602 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1603 {
1604 	struct tee_pager_pmem *pmem;
1605 	struct tee_pager_area *area;
1606 	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1607 
1608 	if (!pgt->num_used_entries)
1609 		goto out;
1610 
1611 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1612 		if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
1613 			continue;
1614 		if (pmem->area->pgt == pgt)
1615 			pager_save_and_release_entry(pmem);
1616 	}
1617 	assert(!pgt->num_used_entries);
1618 
1619 out:
1620 	if (is_user_ta_ctx(pgt->ctx)) {
1621 		TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
1622 			if (area->pgt == pgt)
1623 				area->pgt = NULL;
1624 		}
1625 	}
1626 
1627 	pager_unlock(exceptions);
1628 }
1629 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1630 #endif /*CFG_PAGED_USER_TA*/
1631 
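/*
 * Return the locked physical pages backing [addr, addr + size) to the pool
 * of pageable pages. Only whole pages completely inside the range are
 * released.
 */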
1632 void tee_pager_release_phys(void *addr, size_t size)
1633 {
1634 	bool unmapped = false;
1635 	vaddr_t va = (vaddr_t)addr;
1636 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1637 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1638 	struct tee_pager_area *area;
1639 	uint32_t exceptions;
1640 
1641 	if (end <= begin)
1642 		return;
1643 
1644 	exceptions = pager_lock_check_stack(128);
1645 
1646 	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
1647 		area = find_area(&tee_pager_area_head, va);
1648 		if (!area)
1649 			panic();
1650 		unmapped |= tee_pager_release_one_phys(area, va);
1651 	}
1652 
1653 	if (unmapped)
1654 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1655 
1656 	pager_unlock(exceptions);
1657 }
1658 KEEP_PAGER(tee_pager_release_phys);
1659 
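/*
 * Allocate a pager-backed, zero-initialized buffer of at least @size bytes
 * of core virtual address space. With TEE_MATTR_LOCKED in @flags the pages
 * stay resident once they have been touched.
 */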
1660 void *tee_pager_alloc(size_t size, uint32_t flags)
1661 {
1662 	tee_mm_entry_t *mm;
1663 	uint32_t f = TEE_MATTR_PW | TEE_MATTR_PR | (flags & TEE_MATTR_LOCKED);
1664 	uint8_t *smem;
1665 	size_t bytes;
1666 
1667 	if (!size)
1668 		return NULL;
1669 
1670 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1671 	if (!mm)
1672 		return NULL;
1673 
1674 	bytes = tee_mm_get_bytes(mm);
1675 	smem = (uint8_t *)tee_mm_get_smem(mm);
1676 	tee_pager_add_core_area((vaddr_t)smem, bytes, f, NULL, NULL);
1677 	asan_tag_access(smem, smem + bytes);
1678 
1679 	return smem;
1680 }
1681