xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 8e81e2f5366a971afdd2ac47fb8529d1def5feb0)
1 /*
2  * Copyright (c) 2016, Linaro Limited
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <arm.h>
30 #include <assert.h>
31 #include <io.h>
32 #include <keep.h>
33 #include <kernel/abort.h>
34 #include <kernel/asan.h>
35 #include <kernel/panic.h>
36 #include <kernel/spinlock.h>
37 #include <kernel/tee_misc.h>
38 #include <kernel/tee_ta_manager.h>
39 #include <kernel/thread.h>
40 #include <kernel/tlb_helpers.h>
41 #include <mm/core_memprot.h>
42 #include <mm/tee_mm.h>
43 #include <mm/tee_pager.h>
44 #include <stdlib.h>
45 #include <sys/queue.h>
46 #include <tee_api_defines.h>
47 #include <tee/tee_cryp_provider.h>
48 #include <trace.h>
49 #include <types_ext.h>
50 #include <utee_defines.h>
51 #include <util.h>
52 
53 #include "pager_private.h"
54 
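/* Size in bits of the AES-GCM key used to protect paged-out read/write pages */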
55 #define PAGER_AE_KEY_BITS	256
56 
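/*
 * struct pager_rw_pstate - per-page crypto state for a paged read/write page
 * @iv		counter incremented on every encryption, used to build the IV
 * @tag		authentication tag from the latest encryption of the page
 */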
57 struct pager_rw_pstate {
58 	uint64_t iv;
59 	uint8_t tag[PAGER_AES_GCM_TAG_LEN];
60 };
61 
62 enum area_type {
63 	AREA_TYPE_RO,
64 	AREA_TYPE_RW,
65 	AREA_TYPE_LOCK,
66 };
67 
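/*
 * struct tee_pager_area - virtual memory range managed by the pager
 * @u.hashes	SHA-256 hashes of the stored pages (AREA_TYPE_RO)
 * @u.rwp	per-page crypto state (AREA_TYPE_RW)
 * @store	backing store holding the paged-out content
 * @type	read-only, read/write or locked
 * @flags	TEE_MATTR_* attributes of the area
 * @base	virtual base address of the area
 * @size	size of the area in bytes
 * @pgt		page table holding the area's entries
 * @link	link in the list of areas
 */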
68 struct tee_pager_area {
69 	union {
70 		const uint8_t *hashes;
71 		struct pager_rw_pstate *rwp;
72 	} u;
73 	uint8_t *store;
74 	enum area_type type;
75 	uint32_t flags;
76 	vaddr_t base;
77 	size_t size;
78 	struct pgt *pgt;
79 	TAILQ_ENTRY(tee_pager_area) link;
80 };
81 
82 TAILQ_HEAD(tee_pager_area_head, tee_pager_area);
83 
84 static struct tee_pager_area_head tee_pager_area_head =
85 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
86 
87 #define INVALID_PGIDX	UINT_MAX
88 
89 /*
90  * struct tee_pager_pmem - Represents a physical page used for paging.
91  *
92  * @pgidx	index of the page's entry in the area's translation table
93  * @va_alias	Virtual address where the physical page is always aliased.
94  *		Used during remapping of the page when the content needs to
95  *		be updated before it's available at the new location.
96  * @area	a pointer to the pager area
97  */
98 struct tee_pager_pmem {
99 	unsigned pgidx;
100 	void *va_alias;
101 	struct tee_pager_area *area;
102 	TAILQ_ENTRY(tee_pager_pmem) link;
103 };
104 
105 /* The list of physical pages. The first page in the list is the oldest */
106 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
107 
108 static struct tee_pager_pmem_head tee_pager_pmem_head =
109 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
110 
111 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
112 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
113 
114 static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8];
115 
116 /* Maximum number of pages to hide in one pass */
117 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
118 
119 /* Number of registered physical pages, used when hiding pages. */
120 static size_t tee_pager_npages;
121 
122 #ifdef CFG_WITH_STATS
123 static struct tee_pager_stats pager_stats;
124 
125 static inline void incr_ro_hits(void)
126 {
127 	pager_stats.ro_hits++;
128 }
129 
130 static inline void incr_rw_hits(void)
131 {
132 	pager_stats.rw_hits++;
133 }
134 
135 static inline void incr_hidden_hits(void)
136 {
137 	pager_stats.hidden_hits++;
138 }
139 
140 static inline void incr_zi_released(void)
141 {
142 	pager_stats.zi_released++;
143 }
144 
145 static inline void incr_npages_all(void)
146 {
147 	pager_stats.npages_all++;
148 }
149 
150 static inline void set_npages(void)
151 {
152 	pager_stats.npages = tee_pager_npages;
153 }
154 
155 void tee_pager_get_stats(struct tee_pager_stats *stats)
156 {
157 	*stats = pager_stats;
158 
159 	pager_stats.hidden_hits = 0;
160 	pager_stats.ro_hits = 0;
161 	pager_stats.rw_hits = 0;
162 	pager_stats.zi_released = 0;
163 }
164 
165 #else /* CFG_WITH_STATS */
166 static inline void incr_ro_hits(void) { }
167 static inline void incr_rw_hits(void) { }
168 static inline void incr_hidden_hits(void) { }
169 static inline void incr_zi_released(void) { }
170 static inline void incr_npages_all(void) { }
171 static inline void set_npages(void) { }
172 
173 void tee_pager_get_stats(struct tee_pager_stats *stats)
174 {
175 	memset(stats, 0, sizeof(struct tee_pager_stats));
176 }
177 #endif /* CFG_WITH_STATS */
178 
179 #define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
180 #define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
181 #define TBL_SHIFT	SMALL_PAGE_SHIFT
182 
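/* Pager-managed virtual address range rounded out to translation table boundaries */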
183 #define EFFECTIVE_VA_SIZE \
184 	(ROUNDUP(TEE_RAM_VA_START + CFG_TEE_RAM_VA_SIZE, \
185 		 CORE_MMU_PGDIR_SIZE) - \
186 	 ROUNDDOWN(TEE_RAM_VA_START, CORE_MMU_PGDIR_SIZE))
187 
188 static struct pager_table {
189 	struct pgt pgt;
190 	struct core_mmu_table_info tbl_info;
191 } pager_tables[EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE];
192 
193 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
194 
195 /* Defines the range of the alias area */
196 static tee_mm_entry_t *pager_alias_area;
197 /*
198  * Physical pages are added in a stack-like fashion to the alias area.
199  * @pager_alias_next_free gives the address of the next free entry,
200  * or 0 when the alias area has been exhausted.
201  */
202 static uintptr_t pager_alias_next_free;
203 
204 #ifdef CFG_TEE_CORE_DEBUG
205 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
206 
207 static uint32_t pager_lock_dldetect(const char *func, const int line,
208 				    struct abort_info *ai)
209 {
210 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
211 	unsigned int retries = 0;
212 	unsigned int reminder = 0;
213 
214 	while (!cpu_spin_trylock(&pager_spinlock)) {
215 		retries++;
216 		if (!retries) {
217 			/* wrapped, time to report */
218 			trace_printf(func, line, TRACE_ERROR, true,
219 				     "possible spinlock deadlock reminder %u",
220 				     reminder);
221 			if (reminder < UINT_MAX)
222 				reminder++;
223 			if (ai)
224 				abort_print(ai);
225 		}
226 	}
227 
228 	return exceptions;
229 }
230 #else
231 static uint32_t pager_lock(struct abort_info __unused *ai)
232 {
233 	return cpu_spin_lock_xsave(&pager_spinlock);
234 }
235 #endif
236 
237 static uint32_t pager_lock_check_stack(size_t stack_size)
238 {
239 	if (stack_size) {
240 		int8_t buf[stack_size];
241 		size_t n;
242 
243 		/*
244 		 * Make sure to touch all pages of the stack that we expect
245 		 * to use with this lock held. Any page faults need to be
246 		 * taken before the lock is held or we'll deadlock
247 		 * the pager. The pages that are populated in this way will
248 		 * eventually be released at certain save transitions of
249 		 * the thread.
250 		 */
251 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
252 			write8(1, (vaddr_t)buf + n);
253 		write8(1, (vaddr_t)buf + stack_size - 1);
254 	}
255 
256 	return pager_lock(NULL);
257 }
258 
259 static void pager_unlock(uint32_t exceptions)
260 {
261 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
262 }
263 
264 void *tee_pager_phys_to_virt(paddr_t pa)
265 {
266 	struct core_mmu_table_info ti;
267 	unsigned idx;
268 	uint32_t a;
269 	paddr_t p;
270 	vaddr_t v;
271 	size_t n;
272 
273 	/*
274 	 * Most addresses are mapped linearly, try that first if possible.
275 	 */
276 	if (!tee_pager_get_table_info(pa, &ti))
277 		return NULL; /* impossible pa */
278 	idx = core_mmu_va2idx(&ti, pa);
279 	core_mmu_get_entry(&ti, idx, &p, &a);
280 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
281 		return (void *)core_mmu_idx2va(&ti, idx);
282 
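	/* Fall back to a linear search through all pager translation tables */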
283 	n = 0;
284 	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
285 	while (true) {
286 		while (idx < TBL_NUM_ENTRIES) {
287 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
288 			if (v >= (TEE_RAM_VA_START + CFG_TEE_RAM_VA_SIZE))
289 				return NULL;
290 
291 			core_mmu_get_entry(&pager_tables[n].tbl_info,
292 					   idx, &p, &a);
293 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
294 				return (void *)v;
295 			idx++;
296 		}
297 
298 		n++;
299 		if (n >= ARRAY_SIZE(pager_tables))
300 			return NULL;
301 		idx = 0;
302 	}
303 
304 	return NULL;
305 }
306 
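/* Returns the pager table covering @va, or NULL if @va is outside the pager range */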
307 static struct pager_table *find_pager_table_may_fail(vaddr_t va)
308 {
309 	size_t n;
310 	const vaddr_t mask = CORE_MMU_PGDIR_MASK;
311 
312 	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
313 	    CORE_MMU_PGDIR_SHIFT;
314 	if (n >= ARRAY_SIZE(pager_tables))
315 		return NULL;
316 
317 	assert(va >= pager_tables[n].tbl_info.va_base &&
318 	       va <= (pager_tables[n].tbl_info.va_base | mask));
319 
320 	return pager_tables + n;
321 }
322 
323 static struct pager_table *find_pager_table(vaddr_t va)
324 {
325 	struct pager_table *pt = find_pager_table_may_fail(va);
326 
327 	assert(pt);
328 	return pt;
329 }
330 
331 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
332 {
333 	struct pager_table *pt = find_pager_table_may_fail(va);
334 
335 	if (!pt)
336 		return false;
337 
338 	*ti = pt->tbl_info;
339 	return true;
340 }
341 
342 static struct core_mmu_table_info *find_table_info(vaddr_t va)
343 {
344 	return &find_pager_table(va)->tbl_info;
345 }
346 
347 static struct pgt *find_core_pgt(vaddr_t va)
348 {
349 	return &find_pager_table(va)->pgt;
350 }
351 
352 static void set_alias_area(tee_mm_entry_t *mm)
353 {
354 	struct pager_table *pt;
355 	unsigned idx;
356 	vaddr_t smem = tee_mm_get_smem(mm);
357 	size_t nbytes = tee_mm_get_bytes(mm);
358 	vaddr_t v;
359 
360 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
361 
362 	assert(!pager_alias_area);
363 	pager_alias_area = mm;
364 	pager_alias_next_free = smem;
365 
366 	/* Clear all mappings in the alias area */
367 	pt = find_pager_table(smem);
368 	idx = core_mmu_va2idx(&pt->tbl_info, smem);
369 	while (pt <= (pager_tables + ARRAY_SIZE(pager_tables) - 1)) {
370 		while (idx < TBL_NUM_ENTRIES) {
371 			v = core_mmu_idx2va(&pt->tbl_info, idx);
372 			if (v >= (smem + nbytes))
373 				goto out;
374 
375 			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
376 			idx++;
377 		}
378 
379 		pt++;
380 		idx = 0;
381 	}
382 
383 out:
384 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
385 }
386 
387 static void generate_ae_key(void)
388 {
389 	if (rng_generate(pager_ae_key, sizeof(pager_ae_key)) != TEE_SUCCESS)
390 		panic("failed to generate random");
391 }
392 
393 static size_t tbl_usage_count(struct core_mmu_table_info *ti)
394 {
395 	size_t n;
396 	paddr_t pa;
397 	size_t usage = 0;
398 
399 	for (n = 0; n < ti->num_entries; n++) {
400 		core_mmu_get_entry(ti, n, &pa, NULL);
401 		if (pa)
402 			usage++;
403 	}
404 	return usage;
405 }
406 
407 static void area_get_entry(struct tee_pager_area *area, size_t idx,
408 			   paddr_t *pa, uint32_t *attr)
409 {
410 	assert(area->pgt);
411 	assert(idx < TBL_NUM_ENTRIES);
412 	core_mmu_get_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
413 }
414 
415 static void area_set_entry(struct tee_pager_area *area, size_t idx,
416 			   paddr_t pa, uint32_t attr)
417 {
418 	assert(area->pgt);
419 	assert(idx < TBL_NUM_ENTRIES);
420 	core_mmu_set_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
421 }
422 
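/* Translate between a virtual address and its entry index in the area's table */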
423 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
424 {
425 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
426 }
427 
428 static vaddr_t area_idx2va(struct tee_pager_area *area, size_t idx)
429 {
430 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
431 }
432 
433 void tee_pager_early_init(void)
434 {
435 	size_t n;
436 
437 	/*
438 	 * Note that this depends on add_pager_vaspace() adding the
439 	 * virtual address space after the end of memory.
440 	 */
441 	for (n = 0; n < ARRAY_SIZE(pager_tables); n++) {
442 		if (!core_mmu_find_table(TEE_RAM_VA_START +
443 					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
444 					 &pager_tables[n].tbl_info))
445 			panic("can't find mmu tables");
446 
447 		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
448 			panic("Unsupported page size in translation table");
449 		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
450 		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);
451 
452 		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
453 		pgt_set_used_entries(&pager_tables[n].pgt,
454 				tbl_usage_count(&pager_tables[n].tbl_info));
455 	}
456 }
457 
458 void tee_pager_init(tee_mm_entry_t *mm_alias)
459 {
460 	set_alias_area(mm_alias);
461 	generate_ae_key();
462 }
463 
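/*
 * Map the physical page @pa at the next free slot in the alias area and
 * return the aliased virtual address.
 */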
464 static void *pager_add_alias_page(paddr_t pa)
465 {
466 	unsigned idx;
467 	struct core_mmu_table_info *ti;
468 	/* Alias pages are mapped read-only; write permission is enabled at runtime when needed */
469 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
470 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
471 			TEE_MATTR_SECURE | TEE_MATTR_PR;
472 
473 	DMSG("0x%" PRIxPA, pa);
474 
475 	ti = find_table_info(pager_alias_next_free);
476 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
477 	core_mmu_set_entry(ti, idx, pa, attr);
478 	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
479 	pager_alias_next_free += SMALL_PAGE_SIZE;
480 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
481 				      tee_mm_get_bytes(pager_alias_area)))
482 		pager_alias_next_free = 0;
483 	return (void *)core_mmu_idx2va(ti, idx);
484 }
485 
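/*
 * Allocate and initialize a pager area. Read/write areas get a backing
 * store in secure DDR and per-page crypto state, read-only areas reference
 * the supplied @store and @hashes directly, locked areas need neither.
 */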
486 static struct tee_pager_area *alloc_area(struct pgt *pgt,
487 					 vaddr_t base, size_t size,
488 					 uint32_t flags, const void *store,
489 					 const void *hashes)
490 {
491 	struct tee_pager_area *area = calloc(1, sizeof(*area));
492 	enum area_type at;
493 	tee_mm_entry_t *mm_store = NULL;
494 
495 	if (!area)
496 		return NULL;
497 
498 	if (flags & (TEE_MATTR_PW | TEE_MATTR_UW)) {
499 		if (flags & TEE_MATTR_LOCKED) {
500 			at = AREA_TYPE_LOCK;
501 			goto out;
502 		}
503 		mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
504 		if (!mm_store)
505 			goto bad;
506 		area->store = phys_to_virt(tee_mm_get_smem(mm_store),
507 					   MEM_AREA_TA_RAM);
508 		if (!area->store)
509 			goto bad;
510 		area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
511 				     sizeof(struct pager_rw_pstate));
512 		if (!area->u.rwp)
513 			goto bad;
514 		at = AREA_TYPE_RW;
515 	} else {
516 		area->store = (void *)store;
517 		area->u.hashes = hashes;
518 		at = AREA_TYPE_RO;
519 	}
520 out:
521 	area->pgt = pgt;
522 	area->base = base;
523 	area->size = size;
524 	area->flags = flags;
525 	area->type = at;
526 	return area;
527 bad:
528 	tee_mm_free(mm_store);
529 	free(area->u.rwp);
530 	free(area);
531 	return NULL;
532 }
533 
534 static void area_insert_tail(struct tee_pager_area *area)
535 {
536 	uint32_t exceptions = pager_lock_check_stack(8);
537 
538 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
539 
540 	pager_unlock(exceptions);
541 }
542 KEEP_PAGER(area_insert_tail);
543 
544 void tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
545 			     const void *store, const void *hashes)
546 {
547 	struct tee_pager_area *area;
548 	vaddr_t b = base;
549 	size_t s = size;
550 	size_t s2;
551 
552 
553 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
554 		base, base + size, flags, store, hashes);
555 
556 	if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) {
557 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);
558 		panic();
559 	}
560 
561 	if (!(flags & TEE_MATTR_PW) && (!store || !hashes))
562 		panic("read-only pages must provide store and hashes");
563 
564 	if ((flags & TEE_MATTR_PW) && (store || hashes))
565 		panic("writable pages cannot provide store or hashes");
566 
567 	while (s) {
568 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
569 		area = alloc_area(find_core_pgt(b), b, s2, flags,
570 				  (const uint8_t *)store + b - base,
571 				  (const uint8_t *)hashes + (b - base) /
572 							SMALL_PAGE_SIZE *
573 							TEE_SHA256_HASH_SIZE);
574 		if (!area)
575 			panic("alloc_area");
576 		area_insert_tail(area);
577 		b += s2;
578 		s -= s2;
579 	}
580 }
581 
582 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
583 					vaddr_t va)
584 {
585 	struct tee_pager_area *area;
586 
587 	if (!areas)
588 		return NULL;
589 
590 	TAILQ_FOREACH(area, areas, link) {
591 		if (core_is_buffer_inside(va, 1, area->base, area->size))
592 			return area;
593 	}
594 	return NULL;
595 }
596 
597 #ifdef CFG_PAGED_USER_TA
598 static struct tee_pager_area *find_uta_area(vaddr_t va)
599 {
600 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
601 
602 	if (!ctx || !is_user_ta_ctx(ctx))
603 		return NULL;
604 	return find_area(to_user_ta_ctx(ctx)->areas, va);
605 }
606 #else
607 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
608 {
609 	return NULL;
610 }
611 #endif /*CFG_PAGED_USER_TA*/
612 
613 
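/* Build the mapping attributes used when a page of the area is present */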
614 static uint32_t get_area_mattr(uint32_t area_flags)
615 {
616 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
617 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
618 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
619 
620 	if (!(area_flags & (TEE_MATTR_UR | TEE_MATTR_UX | TEE_MATTR_UW)))
621 		attr |= TEE_MATTR_GLOBAL;
622 
623 	return attr;
624 }
625 
626 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
627 {
628 	struct core_mmu_table_info *ti;
629 	paddr_t pa;
630 	unsigned idx;
631 
632 	ti = find_table_info((vaddr_t)pmem->va_alias);
633 	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
634 	core_mmu_get_entry(ti, idx, &pa, NULL);
635 	return pa;
636 }
637 
638 static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
639 			void *dst)
640 {
641 	struct pager_aes_gcm_iv iv = {
642 		{ (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
643 	};
644 
645 	return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key),
646 				     &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE);
647 }
648 
649 static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
650 {
651 	struct pager_aes_gcm_iv iv;
652 
653 	assert((rwp->iv + 1) > rwp->iv);
654 	rwp->iv++;
655 	/*
656 	 * IV is constructed as recommended in section "8.2.1 Deterministic
657 	 * Construction" of "Recommendation for Block Cipher Modes of
658 	 * Operation: Galois/Counter Mode (GCM) and GMAC",
659 	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
660 	 */
661 	iv.iv[0] = (vaddr_t)rwp;
662 	iv.iv[1] = rwp->iv >> 32;
663 	iv.iv[2] = rwp->iv;
664 
665 	if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key),
666 				   &iv, rwp->tag,
667 				   src, dst, SMALL_PAGE_SIZE))
668 		panic("gcm failed");
669 }
670 
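/*
 * Populate the aliased physical page with the content of @page_va:
 * copied and verified against its hash for read-only areas, decrypted
 * (or zero-initialized on first use) for read/write areas, and
 * zero-initialized for locked areas.
 */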
671 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
672 			void *va_alias)
673 {
674 	size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
675 	const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;
676 	struct core_mmu_table_info *ti;
677 	uint32_t attr_alias;
678 	paddr_t pa_alias;
679 	unsigned int idx_alias;
680 
681 	/* Ensure we are allowed to write to the aliased virtual page */
682 	ti = find_table_info((vaddr_t)va_alias);
683 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
684 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
685 	if (!(attr_alias & TEE_MATTR_PW)) {
686 		attr_alias |= TEE_MATTR_PW;
687 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
688 		tlbi_mva_allasid((vaddr_t)va_alias);
689 	}
690 
691 	asan_tag_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
692 	switch (area->type) {
693 	case AREA_TYPE_RO:
694 		{
695 			const void *hash = area->u.hashes +
696 					   idx * TEE_SHA256_HASH_SIZE;
697 
698 			memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
699 			incr_ro_hits();
700 
701 			if (hash_sha256_check(hash, va_alias,
702 					      SMALL_PAGE_SIZE) != TEE_SUCCESS) {
703 				EMSG("PH 0x%" PRIxVA " failed", page_va);
704 				panic();
705 			}
706 		}
707 		/* Forbid writes to the alias for read-only (maybe exec) pages */
708 		attr_alias &= ~TEE_MATTR_PW;
709 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
710 		tlbi_mva_allasid((vaddr_t)va_alias);
711 		break;
712 	case AREA_TYPE_RW:
713 		FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
714 			va_alias, page_va, area->u.rwp[idx].iv);
715 		if (!area->u.rwp[idx].iv)
716 			memset(va_alias, 0, SMALL_PAGE_SIZE);
717 		else if (!decrypt_page(&area->u.rwp[idx], stored_page,
718 				       va_alias)) {
719 			EMSG("PH 0x%" PRIxVA " failed", page_va);
720 			panic();
721 		}
722 		incr_rw_hits();
723 		break;
724 	case AREA_TYPE_LOCK:
725 		FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
726 		memset(va_alias, 0, SMALL_PAGE_SIZE);
727 		break;
728 	default:
729 		panic();
730 	}
731 	asan_tag_no_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
732 }
733 
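/*
 * If the page belongs to a read/write area and is dirty, encrypt it via
 * its alias mapping into the area's backing store before it's reused.
 */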
734 static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
735 {
736 	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
737 				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;
738 
739 	if (pmem->area->type == AREA_TYPE_RW && (attr & dirty_bits)) {
740 		size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
741 		size_t idx = pmem->pgidx - (offs >> SMALL_PAGE_SHIFT);
742 		void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;
743 
744 		assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW));
745 		asan_tag_access(pmem->va_alias,
746 				(uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
747 		encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
748 			     stored_page);
749 		asan_tag_no_access(pmem->va_alias,
750 				   (uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
751 		FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
752 			pmem->area->base + idx * SMALL_PAGE_SIZE,
753 			pmem->area->u.rwp[idx].iv);
754 	}
755 }
756 
757 #ifdef CFG_PAGED_USER_TA
758 static void free_area(struct tee_pager_area *area)
759 {
760 	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr,
761 				virt_to_phys(area->store)));
762 	if (area->type == AREA_TYPE_RW)
763 		free(area->u.rwp);
764 	free(area);
765 }
766 
767 static bool pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
768 			       size_t size)
769 {
770 	struct tee_pager_area *area;
771 	uint32_t flags;
772 	vaddr_t b = base;
773 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
774 
775 	if (!utc->areas) {
776 		utc->areas = malloc(sizeof(*utc->areas));
777 		if (!utc->areas)
778 			return false;
779 		TAILQ_INIT(utc->areas);
780 	}
781 
782 	flags = TEE_MATTR_PRW | TEE_MATTR_URWX;
783 
784 	while (s) {
785 		size_t s2;
786 
787 		if (find_area(utc->areas, b))
788 			return false;
789 
790 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
791 
792 		/* Table info will be set when the context is activated. */
793 		area = alloc_area(NULL, b, s2, flags, NULL, NULL);
794 		if (!area)
795 			return false;
796 		TAILQ_INSERT_TAIL(utc->areas, area, link);
797 		b += s2;
798 		s -= s2;
799 	}
800 
801 	return true;
802 }
803 
804 bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size)
805 {
806 	struct thread_specific_data *tsd = thread_get_tsd();
807 	struct tee_pager_area *area;
808 	struct core_mmu_table_info dir_info = { NULL };
809 
810 	if (&utc->ctx != tsd->ctx) {
811 		/*
812 		 * Changes are to a utc that isn't active. Just add the
813 		 * areas; page tables will be dealt with later.
814 		 */
815 		return pager_add_uta_area(utc, base, size);
816 	}
817 
818 	/*
819 	 * Assign page tables before adding areas to be able to tell which
820 	 * are newly added and should be removed in case of failure.
821 	 */
822 	tee_pager_assign_uta_tables(utc);
823 	if (!pager_add_uta_area(utc, base, size)) {
824 		struct tee_pager_area *next_a;
825 
826 		/* Remove all added areas */
827 		TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
828 			if (!area->pgt) {
829 				TAILQ_REMOVE(utc->areas, area, link);
830 				free_area(area);
831 			}
832 		}
833 		return false;
834 	}
835 
836 	/*
837 	 * Assign page tables to the new areas and make sure that the page
838 	 * tables are registered in the upper table.
839 	 */
840 	tee_pager_assign_uta_tables(utc);
841 	core_mmu_get_user_pgdir(&dir_info);
842 	TAILQ_FOREACH(area, utc->areas, link) {
843 		paddr_t pa;
844 		size_t idx;
845 		uint32_t attr;
846 
847 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
848 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
849 
850 		/*
851 		 * Check if the page table is already in use; if it is,
852 		 * it's already registered.
853 		 */
854 		if (area->pgt->num_used_entries) {
855 			assert(attr & TEE_MATTR_TABLE);
856 			assert(pa == virt_to_phys(area->pgt->tbl));
857 			continue;
858 		}
859 
860 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
861 		pa = virt_to_phys(area->pgt->tbl);
862 		assert(pa);
863 		/*
864 		 * Note that the update of the table entry is guaranteed to
865 		 * be atomic.
866 		 */
867 		core_mmu_set_entry(&dir_info, idx, pa, attr);
868 	}
869 
870 	return true;
871 }
872 
873 static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
874 				   struct pgt *pgt)
875 {
876 	assert(pgt);
877 	ti->table = pgt->tbl;
878 	ti->va_base = pgt->vabase;
879 	ti->level = TBL_LEVEL;
880 	ti->shift = TBL_SHIFT;
881 	ti->num_entries = TBL_NUM_ENTRIES;
882 }
883 
884 static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
885 			   vaddr_t new_base)
886 {
887 	uint32_t exceptions = pager_lock_check_stack(64);
888 
889 	 * If there's no pgt assigned to the old area there are no pages
890 	 * to deal with either; just update with the new pgt and base.
891 	 * deal with either, just update with a new pgt and base.
892 	 */
893 	if (area->pgt) {
894 		struct core_mmu_table_info old_ti;
895 		struct core_mmu_table_info new_ti;
896 		struct tee_pager_pmem *pmem;
897 
898 		init_tbl_info_from_pgt(&old_ti, area->pgt);
899 		init_tbl_info_from_pgt(&new_ti, new_pgt);
900 
901 
902 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
903 			vaddr_t va;
904 			paddr_t pa;
905 			uint32_t attr;
906 
907 			if (pmem->area != area)
908 				continue;
909 			core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr);
910 			core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0);
911 
912 			assert(pa == get_pmem_pa(pmem));
913 			assert(attr);
914 			assert(area->pgt->num_used_entries);
915 			area->pgt->num_used_entries--;
916 
917 			va = core_mmu_idx2va(&old_ti, pmem->pgidx);
918 			va = va - area->base + new_base;
919 			pmem->pgidx = core_mmu_va2idx(&new_ti, va);
920 			core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr);
921 			new_pgt->num_used_entries++;
922 		}
923 	}
924 
925 	area->pgt = new_pgt;
926 	area->base = new_base;
927 	pager_unlock(exceptions);
928 }
929 KEEP_PAGER(transpose_area);
930 
931 void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
932 				   vaddr_t src_base,
933 				   struct user_ta_ctx *dst_utc,
934 				   vaddr_t dst_base, struct pgt **dst_pgt,
935 				   size_t size)
936 {
937 	struct tee_pager_area *area;
938 	struct tee_pager_area *next_a;
939 
940 	TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
941 		vaddr_t new_area_base;
942 		size_t new_idx;
943 
944 		if (!core_is_buffer_inside(area->base, area->size,
945 					  src_base, size))
946 			continue;
947 
948 		TAILQ_REMOVE(src_utc->areas, area, link);
949 
950 		new_area_base = dst_base + (src_base - area->base);
951 		new_idx = (new_area_base - dst_pgt[0]->vabase) /
952 			  CORE_MMU_PGDIR_SIZE;
953 		assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
954 		       dst_pgt[new_idx]->vabase);
955 		transpose_area(area, dst_pgt[new_idx], new_area_base);
956 
957 		/*
958 		 * Assert that this will not cause any conflicts in the new
959 		 * utc.  This should already be guaranteed, but a bug here
960 		 * could be tricky to find.
961 		 */
962 		assert(!find_area(dst_utc->areas, area->base));
963 		TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
964 	}
965 }
966 
967 static void rem_area(struct tee_pager_area_head *area_head,
968 		     struct tee_pager_area *area)
969 {
970 	struct tee_pager_pmem *pmem;
971 	uint32_t exceptions;
972 
973 	exceptions = pager_lock_check_stack(64);
974 
975 	TAILQ_REMOVE(area_head, area, link);
976 
977 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
978 		if (pmem->area == area) {
979 			area_set_entry(area, pmem->pgidx, 0, 0);
980 			tlbi_mva_allasid(area_idx2va(area, pmem->pgidx));
981 			pgt_dec_used_entries(area->pgt);
982 			pmem->area = NULL;
983 			pmem->pgidx = INVALID_PGIDX;
984 		}
985 	}
986 
987 	pager_unlock(exceptions);
988 	free_area(area);
989 }
990 KEEP_PAGER(rem_area);
991 
992 void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
993 			      size_t size)
994 {
995 	struct tee_pager_area *area;
996 	struct tee_pager_area *next_a;
997 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
998 
999 	TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
1000 		if (core_is_buffer_inside(area->base, area->size, base, s))
1001 			rem_area(utc->areas, area);
1002 	}
1003 }
1004 
1005 void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
1006 {
1007 	struct tee_pager_area *area;
1008 
1009 	if (!utc->areas)
1010 		return;
1011 
1012 	while (true) {
1013 		area = TAILQ_FIRST(utc->areas);
1014 		if (!area)
1015 			break;
1016 		TAILQ_REMOVE(utc->areas, area, link);
1017 		free_area(area);
1018 	}
1019 
1020 	free(utc->areas);
1021 }
1022 
1023 bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
1024 				 size_t size, uint32_t flags)
1025 {
1026 	bool ret;
1027 	vaddr_t b = base;
1028 	size_t s = size;
1029 	size_t s2;
1030 	struct tee_pager_area *area = find_area(utc->areas, b);
1031 	uint32_t exceptions;
1032 	struct tee_pager_pmem *pmem;
1033 	paddr_t pa;
1034 	uint32_t a;
1035 	uint32_t f;
1036 
1037 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
1038 	if (f & TEE_MATTR_UW)
1039 		f |= TEE_MATTR_PW;
1040 	f = get_area_mattr(f);
1041 
1042 	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1043 
1044 	while (s) {
1045 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
1046 		if (!area || area->base != b || area->size != s2) {
1047 			ret = false;
1048 			goto out;
1049 		}
1050 		b += s2;
1051 		s -= s2;
1052 
1053 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1054 			if (pmem->area != area)
1055 				continue;
1056 			area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
1057 			if (a & TEE_MATTR_VALID_BLOCK)
1058 				assert(pa == get_pmem_pa(pmem));
1059 			else
1060 				pa = get_pmem_pa(pmem);
1061 			if (a == f)
1062 				continue;
1063 			area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1064 			tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1065 			if (!(flags & TEE_MATTR_UW))
1066 				tee_pager_save_page(pmem, a);
1067 
1068 			area_set_entry(pmem->area, pmem->pgidx, pa, f);
1069 			/*
1070 			 * Make sure the table update is visible before
1071 			 * continuing.
1072 			 */
1073 			dsb_ishst();
1074 
1075 			if (flags & TEE_MATTR_UX) {
1076 				void *va = (void *)area_idx2va(pmem->area,
1077 							       pmem->pgidx);
1078 
1079 				cache_op_inner(DCACHE_AREA_CLEAN, va,
1080 						SMALL_PAGE_SIZE);
1081 				cache_op_inner(ICACHE_AREA_INVALIDATE, va,
1082 						SMALL_PAGE_SIZE);
1083 			}
1084 		}
1085 
1086 		area->flags = f;
1087 		area = TAILQ_NEXT(area, link);
1088 	}
1089 
1090 	ret = true;
1091 out:
1092 	pager_unlock(exceptions);
1093 	return ret;
1094 }
1095 KEEP_PAGER(tee_pager_set_uta_area_attr);
1096 #endif /*CFG_PAGED_USER_TA*/
1097 
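/*
 * If @page_va refers to a hidden page, restore its mapping (read-only
 * unless it was hidden dirty), move the page to the back of the pmem
 * list and return true.
 */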
1098 static bool tee_pager_unhide_page(vaddr_t page_va)
1099 {
1100 	struct tee_pager_pmem *pmem;
1101 
1102 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1103 		paddr_t pa;
1104 		uint32_t attr;
1105 
1106 		if (pmem->pgidx == INVALID_PGIDX)
1107 			continue;
1108 
1109 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
1110 
1111 		if (!(attr &
1112 		     (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
1113 			continue;
1114 
1115 		if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
1116 			uint32_t a = get_area_mattr(pmem->area->flags);
1117 
1118 			/* page is hidden, show and move to back */
1119 			if (pa != get_pmem_pa(pmem))
1120 				panic("unexpected pa");
1121 
1122 			/*
1123 			 * If it's not a dirty block, then it should be
1124 			 * read only.
1125 			 */
1126 			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
1127 				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1128 			else
1129 				FMSG("Unhide %#" PRIxVA, page_va);
1130 
1131 			if (page_va == 0x8000a000)
1132 				FMSG("unhide %#" PRIxVA " a %#" PRIX32,
1133 					page_va, a);
1134 			area_set_entry(pmem->area, pmem->pgidx, pa, a);
1135 			/*
1136 			 * Note that TLB invalidation isn't needed since
1137 			 * there wasn't a valid mapping before. We should
1138 			 * use a barrier though, to make sure that the
1139 			 * change is visible.
1140 			 */
1141 			dsb_ishst();
1142 
1143 			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1144 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1145 			incr_hidden_hits();
1146 			return true;
1147 		}
1148 	}
1149 
1150 	return false;
1151 }
1152 
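/*
 * Hide up to TEE_PAGER_NHIDE of the oldest mapped pages so that the next
 * access to them faults, which keeps the pmem list roughly ordered by use.
 */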
1153 static void tee_pager_hide_pages(void)
1154 {
1155 	struct tee_pager_pmem *pmem;
1156 	size_t n = 0;
1157 
1158 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1159 		paddr_t pa;
1160 		uint32_t attr;
1161 		uint32_t a;
1162 
1163 		if (n >= TEE_PAGER_NHIDE)
1164 			break;
1165 		n++;
1166 
1167 		/* we cannot hide pages when pmem->area is not defined. */
1168 		if (!pmem->area)
1169 			continue;
1170 
1171 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
1172 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1173 			continue;
1174 
1175 		assert(pa == get_pmem_pa(pmem));
1176 		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)){
1177 			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
1178 			FMSG("Hide %#" PRIxVA,
1179 			     area_idx2va(pmem->area, pmem->pgidx));
1180 		} else
1181 			a = TEE_MATTR_HIDDEN_BLOCK;
1182 
1183 		area_set_entry(pmem->area, pmem->pgidx, pa, a);
1184 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1185 	}
1186 }
1187 
1188 /*
1189  * Find the mapped pmem among the locked pages, unmap it and move it back
1190  * to the pageable pmem list. Return true if the page was mapped, else false.
1191  */
1192 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1193 				       vaddr_t page_va)
1194 {
1195 	struct tee_pager_pmem *pmem;
1196 	unsigned pgidx;
1197 	paddr_t pa;
1198 	uint32_t attr;
1199 
1200 	pgidx = area_va2idx(area, page_va);
1201 	area_get_entry(area, pgidx, &pa, &attr);
1202 
1203 	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
1204 
1205 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1206 		if (pmem->area != area || pmem->pgidx != pgidx)
1207 			continue;
1208 
1209 		assert(pa == get_pmem_pa(pmem));
1210 		area_set_entry(area, pgidx, 0, 0);
1211 		pgt_dec_used_entries(area->pgt);
1212 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1213 		pmem->area = NULL;
1214 		pmem->pgidx = INVALID_PGIDX;
1215 		tee_pager_npages++;
1216 		set_npages();
1217 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1218 		incr_zi_released();
1219 		return true;
1220 	}
1221 
1222 	return false;
1223 }
1224 
1225 /* Finds the oldest page and unmaps it from its old virtual address */
1226 static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
1227 {
1228 	struct tee_pager_pmem *pmem;
1229 
1230 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1231 	if (!pmem) {
1232 		EMSG("No pmem entries");
1233 		return NULL;
1234 	}
1235 	if (pmem->pgidx != INVALID_PGIDX) {
1236 		uint32_t a;
1237 
1238 		assert(pmem->area && pmem->area->pgt);
1239 		area_get_entry(pmem->area, pmem->pgidx, NULL, &a);
1240 		area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1241 		pgt_dec_used_entries(pmem->area->pgt);
1242 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1243 		tee_pager_save_page(pmem, a);
1244 	}
1245 
1246 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1247 	pmem->pgidx = INVALID_PGIDX;
1248 	pmem->area = NULL;
1249 	if (area->type == AREA_TYPE_LOCK) {
1250 		/* Move page to lock list */
1251 		if (tee_pager_npages <= 0)
1252 			panic("running out of page");
1253 		tee_pager_npages--;
1254 		set_npages();
1255 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1256 	} else {
1257 		/* move page to back */
1258 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1259 	}
1260 
1261 	return pmem;
1262 }
1263 
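/*
 * Handle an abort on a page that is already mapped: either a permission
 * fault that only needs the entry updated (for instance marking a page
 * dirty) or an access violation. Returns true when no further paging is
 * needed; *handled tells whether the abort was actually resolved.
 */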
1264 static bool pager_update_permissions(struct tee_pager_area *area,
1265 			struct abort_info *ai, bool *handled)
1266 {
1267 	unsigned int pgidx = area_va2idx(area, ai->va);
1268 	uint32_t attr;
1269 	paddr_t pa;
1270 
1271 	*handled = false;
1272 
1273 	area_get_entry(area, pgidx, &pa, &attr);
1274 
1275 	/* Not mapped */
1276 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1277 		return false;
1278 
1279 	/* Not readable, should not happen */
1280 	if (abort_is_user_exception(ai)) {
1281 		if (!(attr & TEE_MATTR_UR))
1282 			return true;
1283 	} else {
1284 		if (!(attr & TEE_MATTR_PR)) {
1285 			abort_print_error(ai);
1286 			panic();
1287 		}
1288 	}
1289 
1290 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1291 	case CORE_MMU_FAULT_TRANSLATION:
1292 	case CORE_MMU_FAULT_READ_PERMISSION:
1293 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1294 			/* Check if attempting to execute from a non-executable (NOX) page */
1295 			if (abort_is_user_exception(ai)) {
1296 				if (!(attr & TEE_MATTR_UX))
1297 					return true;
1298 			} else {
1299 				if (!(attr & TEE_MATTR_PX)) {
1300 					abort_print_error(ai);
1301 					panic();
1302 				}
1303 			}
1304 		}
1305 		/* Since the page is mapped now it's OK */
1306 		break;
1307 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1308 		/* Check if attempting to write to a read-only page */
1309 		if (abort_is_user_exception(ai)) {
1310 			if (!(area->flags & TEE_MATTR_UW))
1311 				return true;
1312 			if (!(attr & TEE_MATTR_UW)) {
1313 				FMSG("Dirty %p",
1314 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1315 				area_set_entry(area, pgidx, pa,
1316 					       get_area_mattr(area->flags));
1317 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1318 			}
1319 
1320 		} else {
1321 			if (!(area->flags & TEE_MATTR_PW)) {
1322 				abort_print_error(ai);
1323 				panic();
1324 			}
1325 			if (!(attr & TEE_MATTR_PW)) {
1326 				FMSG("Dirty %p",
1327 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1328 				area_set_entry(area, pgidx, pa,
1329 					       get_area_mattr(area->flags));
1330 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1331 			}
1332 		}
1333 		/* Since the permissions have been updated it's OK now */
1334 		break;
1335 	default:
1336 		/* Some fault we can't deal with */
1337 		if (abort_is_user_exception(ai))
1338 			return true;
1339 		abort_print_error(ai);
1340 		panic();
1341 	}
1342 	*handled = true;
1343 	return true;
1344 }
1345 
1346 #ifdef CFG_TEE_CORE_DEBUG
1347 static void stat_handle_fault(void)
1348 {
1349 	static size_t num_faults;
1350 	static size_t min_npages = SIZE_MAX;
1351 	static size_t total_min_npages = SIZE_MAX;
1352 
1353 	num_faults++;
1354 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1355 		DMSG("nfaults %zu npages %zu (min %zu)",
1356 		     num_faults, tee_pager_npages, min_npages);
1357 		min_npages = tee_pager_npages; /* reset */
1358 	}
1359 	if (tee_pager_npages < min_npages)
1360 		min_npages = tee_pager_npages;
1361 	if (tee_pager_npages < total_min_npages)
1362 		total_min_npages = tee_pager_npages;
1363 }
1364 #else
1365 static void stat_handle_fault(void)
1366 {
1367 }
1368 #endif
1369 
1370 bool tee_pager_handle_fault(struct abort_info *ai)
1371 {
1372 	struct tee_pager_area *area;
1373 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1374 	uint32_t exceptions;
1375 	bool ret;
1376 
1377 #ifdef TEE_PAGER_DEBUG_PRINT
1378 	abort_print(ai);
1379 #endif
1380 
1381 	/*
1382 	 * We're updating pages that can affect several active CPUs at a
1383 	 * time below. We end up here because a thread tries to access some
1384 	 * memory that isn't available. We have to be careful when making
1385 	 * that memory available as other threads may succeed in accessing
1386 	 * that address the moment after we've made it available.
1387 	 *
1388 	 * That means that we can't just map the memory and populate the
1389 	 * page, instead we use the aliased mapping to populate the page
1390 	 * and once everything is ready we map it.
1391 	 */
1392 	exceptions = pager_lock(ai);
1393 
1394 	stat_handle_fault();
1395 
1396 	/* check if the access is valid */
1397 	if (abort_is_user_exception(ai)) {
1398 		area = find_uta_area(ai->va);
1399 
1400 	} else {
1401 		area = find_area(&tee_pager_area_head, ai->va);
1402 		if (!area)
1403 			area = find_uta_area(ai->va);
1404 	}
1405 	if (!area || !area->pgt) {
1406 		ret = false;
1407 		goto out;
1408 	}
1409 
1410 	if (!tee_pager_unhide_page(page_va)) {
1411 		struct tee_pager_pmem *pmem = NULL;
1412 		uint32_t attr;
1413 		paddr_t pa;
1414 
1415 		/*
1416 		 * The page wasn't hidden, but some other core may have
1417 		 * updated the table entry before we got here, or we need
1418 		 * to make a read-only page read-write (dirty).
1419 		 */
1420 		if (pager_update_permissions(area, ai, &ret)) {
1421 			/*
1422 			 * Nothing more to do with the abort. The problem
1423 			 * could already have been dealt with from another
1424 			 * core, or if ret is false the TA will be panicked.
1425 			 */
1426 			goto out;
1427 		}
1428 
1429 		pmem = tee_pager_get_page(area);
1430 		if (!pmem) {
1431 			abort_print(ai);
1432 			panic();
1433 		}
1434 
1435 		/* load page code & data */
1436 		tee_pager_load_page(area, page_va, pmem->va_alias);
1437 
1438 
1439 		pmem->area = area;
1440 		pmem->pgidx = area_va2idx(area, ai->va);
1441 		attr = get_area_mattr(area->flags) &
1442 			~(TEE_MATTR_PW | TEE_MATTR_UW);
1443 		pa = get_pmem_pa(pmem);
1444 
1445 		/*
1446 		 * We've updated the page using the aliased mapping and
1447 		 * some cache maintenance is now needed if it's an
1448 		 * executable page.
1449 		 *
1450 		 * Since the d-cache is a Physically-indexed,
1451 		 * physically-tagged (PIPT) cache we can clean either the
1452 		 * aliased address or the real virtual address. In this
1453 		 * case we choose the real virtual address.
1454 		 *
1455 		 * The i-cache can also be PIPT, but may be something else
1456 		 * too like VIPT. The current code requires the caches to
1457 		 * implement the IVIPT extension, that is:
1458 		 * "instruction cache maintenance is required only after
1459 		 * writing new data to a physical address that holds an
1460 		 * instruction."
1461 		 *
1462 		 * To portably invalidate the icache the page has to
1463 		 * be mapped at the final virtual address but not
1464 		 * executable.
1465 		 */
1466 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1467 			uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1468 					TEE_MATTR_PW | TEE_MATTR_UW;
1469 
1470 			/* Set a temporary read-only mapping */
1471 			area_set_entry(pmem->area, pmem->pgidx, pa,
1472 				       attr & ~mask);
1473 			tlbi_mva_allasid(page_va);
1474 
1475 			/*
1476 			 * Doing these operations to the LoUIS (Level of
1477 			 * Unification, Inner Shareable) would be enough.
1478 			 */
1479 			cache_op_inner(DCACHE_AREA_CLEAN, (void *)page_va,
1480 				       SMALL_PAGE_SIZE);
1481 			cache_op_inner(ICACHE_AREA_INVALIDATE, (void *)page_va,
1482 				       SMALL_PAGE_SIZE);
1483 
1484 			/* Set the final mapping */
1485 			area_set_entry(area, pmem->pgidx, pa, attr);
1486 			tlbi_mva_allasid(page_va);
1487 		} else {
1488 			area_set_entry(area, pmem->pgidx, pa, attr);
1489 			/*
1490 			 * No need to flush TLB for this entry, it was
1491 			 * invalid. We should use a barrier though, to make
1492 			 * sure that the change is visible.
1493 			 */
1494 			dsb_ishst();
1495 		}
1496 		pgt_inc_used_entries(area->pgt);
1497 
1498 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1499 
1500 	}
1501 
1502 	tee_pager_hide_pages();
1503 	ret = true;
1504 out:
1505 	pager_unlock(exceptions);
1506 	return ret;
1507 }
1508 
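/*
 * Donate @npages physical pages starting at @vaddr to the pager. With
 * @unmap set the pages are unmapped and immediately available for paging,
 * otherwise they remain mapped and keep backing their current area.
 */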
1509 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1510 {
1511 	size_t n;
1512 
1513 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1514 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1515 
1516 	/* setup memory */
1517 	for (n = 0; n < npages; n++) {
1518 		struct core_mmu_table_info *ti;
1519 		struct tee_pager_pmem *pmem;
1520 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1521 		unsigned int pgidx;
1522 		paddr_t pa;
1523 		uint32_t attr;
1524 
1525 		ti = find_table_info(va);
1526 		pgidx = core_mmu_va2idx(ti, va);
1527 		/*
1528 		 * Note that we can only support adding pages in the
1529 		 * valid range of this table info, currently not a problem.
1530 		 */
1531 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1532 
1533 		/* Ignore unmapped pages/blocks */
1534 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1535 			continue;
1536 
1537 		pmem = malloc(sizeof(struct tee_pager_pmem));
1538 		if (!pmem)
1539 			panic("out of mem");
1540 
1541 		pmem->va_alias = pager_add_alias_page(pa);
1542 
1543 		if (unmap) {
1544 			pmem->area = NULL;
1545 			pmem->pgidx = INVALID_PGIDX;
1546 			core_mmu_set_entry(ti, pgidx, 0, 0);
1547 			pgt_dec_used_entries(find_core_pgt(va));
1548 		} else {
1549 			/*
1550 			 * The page is still mapped, let's assign the area
1551 			 * and update the protection bits accordingly.
1552 			 */
1553 			pmem->area = find_area(&tee_pager_area_head, va);
1554 			assert(pmem->area->pgt == find_core_pgt(va));
1555 			pmem->pgidx = pgidx;
1556 			assert(pa == get_pmem_pa(pmem));
1557 			area_set_entry(pmem->area, pgidx, pa,
1558 				       get_area_mattr(pmem->area->flags));
1559 		}
1560 
1561 		tee_pager_npages++;
1562 		incr_npages_all();
1563 		set_npages();
1564 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1565 	}
1566 
1567 	/*
1568 	 * As this is done during init, invalidate all TLBs once instead of
1569 	 * targeting only the modified entries.
1570 	 */
1571 	tlbi_all();
1572 }
1573 
1574 #ifdef CFG_PAGED_USER_TA
1575 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1576 {
1577 	struct pgt *p = pgt;
1578 
1579 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1580 		p = SLIST_NEXT(p, link);
1581 	return p;
1582 }
1583 
1584 void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
1585 {
1586 	struct tee_pager_area *area;
1587 	struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1588 
1589 	TAILQ_FOREACH(area, utc->areas, link) {
1590 		if (!area->pgt)
1591 			area->pgt = find_pgt(pgt, area->base);
1592 		else
1593 			assert(area->pgt == find_pgt(pgt, area->base));
1594 		if (!area->pgt)
1595 			panic();
1596 	}
1597 }
1598 
1599 static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
1600 {
1601 	uint32_t attr;
1602 
1603 	assert(pmem->area && pmem->area->pgt);
1604 
1605 	area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
1606 	area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1607 	tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1608 	tee_pager_save_page(pmem, attr);
1609 	assert(pmem->area->pgt->num_used_entries);
1610 	pmem->area->pgt->num_used_entries--;
1611 	pmem->pgidx = INVALID_PGIDX;
1612 	pmem->area = NULL;
1613 }
1614 
1615 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1616 {
1617 	struct tee_pager_pmem *pmem;
1618 	struct tee_pager_area *area;
1619 	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1620 
1621 	if (!pgt->num_used_entries)
1622 		goto out;
1623 
1624 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1625 		if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
1626 			continue;
1627 		if (pmem->area->pgt == pgt)
1628 			pager_save_and_release_entry(pmem);
1629 	}
1630 	assert(!pgt->num_used_entries);
1631 
1632 out:
1633 	if (is_user_ta_ctx(pgt->ctx)) {
1634 		TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
1635 			if (area->pgt == pgt)
1636 				area->pgt = NULL;
1637 		}
1638 	}
1639 
1640 	pager_unlock(exceptions);
1641 }
1642 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1643 #endif /*CFG_PAGED_USER_TA*/
1644 
1645 void tee_pager_release_phys(void *addr, size_t size)
1646 {
1647 	bool unmapped = false;
1648 	vaddr_t va = (vaddr_t)addr;
1649 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1650 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1651 	struct tee_pager_area *area;
1652 	uint32_t exceptions;
1653 
1654 	if (end <= begin)
1655 		return;
1656 
1657 	exceptions = pager_lock_check_stack(128);
1658 
1659 	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
1660 		area = find_area(&tee_pager_area_head, va);
1661 		if (!area)
1662 			panic();
1663 		unmapped |= tee_pager_release_one_phys(area, va);
1664 	}
1665 
1666 	if (unmapped)
1667 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1668 
1669 	pager_unlock(exceptions);
1670 }
1671 KEEP_PAGER(tee_pager_release_phys);
1672 
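/*
 * Allocate pager-backed (and optionally locked) read/write core memory.
 * The returned range is demand-paged and zero-initialized on first use.
 */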
1673 void *tee_pager_alloc(size_t size, uint32_t flags)
1674 {
1675 	tee_mm_entry_t *mm;
1676 	uint32_t f = TEE_MATTR_PW | TEE_MATTR_PR | (flags & TEE_MATTR_LOCKED);
1677 	uint8_t *smem;
1678 	size_t bytes;
1679 
1680 	if (!size)
1681 		return NULL;
1682 
1683 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1684 	if (!mm)
1685 		return NULL;
1686 
1687 	bytes = tee_mm_get_bytes(mm);
1688 	smem = (uint8_t *)tee_mm_get_smem(mm);
1689 	tee_pager_add_core_area((vaddr_t)smem, bytes, f, NULL, NULL);
1690 	asan_tag_access(smem, smem + bytes);
1691 
1692 	return smem;
1693 }
1694