xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision abccd9090fb4e8b9e838ccc5deeac4f7d2c901fb)
1 /*
2  * Copyright (c) 2016, Linaro Limited
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <arm.h>
30 #include <assert.h>
31 #include <io.h>
32 #include <keep.h>
33 #include <kernel/abort.h>
34 #include <kernel/panic.h>
35 #include <kernel/spinlock.h>
36 #include <kernel/tee_misc.h>
37 #include <kernel/tee_ta_manager.h>
38 #include <kernel/thread.h>
39 #include <kernel/tlb_helpers.h>
40 #include <mm/core_memprot.h>
41 #include <mm/tee_mm.h>
42 #include <mm/tee_pager.h>
43 #include <stdlib.h>
44 #include <sys/queue.h>
45 #include <tee_api_defines.h>
46 #include <tee/tee_cryp_provider.h>
47 #include <trace.h>
48 #include <types_ext.h>
49 #include <utee_defines.h>
50 #include <util.h>
51 
52 #include "pager_private.h"
53 
54 #define PAGER_AE_KEY_BITS	256
55 
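/*
 * struct pager_rw_pstate - Per-page crypto state for a paged-out RW page
 * @iv		Counter used to construct the AES-GCM IV, bumped on every
 *		encryption of the page
 * @tag		AES-GCM authentication tag of the stored (encrypted) page
 */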
56 struct pager_rw_pstate {
57 	uint64_t iv;
58 	uint8_t tag[PAGER_AES_GCM_TAG_LEN];
59 };
60 
61 enum area_type {
62 	AREA_TYPE_RO,
63 	AREA_TYPE_RW,
64 	AREA_TYPE_LOCK,
65 };
66 
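/*
 * struct tee_pager_area - Virtual address range managed by the pager
 * @u.hashes	Per-page SHA-256 hashes (read-only areas)
 * @u.rwp	Per-page crypto state (read/write areas)
 * @store	Backing store holding the paged-out content
 * @type	AREA_TYPE_RO, AREA_TYPE_RW or AREA_TYPE_LOCK
 * @flags	TEE_MATTR_* flags giving the mapping attributes of the area
 * @base	Virtual base address of the area
 * @size	Size of the area in bytes
 * @pgt		Page table holding the entries of this area
 */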
67 struct tee_pager_area {
68 	union {
69 		const uint8_t *hashes;
70 		struct pager_rw_pstate *rwp;
71 	} u;
72 	uint8_t *store;
73 	enum area_type type;
74 	uint32_t flags;
75 	vaddr_t base;
76 	size_t size;
77 	struct pgt *pgt;
78 	TAILQ_ENTRY(tee_pager_area) link;
79 };
80 
81 TAILQ_HEAD(tee_pager_area_head, tee_pager_area);
82 
83 static struct tee_pager_area_head tee_pager_area_head =
84 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
85 
86 #define INVALID_PGIDX	UINT_MAX
87 
88 /*
89  * struct tee_pager_pmem - Represents a physical page used for paging.
90  *
91  * @pgidx	Index of the entry in the area's page table (area->pgt)
92  * @va_alias	Virtual address where the physical page always is aliased.
93  *		Used during remapping of the page when the content needs to
94  *		be updated before it's available at the new location.
95  * @area	Pointer to the pager area currently using this page
96  */
97 struct tee_pager_pmem {
98 	unsigned pgidx;
99 	void *va_alias;
100 	struct tee_pager_area *area;
101 	TAILQ_ENTRY(tee_pager_pmem) link;
102 };
103 
104 /* The list of physical pages. The first page in the list is the oldest */
105 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
106 
107 static struct tee_pager_pmem_head tee_pager_pmem_head =
108 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
109 
110 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
111 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
112 
113 static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8];
114 
115 /* Maximum number of pages to hide at a time */
116 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
117 
118 /* Number of registered physical pages, used when hiding pages. */
119 static size_t tee_pager_npages;
120 
121 #ifdef CFG_WITH_STATS
122 static struct tee_pager_stats pager_stats;
123 
124 static inline void incr_ro_hits(void)
125 {
126 	pager_stats.ro_hits++;
127 }
128 
129 static inline void incr_rw_hits(void)
130 {
131 	pager_stats.rw_hits++;
132 }
133 
134 static inline void incr_hidden_hits(void)
135 {
136 	pager_stats.hidden_hits++;
137 }
138 
139 static inline void incr_zi_released(void)
140 {
141 	pager_stats.zi_released++;
142 }
143 
144 static inline void incr_npages_all(void)
145 {
146 	pager_stats.npages_all++;
147 }
148 
149 static inline void set_npages(void)
150 {
151 	pager_stats.npages = tee_pager_npages;
152 }
153 
154 void tee_pager_get_stats(struct tee_pager_stats *stats)
155 {
156 	*stats = pager_stats;
157 
158 	pager_stats.hidden_hits = 0;
159 	pager_stats.ro_hits = 0;
160 	pager_stats.rw_hits = 0;
161 	pager_stats.zi_released = 0;
162 }
163 
164 #else /* CFG_WITH_STATS */
165 static inline void incr_ro_hits(void) { }
166 static inline void incr_rw_hits(void) { }
167 static inline void incr_hidden_hits(void) { }
168 static inline void incr_zi_released(void) { }
169 static inline void incr_npages_all(void) { }
170 static inline void set_npages(void) { }
171 
172 void tee_pager_get_stats(struct tee_pager_stats *stats)
173 {
174 	memset(stats, 0, sizeof(struct tee_pager_stats));
175 }
176 #endif /* CFG_WITH_STATS */
177 
178 #define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
179 #define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
180 #define TBL_SHIFT	SMALL_PAGE_SHIFT
181 
182 #define EFFECTIVE_VA_SIZE \
183 	(ROUNDUP(TEE_RAM_VA_START + CFG_TEE_RAM_VA_SIZE, \
184 		 CORE_MMU_PGDIR_SIZE) - \
185 	 ROUNDDOWN(TEE_RAM_VA_START, CORE_MMU_PGDIR_SIZE))
186 
187 static struct pager_table {
188 	struct pgt pgt;
189 	struct core_mmu_table_info tbl_info;
190 } pager_tables[EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE];
191 
192 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
193 
194 /* Defines the range of the alias area */
195 static tee_mm_entry_t *pager_alias_area;
196 /*
197  * Physical pages are added in a stack-like fashion to the alias area.
198  * @pager_alias_next_free gives the address of the next free entry if
199  * @pager_alias_next_free is != 0.
200  */
201 static uintptr_t pager_alias_next_free;
202 
203 #ifdef CFG_TEE_CORE_DEBUG
204 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
205 
206 static uint32_t pager_lock_dldetect(const char *func, const int line,
207 				    struct abort_info *ai)
208 {
209 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
210 	unsigned int retries = 0;
211 	unsigned int reminder = 0;
212 
213 	while (!cpu_spin_trylock(&pager_spinlock)) {
214 		retries++;
215 		if (!retries) {
216 			/* wrapped, time to report */
217 			trace_printf(func, line, TRACE_ERROR, true,
218 				     "possible spinlock deadlock reminder %u",
219 				     reminder);
220 			if (reminder < UINT_MAX)
221 				reminder++;
222 			if (ai)
223 				abort_print(ai);
224 		}
225 	}
226 
227 	return exceptions;
228 }
229 #else
230 static uint32_t pager_lock(struct abort_info __unused *ai)
231 {
232 	return cpu_spin_lock_xsave(&pager_spinlock);
233 }
234 #endif
235 
236 static uint32_t pager_lock_check_stack(size_t stack_size)
237 {
238 	if (stack_size) {
239 		int8_t buf[stack_size];
240 		size_t n;
241 
242 		/*
243 		 * Make sure to touch all pages of the stack that we expect
244 		 * to use with this lock held. We need to take any potential
245 		 * page faults before the lock is taken or we'll deadlock
246 		 * the pager. The pages that are populated in this way will
247 		 * eventually be released at certain save transitions of
248 		 * the thread.
249 		 */
250 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
251 			write8(1, (vaddr_t)buf + n);
252 		write8(1, (vaddr_t)buf + stack_size - 1);
253 	}
254 
255 	return pager_lock(NULL);
256 }
257 
258 static void pager_unlock(uint32_t exceptions)
259 {
260 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
261 }
262 
263 void *tee_pager_phys_to_virt(paddr_t pa)
264 {
265 	struct core_mmu_table_info ti;
266 	unsigned idx;
267 	uint32_t a;
268 	paddr_t p;
269 	vaddr_t v;
270 	size_t n;
271 
272 	/*
273 	 * Most addresses are mapped linearly, so try that first if possible.
274 	 */
275 	if (!tee_pager_get_table_info(pa, &ti))
276 		return NULL; /* impossible pa */
277 	idx = core_mmu_va2idx(&ti, pa);
278 	core_mmu_get_entry(&ti, idx, &p, &a);
279 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
280 		return (void *)core_mmu_idx2va(&ti, idx);
281 
282 	n = 0;
283 	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
284 	while (true) {
285 		while (idx < TBL_NUM_ENTRIES) {
286 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
287 			if (v >= (TEE_RAM_VA_START + CFG_TEE_RAM_VA_SIZE))
288 				return NULL;
289 
290 			core_mmu_get_entry(&pager_tables[n].tbl_info,
291 					   idx, &p, &a);
292 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
293 				return (void *)v;
294 			idx++;
295 		}
296 
297 		n++;
298 		if (n >= ARRAY_SIZE(pager_tables))
299 			return NULL;
300 		idx = 0;
301 	}
302 
303 	return NULL;
304 }
305 
306 static struct pager_table *find_pager_table_may_fail(vaddr_t va)
307 {
308 	size_t n;
309 	const vaddr_t mask = CORE_MMU_PGDIR_MASK;
310 
311 	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
312 	    CORE_MMU_PGDIR_SHIFT;
313 	if (n >= ARRAY_SIZE(pager_tables))
314 		return NULL;
315 
316 	assert(va >= pager_tables[n].tbl_info.va_base &&
317 	       va <= (pager_tables[n].tbl_info.va_base | mask));
318 
319 	return pager_tables + n;
320 }
321 
322 static struct pager_table *find_pager_table(vaddr_t va)
323 {
324 	struct pager_table *pt = find_pager_table_may_fail(va);
325 
326 	assert(pt);
327 	return pt;
328 }
329 
330 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
331 {
332 	struct pager_table *pt = find_pager_table_may_fail(va);
333 
334 	if (!pt)
335 		return false;
336 
337 	*ti = pt->tbl_info;
338 	return true;
339 }
340 
341 static struct core_mmu_table_info *find_table_info(vaddr_t va)
342 {
343 	return &find_pager_table(va)->tbl_info;
344 }
345 
346 static struct pgt *find_core_pgt(vaddr_t va)
347 {
348 	return &find_pager_table(va)->pgt;
349 }
350 
351 static void set_alias_area(tee_mm_entry_t *mm)
352 {
353 	struct pager_table *pt;
354 	unsigned idx;
355 	vaddr_t smem = tee_mm_get_smem(mm);
356 	size_t nbytes = tee_mm_get_bytes(mm);
357 	vaddr_t v;
358 
359 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
360 
361 	assert(!pager_alias_area);
362 	pager_alias_area = mm;
363 	pager_alias_next_free = smem;
364 
365 	/* Clear all mappings in the alias area */
366 	pt = find_pager_table(smem);
367 	idx = core_mmu_va2idx(&pt->tbl_info, smem);
368 	while (pt <= (pager_tables + ARRAY_SIZE(pager_tables) - 1)) {
369 		while (idx < TBL_NUM_ENTRIES) {
370 			v = core_mmu_idx2va(&pt->tbl_info, idx);
371 			if (v > (smem + nbytes))
372 				goto out;
373 
374 			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
375 			idx++;
376 		}
377 
378 		pt++;
379 		idx = 0;
380 	}
381 
382 out:
383 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
384 }
385 
386 static void generate_ae_key(void)
387 {
388 	if (rng_generate(pager_ae_key, sizeof(pager_ae_key)) != TEE_SUCCESS)
389 		panic("failed to generate random");
390 }
391 
392 static size_t tbl_usage_count(struct core_mmu_table_info *ti)
393 {
394 	size_t n;
395 	paddr_t pa;
396 	size_t usage = 0;
397 
398 	for (n = 0; n < ti->num_entries; n++) {
399 		core_mmu_get_entry(ti, n, &pa, NULL);
400 		if (pa)
401 			usage++;
402 	}
403 	return usage;
404 }
405 
406 static void area_get_entry(struct tee_pager_area *area, size_t idx,
407 			   paddr_t *pa, uint32_t *attr)
408 {
409 	assert(area->pgt);
410 	assert(idx < TBL_NUM_ENTRIES);
411 	core_mmu_get_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
412 }
413 
414 static void area_set_entry(struct tee_pager_area *area, size_t idx,
415 			   paddr_t pa, uint32_t attr)
416 {
417 	assert(area->pgt);
418 	assert(idx < TBL_NUM_ENTRIES);
419 	core_mmu_set_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
420 }
421 
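/*
 * area_va2idx()/area_idx2va() translate between a virtual address and the
 * index of its entry in the area's page table, which covers the whole
 * PGDIR-aligned region the area lives in.
 */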
422 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
423 {
424 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
425 }
426 
427 static vaddr_t area_idx2va(struct tee_pager_area *area, size_t idx)
428 {
429 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
430 }
431 
432 void tee_pager_early_init(void)
433 {
434 	size_t n;
435 
436 	/*
437 	 * Note that this depends on add_pager_vaspace() adding vaspace
438 	 * after end of memory.
439 	 */
440 	for (n = 0; n < ARRAY_SIZE(pager_tables); n++) {
441 		if (!core_mmu_find_table(TEE_RAM_VA_START +
442 					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
443 					 &pager_tables[n].tbl_info))
444 			panic("can't find mmu tables");
445 
446 		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
447 			panic("Unsupported page size in translation table");
448 		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
449 		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);
450 
451 		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
452 		pgt_set_used_entries(&pager_tables[n].pgt,
453 				tbl_usage_count(&pager_tables[n].tbl_info));
454 	}
455 }
456 
457 void tee_pager_init(tee_mm_entry_t *mm_alias)
458 {
459 	set_alias_area(mm_alias);
460 	generate_ae_key();
461 }
462 
463 static void *pager_add_alias_page(paddr_t pa)
464 {
465 	unsigned idx;
466 	struct core_mmu_table_info *ti;
467 	/* Alias pages are mapped read-only; write access is enabled at runtime when needed */
468 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
469 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
470 			TEE_MATTR_SECURE | TEE_MATTR_PR;
471 
472 	DMSG("0x%" PRIxPA, pa);
473 
474 	ti = find_table_info(pager_alias_next_free);
475 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
476 	core_mmu_set_entry(ti, idx, pa, attr);
477 	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
478 	pager_alias_next_free += SMALL_PAGE_SIZE;
479 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
480 				      tee_mm_get_bytes(pager_alias_area)))
481 		pager_alias_next_free = 0;
482 	return (void *)core_mmu_idx2va(ti, idx);
483 }
484 
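/*
 * Allocates and initializes an area descriptor. Writable (non-locked) areas
 * also get a backing store in secure DDR and per-page crypto state, while
 * read-only areas reference the caller supplied store and hashes directly.
 */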
485 static struct tee_pager_area *alloc_area(struct pgt *pgt,
486 					 vaddr_t base, size_t size,
487 					 uint32_t flags, const void *store,
488 					 const void *hashes)
489 {
490 	struct tee_pager_area *area = calloc(1, sizeof(*area));
491 	enum area_type at;
492 	tee_mm_entry_t *mm_store = NULL;
493 
494 	if (!area)
495 		return NULL;
496 
497 	if (flags & (TEE_MATTR_PW | TEE_MATTR_UW)) {
498 		if (flags & TEE_MATTR_LOCKED) {
499 			at = AREA_TYPE_LOCK;
500 			goto out;
501 		}
502 		mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
503 		if (!mm_store)
504 			goto bad;
505 		area->store = phys_to_virt(tee_mm_get_smem(mm_store),
506 					   MEM_AREA_TA_RAM);
507 		if (!area->store)
508 			goto bad;
509 		area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
510 				     sizeof(struct pager_rw_pstate));
511 		if (!area->u.rwp)
512 			goto bad;
513 		at = AREA_TYPE_RW;
514 	} else {
515 		area->store = (void *)store;
516 		area->u.hashes = hashes;
517 		at = AREA_TYPE_RO;
518 	}
519 out:
520 	area->pgt = pgt;
521 	area->base = base;
522 	area->size = size;
523 	area->flags = flags;
524 	area->type = at;
525 	return area;
526 bad:
527 	tee_mm_free(mm_store);
528 	free(area->u.rwp);
529 	free(area);
530 	return NULL;
531 }
532 
533 static void area_insert_tail(struct tee_pager_area *area)
534 {
535 	uint32_t exceptions = pager_lock_check_stack(8);
536 
537 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
538 
539 	pager_unlock(exceptions);
540 }
541 KEEP_PAGER(area_insert_tail);
542 
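/*
 * Illustrative sketch only, the symbols below (__pageable_start,
 * pageable_size, store, hashes) are hypothetical: a read-only executable
 * core area backed by a store and per-page SHA-256 hashes could be
 * registered roughly as
 *
 *   tee_pager_add_core_area((vaddr_t)__pageable_start, pageable_size,
 *                           TEE_MATTR_PR | TEE_MATTR_PX, store, hashes);
 *
 * while a writable area passes NULL for both store and hashes.
 */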
543 void tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
544 			     const void *store, const void *hashes)
545 {
546 	struct tee_pager_area *area;
547 	vaddr_t b = base;
548 	size_t s = size;
549 	size_t s2;
550 
551 
552 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
553 		base, base + size, flags, store, hashes);
554 
555 	if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) {
556 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);
557 		panic();
558 	}
559 
560 	if (!(flags & TEE_MATTR_PW) && (!store || !hashes))
561 		panic("non-write pages must provide store and hashes");
562 
563 	if ((flags & TEE_MATTR_PW) && (store || hashes))
564 		panic("write pages cannot provide store or hashes");
565 
566 	while (s) {
567 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
568 		area = alloc_area(find_core_pgt(b), b, s2, flags,
569 				  (const uint8_t *)store + b - base,
570 				  (const uint8_t *)hashes + (b - base) /
571 							SMALL_PAGE_SIZE *
572 							TEE_SHA256_HASH_SIZE);
573 		if (!area)
574 			panic("alloc_area");
575 		area_insert_tail(area);
576 		b += s2;
577 		s -= s2;
578 	}
579 }
580 
581 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
582 					vaddr_t va)
583 {
584 	struct tee_pager_area *area;
585 
586 	if (!areas)
587 		return NULL;
588 
589 	TAILQ_FOREACH(area, areas, link) {
590 		if (core_is_buffer_inside(va, 1, area->base, area->size))
591 			return area;
592 	}
593 	return NULL;
594 }
595 
596 #ifdef CFG_PAGED_USER_TA
597 static struct tee_pager_area *find_uta_area(vaddr_t va)
598 {
599 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
600 
601 	if (!ctx || !is_user_ta_ctx(ctx))
602 		return NULL;
603 	return find_area(to_user_ta_ctx(ctx)->areas, va);
604 }
605 #else
606 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
607 {
608 	return NULL;
609 }
610 #endif /*CFG_PAGED_USER_TA*/
611 
612 
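/*
 * Converts area flags into the full set of mapping attributes used in the
 * page table entry. Areas without any user permissions are mapped global.
 */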
613 static uint32_t get_area_mattr(uint32_t area_flags)
614 {
615 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
616 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
617 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
618 
619 	if (!(area_flags & (TEE_MATTR_UR | TEE_MATTR_UX | TEE_MATTR_UW)))
620 		attr |= TEE_MATTR_GLOBAL;
621 
622 	return attr;
623 }
624 
625 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
626 {
627 	struct core_mmu_table_info *ti;
628 	paddr_t pa;
629 	unsigned idx;
630 
631 	ti = find_table_info((vaddr_t)pmem->va_alias);
632 	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
633 	core_mmu_get_entry(ti, idx, &pa, NULL);
634 	return pa;
635 }
636 
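/*
 * RW pages are stored AES-GCM encrypted with the per-boot pager_ae_key.
 * The IV is derived from the address of the page's pager_rw_pstate and its
 * monotonically increasing counter, and rwp->tag authenticates the page
 * when it is paged back in.
 */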
637 static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
638 			void *dst)
639 {
640 	struct pager_aes_gcm_iv iv = {
641 		{ (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
642 	};
643 
644 	return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key),
645 				     &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE);
646 }
647 
648 static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
649 {
650 	struct pager_aes_gcm_iv iv;
651 
652 	assert((rwp->iv + 1) > rwp->iv);
653 	rwp->iv++;
654 	/*
655 	 * IV is constructed as recommended in section "8.2.1 Deterministic
656 	 * Construction" of "Recommendation for Block Cipher Modes of
657 	 * Operation: Galois/Counter Mode (GCM) and GMAC",
658 	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
659 	 */
660 	iv.iv[0] = (vaddr_t)rwp;
661 	iv.iv[1] = rwp->iv >> 32;
662 	iv.iv[2] = rwp->iv;
663 
664 	if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key),
665 				   &iv, rwp->tag,
666 				   src, dst, SMALL_PAGE_SIZE))
667 		panic("gcm failed");
668 }
669 
670 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
671 			void *va_alias)
672 {
673 	size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
674 	const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;
675 	struct core_mmu_table_info *ti;
676 	uint32_t attr_alias;
677 	paddr_t pa_alias;
678 	unsigned int idx_alias;
679 
680 	/* Ensure we are allowed to write to the aliased virtual page */
681 	ti = find_table_info((vaddr_t)va_alias);
682 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
683 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
684 	if (!(attr_alias & TEE_MATTR_PW)) {
685 		attr_alias |= TEE_MATTR_PW;
686 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
687 		tlbi_mva_allasid((vaddr_t)va_alias);
688 	}
689 
690 	switch (area->type) {
691 	case AREA_TYPE_RO:
692 		{
693 			const void *hash = area->u.hashes +
694 					   idx * TEE_SHA256_HASH_SIZE;
695 
696 			memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
697 			incr_ro_hits();
698 
699 			if (hash_sha256_check(hash, va_alias,
700 					      SMALL_PAGE_SIZE) != TEE_SUCCESS) {
701 				EMSG("PH 0x%" PRIxVA " failed", page_va);
702 				panic();
703 			}
704 		}
705 		/* Forbid write to aliases for read-only (maybe exec) pages */
706 		attr_alias &= ~TEE_MATTR_PW;
707 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
708 		tlbi_mva_allasid((vaddr_t)va_alias);
709 		break;
710 	case AREA_TYPE_RW:
711 		FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
712 			va_alias, page_va, area->u.rwp[idx].iv);
713 		if (!area->u.rwp[idx].iv)
714 			memset(va_alias, 0, SMALL_PAGE_SIZE);
715 		else if (!decrypt_page(&area->u.rwp[idx], stored_page,
716 				       va_alias)) {
717 			EMSG("PH 0x%" PRIxVA " failed", page_va);
718 			panic();
719 		}
720 		incr_rw_hits();
721 		break;
722 	case AREA_TYPE_LOCK:
723 		FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
724 		memset(va_alias, 0, SMALL_PAGE_SIZE);
725 		break;
726 	default:
727 		panic();
728 	}
729 }
730 
731 static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
732 {
733 	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
734 				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;
735 
736 	if (pmem->area->type == AREA_TYPE_RW && (attr & dirty_bits)) {
737 		size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
738 		size_t idx = pmem->pgidx - (offs >> SMALL_PAGE_SHIFT);
739 		void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;
740 
741 		assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW));
742 		encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
743 			     stored_page);
744 		FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
745 			pmem->area->base + idx * SMALL_PAGE_SIZE,
746 			pmem->area->u.rwp[idx].iv);
747 	}
748 }
749 
750 #ifdef CFG_PAGED_USER_TA
751 static void free_area(struct tee_pager_area *area)
752 {
753 	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr,
754 				virt_to_phys(area->store)));
755 	if (area->type == AREA_TYPE_RW)
756 		free(area->u.rwp);
757 	free(area);
758 }
759 
760 static bool pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
761 			       size_t size)
762 {
763 	struct tee_pager_area *area;
764 	uint32_t flags;
765 	vaddr_t b = base;
766 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
767 
768 	if (!utc->areas) {
769 		utc->areas = malloc(sizeof(*utc->areas));
770 		if (!utc->areas)
771 			return false;
772 		TAILQ_INIT(utc->areas);
773 	}
774 
775 	flags = TEE_MATTR_PRW | TEE_MATTR_URWX;
776 
777 	while (s) {
778 		size_t s2;
779 
780 		if (find_area(utc->areas, b))
781 			return false;
782 
783 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
784 
785 		/* Table info will be set when the context is activated. */
786 		area = alloc_area(NULL, b, s2, flags, NULL, NULL);
787 		if (!area)
788 			return false;
789 		TAILQ_INSERT_TAIL(utc->areas, area, link);
790 		b += s2;
791 		s -= s2;
792 	}
793 
794 	return true;
795 }
796 
797 bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size)
798 {
799 	struct thread_specific_data *tsd = thread_get_tsd();
800 	struct tee_pager_area *area;
801 	struct core_mmu_table_info dir_info = { NULL };
802 
803 	if (&utc->ctx != tsd->ctx) {
804 		/*
805 		 * Changes are to a utc that isn't active. Just add the
806 		 * areas; page tables will be dealt with later.
807 		 */
808 		return pager_add_uta_area(utc, base, size);
809 	}
810 
811 	/*
812 	 * Assign page tables before adding areas to be able to tell which
813 	 * are newly added and should be removed in case of failure.
814 	 */
815 	tee_pager_assign_uta_tables(utc);
816 	if (!pager_add_uta_area(utc, base, size)) {
817 		struct tee_pager_area *next_a;
818 
819 		/* Remove all added areas */
820 		TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
821 			if (!area->pgt) {
822 				TAILQ_REMOVE(utc->areas, area, link);
823 				free_area(area);
824 			}
825 		}
826 		return false;
827 	}
828 
829 	/*
830 	 * Assign page tables to the new areas and make sure that the page
831 	 * tables are registered in the upper table.
832 	 */
833 	tee_pager_assign_uta_tables(utc);
834 	core_mmu_get_user_pgdir(&dir_info);
835 	TAILQ_FOREACH(area, utc->areas, link) {
836 		paddr_t pa;
837 		size_t idx;
838 		uint32_t attr;
839 
840 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
841 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
842 
843 		 * Check if the page table is already in use; if it is, it's
844 		 * Check if the page table already is used, if it is, it's
845 		 * already registered.
846 		 */
847 		if (area->pgt->num_used_entries) {
848 			assert(attr & TEE_MATTR_TABLE);
849 			assert(pa == virt_to_phys(area->pgt->tbl));
850 			continue;
851 		}
852 
853 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
854 		pa = virt_to_phys(area->pgt->tbl);
855 		assert(pa);
856 		/*
857 		 * Note that the update of the table entry is guaranteed to
858 		 * be atomic.
859 		 */
860 		core_mmu_set_entry(&dir_info, idx, pa, attr);
861 	}
862 
863 	return true;
864 }
865 
866 static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
867 				   struct pgt *pgt)
868 {
869 	assert(pgt);
870 	ti->table = pgt->tbl;
871 	ti->va_base = pgt->vabase;
872 	ti->level = TBL_LEVEL;
873 	ti->shift = TBL_SHIFT;
874 	ti->num_entries = TBL_NUM_ENTRIES;
875 }
876 
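/*
 * Moves an area, together with any physical pages currently mapping it,
 * from its old page table and base address to new_pgt/new_base. Used when
 * a region is transferred from one TA context to another.
 */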
877 static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
878 			   vaddr_t new_base)
879 {
880 	uint32_t exceptions = pager_lock_check_stack(64);
881 
882 	/*
883 	 * If there's no pgt assigned to the old area there's no pages to
884 	 * deal with either, just update with a new pgt and base.
885 	 */
886 	if (area->pgt) {
887 		struct core_mmu_table_info old_ti;
888 		struct core_mmu_table_info new_ti;
889 		struct tee_pager_pmem *pmem;
890 
891 		init_tbl_info_from_pgt(&old_ti, area->pgt);
892 		init_tbl_info_from_pgt(&new_ti, new_pgt);
893 
894 
895 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
896 			vaddr_t va;
897 			paddr_t pa;
898 			uint32_t attr;
899 
900 			if (pmem->area != area)
901 				continue;
902 			core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr);
903 			core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0);
904 
905 			assert(pa == get_pmem_pa(pmem));
906 			assert(attr);
907 			assert(area->pgt->num_used_entries);
908 			area->pgt->num_used_entries--;
909 
910 			va = core_mmu_idx2va(&old_ti, pmem->pgidx);
911 			va = va - area->base + new_base;
912 			pmem->pgidx = core_mmu_va2idx(&new_ti, va);
913 			core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr);
914 			new_pgt->num_used_entries++;
915 		}
916 	}
917 
918 	area->pgt = new_pgt;
919 	area->base = new_base;
920 	pager_unlock(exceptions);
921 }
922 KEEP_PAGER(transpose_area);
923 
924 void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
925 				   vaddr_t src_base,
926 				   struct user_ta_ctx *dst_utc,
927 				   vaddr_t dst_base, struct pgt **dst_pgt,
928 				   size_t size)
929 {
930 	struct tee_pager_area *area;
931 	struct tee_pager_area *next_a;
932 
933 	TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
934 		vaddr_t new_area_base;
935 		size_t new_idx;
936 
937 		if (!core_is_buffer_inside(area->base, area->size,
938 					  src_base, size))
939 			continue;
940 
941 		TAILQ_REMOVE(src_utc->areas, area, link);
942 
943 		new_area_base = dst_base + (src_base - area->base);
944 		new_idx = (new_area_base - dst_pgt[0]->vabase) /
945 			  CORE_MMU_PGDIR_SIZE;
946 		assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
947 		       dst_pgt[new_idx]->vabase);
948 		transpose_area(area, dst_pgt[new_idx], new_area_base);
949 
950 		/*
951 		 * Assert that this will not cause any conflicts in the new
952 		 * utc.  This should already be guaranteed, but a bug here
953 		 * could be tricky to find.
954 		 */
955 		assert(!find_area(dst_utc->areas, area->base));
956 		TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
957 	}
958 }
959 
960 static void rem_area(struct tee_pager_area_head *area_head,
961 		     struct tee_pager_area *area)
962 {
963 	struct tee_pager_pmem *pmem;
964 	uint32_t exceptions;
965 
966 	exceptions = pager_lock_check_stack(64);
967 
968 	TAILQ_REMOVE(area_head, area, link);
969 
970 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
971 		if (pmem->area == area) {
972 			area_set_entry(area, pmem->pgidx, 0, 0);
973 			tlbi_mva_allasid(area_idx2va(area, pmem->pgidx));
974 			pgt_dec_used_entries(area->pgt);
975 			pmem->area = NULL;
976 			pmem->pgidx = INVALID_PGIDX;
977 		}
978 	}
979 
980 	pager_unlock(exceptions);
981 	free_area(area);
982 }
983 KEEP_PAGER(rem_area);
984 
985 void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
986 			      size_t size)
987 {
988 	struct tee_pager_area *area;
989 	struct tee_pager_area *next_a;
990 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
991 
992 	TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
993 		if (core_is_buffer_inside(area->base, area->size, base, s))
994 			rem_area(utc->areas, area);
995 	}
996 }
997 
998 void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
999 {
1000 	struct tee_pager_area *area;
1001 
1002 	if (!utc->areas)
1003 		return;
1004 
1005 	while (true) {
1006 		area = TAILQ_FIRST(utc->areas);
1007 		if (!area)
1008 			break;
1009 		TAILQ_REMOVE(utc->areas, area, link);
1010 		free_area(area);
1011 	}
1012 
1013 	free(utc->areas);
1014 }
1015 
1016 bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
1017 				 size_t size, uint32_t flags)
1018 {
1019 	bool ret;
1020 	vaddr_t b = base;
1021 	size_t s = size;
1022 	size_t s2;
1023 	struct tee_pager_area *area = find_area(utc->areas, b);
1024 	uint32_t exceptions;
1025 	struct tee_pager_pmem *pmem;
1026 	paddr_t pa;
1027 	uint32_t a;
1028 	uint32_t f;
1029 
1030 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
1031 	if (f & TEE_MATTR_UW)
1032 		f |= TEE_MATTR_PW;
1033 	f = get_area_mattr(f);
1034 
1035 	exceptions = pager_lock_check_stack(64);
1036 
1037 	while (s) {
1038 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
1039 		if (!area || area->base != b || area->size != s2) {
1040 			ret = false;
1041 			goto out;
1042 		}
1043 		b += s2;
1044 		s -= s2;
1045 
1046 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1047 			if (pmem->area != area)
1048 				continue;
1049 			area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
1050 			if (a & TEE_MATTR_VALID_BLOCK)
1051 				assert(pa == get_pmem_pa(pmem));
1052 			else
1053 				pa = get_pmem_pa(pmem);
1054 			if (a == f)
1055 				continue;
1056 			area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1057 			tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1058 			if (!(flags & TEE_MATTR_UW))
1059 				tee_pager_save_page(pmem, a);
1060 
1061 			area_set_entry(pmem->area, pmem->pgidx, pa, f);
1062 			/*
1063 			 * Make sure the table update is visible before
1064 			 * continuing.
1065 			 */
1066 			dsb_ishst();
1067 
1068 			if (flags & TEE_MATTR_UX) {
1069 				void *va = (void *)area_idx2va(pmem->area,
1070 							       pmem->pgidx);
1071 
1072 				cache_op_inner(DCACHE_AREA_CLEAN, va,
1073 						SMALL_PAGE_SIZE);
1074 				cache_op_inner(ICACHE_AREA_INVALIDATE, va,
1075 						SMALL_PAGE_SIZE);
1076 			}
1077 		}
1078 
1079 		area->flags = f;
1080 		area = TAILQ_NEXT(area, link);
1081 	}
1082 
1083 	ret = true;
1084 out:
1085 	pager_unlock(exceptions);
1086 	return ret;
1087 }
1088 KEEP_PAGER(tee_pager_set_uta_area_attr);
1089 #endif /*CFG_PAGED_USER_TA*/
1090 
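/*
 * If page_va is covered by a hidden physical page its mapping is restored,
 * read-only unless it was hidden dirty, and the page is moved to the tail
 * of the pmem list so it counts as most recently used. Returns true if a
 * hidden page was found.
 */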
1091 static bool tee_pager_unhide_page(vaddr_t page_va)
1092 {
1093 	struct tee_pager_pmem *pmem;
1094 
1095 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1096 		paddr_t pa;
1097 		uint32_t attr;
1098 
1099 		if (pmem->pgidx == INVALID_PGIDX)
1100 			continue;
1101 
1102 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
1103 
1104 		if (!(attr &
1105 		     (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
1106 			continue;
1107 
1108 		if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
1109 			uint32_t a = get_area_mattr(pmem->area->flags);
1110 
1111 			/* page is hidden, show and move to back */
1112 			if (pa != get_pmem_pa(pmem))
1113 				panic("unexpected pa");
1114 
1115 			/*
1116 			 * If it's not a dirty block, then it should be
1117 			 * read only.
1118 			 */
1119 			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
1120 				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1121 			else
1122 				FMSG("Unhide %#" PRIxVA, page_va);
1123 
1124 			if (page_va == 0x8000a000)
1125 				FMSG("unhide %#" PRIxVA " a %#" PRIX32,
1126 					page_va, a);
1127 			area_set_entry(pmem->area, pmem->pgidx, pa, a);
1128 			/*
1129 			 * Note that TLB invalidation isn't needed since
1130 			 * there wasn't a valid mapping before. We should
1131 			 * use a barrier though, to make sure that the
1132 			 * change is visible.
1133 			 */
1134 			dsb_ishst();
1135 
1136 			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1137 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1138 			incr_hidden_hits();
1139 			return true;
1140 		}
1141 	}
1142 
1143 	return false;
1144 }
1145 
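/*
 * Hides up to TEE_PAGER_NHIDE of the oldest mapped pages so that the next
 * access to them faults. This keeps the approximate LRU order of the pmem
 * list up to date.
 */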
1146 static void tee_pager_hide_pages(void)
1147 {
1148 	struct tee_pager_pmem *pmem;
1149 	size_t n = 0;
1150 
1151 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1152 		paddr_t pa;
1153 		uint32_t attr;
1154 		uint32_t a;
1155 
1156 		if (n >= TEE_PAGER_NHIDE)
1157 			break;
1158 		n++;
1159 
1160 		/* we cannot hide pages when pmem->area is not defined. */
1161 		if (!pmem->area)
1162 			continue;
1163 
1164 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
1165 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1166 			continue;
1167 
1168 		assert(pa == get_pmem_pa(pmem));
1169 		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)){
1170 			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
1171 			FMSG("Hide %#" PRIxVA,
1172 			     area_idx2va(pmem->area, pmem->pgidx));
1173 		} else
1174 			a = TEE_MATTR_HIDDEN_BLOCK;
1175 
1176 		area_set_entry(pmem->area, pmem->pgidx, pa, a);
1177 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1178 	}
1179 }
1180 
1181 /*
1182  * Find the mapped pmem, unmap it and move it back to the pageable pmem list.
1183  * Return false if page was not mapped, and true if page was mapped.
1184  */
1185 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1186 				       vaddr_t page_va)
1187 {
1188 	struct tee_pager_pmem *pmem;
1189 	unsigned pgidx;
1190 	paddr_t pa;
1191 	uint32_t attr;
1192 
1193 	pgidx = area_va2idx(area, page_va);
1194 	area_get_entry(area, pgidx, &pa, &attr);
1195 
1196 	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
1197 
1198 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1199 		if (pmem->area != area || pmem->pgidx != pgidx)
1200 			continue;
1201 
1202 		assert(pa == get_pmem_pa(pmem));
1203 		area_set_entry(area, pgidx, 0, 0);
1204 		pgt_dec_used_entries(area->pgt);
1205 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1206 		pmem->area = NULL;
1207 		pmem->pgidx = INVALID_PGIDX;
1208 		tee_pager_npages++;
1209 		set_npages();
1210 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1211 		incr_zi_released();
1212 		return true;
1213 	}
1214 
1215 	return false;
1216 }
1217 
1218 /* Finds the oldest page and unmaps it from its old virtual address */
1219 static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
1220 {
1221 	struct tee_pager_pmem *pmem;
1222 
1223 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1224 	if (!pmem) {
1225 		EMSG("No pmem entries");
1226 		return NULL;
1227 	}
1228 	if (pmem->pgidx != INVALID_PGIDX) {
1229 		uint32_t a;
1230 
1231 		assert(pmem->area && pmem->area->pgt);
1232 		area_get_entry(pmem->area, pmem->pgidx, NULL, &a);
1233 		area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1234 		pgt_dec_used_entries(pmem->area->pgt);
1235 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1236 		tee_pager_save_page(pmem, a);
1237 	}
1238 
1239 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1240 	pmem->pgidx = INVALID_PGIDX;
1241 	pmem->area = NULL;
1242 	if (area->type == AREA_TYPE_LOCK) {
1243 		/* Move page to lock list */
1244 		if (tee_pager_npages <= 0)
1245 			panic("running out of pages");
1246 		tee_pager_npages--;
1247 		set_npages();
1248 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1249 	} else {
1250 		/* move page to back */
1251 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1252 	}
1253 
1254 	return pmem;
1255 }
1256 
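/*
 * Returns false if the entry isn't mapped, meaning the caller should page
 * the page in. Returns true when the abort is dealt with here: *handled is
 * set if the access is now OK, and left false for an invalid user access,
 * in which case the faulting TA will be panicked by the abort handler.
 */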
1257 static bool pager_update_permissions(struct tee_pager_area *area,
1258 			struct abort_info *ai, bool *handled)
1259 {
1260 	unsigned int pgidx = area_va2idx(area, ai->va);
1261 	uint32_t attr;
1262 	paddr_t pa;
1263 
1264 	*handled = false;
1265 
1266 	area_get_entry(area, pgidx, &pa, &attr);
1267 
1268 	/* Not mapped */
1269 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1270 		return false;
1271 
1272 	/* Not readable, should not happen */
1273 	if (abort_is_user_exception(ai)) {
1274 		if (!(attr & TEE_MATTR_UR))
1275 			return true;
1276 	} else {
1277 		if (!(attr & TEE_MATTR_PR)) {
1278 			abort_print_error(ai);
1279 			panic();
1280 		}
1281 	}
1282 
1283 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1284 	case CORE_MMU_FAULT_TRANSLATION:
1285 	case CORE_MMU_FAULT_READ_PERMISSION:
1286 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1287 			/* Check for attempts to execute from a non-executable page */
1288 			if (abort_is_user_exception(ai)) {
1289 				if (!(attr & TEE_MATTR_UX))
1290 					return true;
1291 			} else {
1292 				if (!(attr & TEE_MATTR_PX)) {
1293 					abort_print_error(ai);
1294 					panic();
1295 				}
1296 			}
1297 		}
1298 		/* Since the page is mapped now it's OK */
1299 		break;
1300 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1301 		/* Check for attempts to write to an RO page */
1302 		if (abort_is_user_exception(ai)) {
1303 			if (!(area->flags & TEE_MATTR_UW))
1304 				return true;
1305 			if (!(attr & TEE_MATTR_UW)) {
1306 				FMSG("Dirty %p",
1307 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1308 				area_set_entry(area, pgidx, pa,
1309 					       get_area_mattr(area->flags));
1310 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1311 			}
1312 
1313 		} else {
1314 			if (!(area->flags & TEE_MATTR_PW)) {
1315 				abort_print_error(ai);
1316 				panic();
1317 			}
1318 			if (!(attr & TEE_MATTR_PW)) {
1319 				FMSG("Dirty %p",
1320 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1321 				area_set_entry(area, pgidx, pa,
1322 					       get_area_mattr(area->flags));
1323 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1324 			}
1325 		}
1326 		/* Since the permissions have been updated it's OK now */
1327 		break;
1328 	default:
1329 		/* Some fault we can't deal with */
1330 		if (abort_is_user_exception(ai))
1331 			return true;
1332 		abort_print_error(ai);
1333 		panic();
1334 	}
1335 	*handled = true;
1336 	return true;
1337 }
1338 
1339 #ifdef CFG_TEE_CORE_DEBUG
1340 static void stat_handle_fault(void)
1341 {
1342 	static size_t num_faults;
1343 	static size_t min_npages = SIZE_MAX;
1344 	static size_t total_min_npages = SIZE_MAX;
1345 
1346 	num_faults++;
1347 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1348 		DMSG("nfaults %zu npages %zu (min %zu)",
1349 		     num_faults, tee_pager_npages, min_npages);
1350 		min_npages = tee_pager_npages; /* reset */
1351 	}
1352 	if (tee_pager_npages < min_npages)
1353 		min_npages = tee_pager_npages;
1354 	if (tee_pager_npages < total_min_npages)
1355 		total_min_npages = tee_pager_npages;
1356 }
1357 #else
1358 static void stat_handle_fault(void)
1359 {
1360 }
1361 #endif
1362 
1363 bool tee_pager_handle_fault(struct abort_info *ai)
1364 {
1365 	struct tee_pager_area *area;
1366 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1367 	uint32_t exceptions;
1368 	bool ret;
1369 
1370 #ifdef TEE_PAGER_DEBUG_PRINT
1371 	abort_print(ai);
1372 #endif
1373 
1374 	/*
1375 	 * We're updating pages that can affect several active CPUs at a
1376 	 * time below. We end up here because a thread tries to access some
1377 	 * memory that isn't available. We have to be careful when making
1378 	 * that memory available as other threads may succeed in accessing
1379 	 * that address the moment after we've made it available.
1380 	 *
1381 	 * That means that we can't just map the memory and populate the
1382 	 * page, instead we use the aliased mapping to populate the page
1383 	 * and once everything is ready we map it.
1384 	 */
1385 	exceptions = pager_lock(ai);
1386 
1387 	stat_handle_fault();
1388 
1389 	/* check if the access is valid */
1390 	if (abort_is_user_exception(ai)) {
1391 		area = find_uta_area(ai->va);
1392 
1393 	} else {
1394 		area = find_area(&tee_pager_area_head, ai->va);
1395 		if (!area)
1396 			area = find_uta_area(ai->va);
1397 	}
1398 	if (!area || !area->pgt) {
1399 		ret = false;
1400 		goto out;
1401 	}
1402 
1403 	if (!tee_pager_unhide_page(page_va)) {
1404 		struct tee_pager_pmem *pmem = NULL;
1405 		uint32_t attr;
1406 		paddr_t pa;
1407 
1408 		/*
1409 		 * The page wasn't hidden, but some other core may have
1410 		 * updated the table entry before we got here or we need
1411 		 * to make a read-only page read-write (dirty).
1412 		 */
1413 		if (pager_update_permissions(area, ai, &ret)) {
1414 			/*
1415 			 * Nothing more to do with the abort. The problem
1416 			 * could already have been dealt with by another
1417 			 * core, or if ret is false the TA will be panicked.
1418 			 */
1419 			goto out;
1420 		}
1421 
1422 		pmem = tee_pager_get_page(area);
1423 		if (!pmem) {
1424 			abort_print(ai);
1425 			panic();
1426 		}
1427 
1428 		/* load page code & data */
1429 		tee_pager_load_page(area, page_va, pmem->va_alias);
1430 
1431 
1432 		pmem->area = area;
1433 		pmem->pgidx = area_va2idx(area, ai->va);
1434 		attr = get_area_mattr(area->flags) &
1435 			~(TEE_MATTR_PW | TEE_MATTR_UW);
1436 		pa = get_pmem_pa(pmem);
1437 
1438 		/*
1439 		 * We've updated the page using the aliased mapping and
1440 		 * some cache maintenance is now needed if it's an
1441 		 * executable page.
1442 		 *
1443 		 * Since the d-cache is a Physically-indexed,
1444 		 * physically-tagged (PIPT) cache we can clean either the
1445 		 * aliased address or the real virtual address. In this
1446 		 * case we choose the real virtual address.
1447 		 *
1448 		 * The i-cache can also be PIPT, but may be something else
1449 		 * too like VIPT. The current code requires the caches to
1450 		 * implement the IVIPT extension, that is:
1451 		 * "instruction cache maintenance is required only after
1452 		 * writing new data to a physical address that holds an
1453 		 * instruction."
1454 		 *
1455 		 * To portably invalidate the icache the page has to
1456 		 * be mapped at the final virtual address but not
1457 		 * executable.
1458 		 */
1459 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1460 			uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1461 					TEE_MATTR_PW | TEE_MATTR_UW;
1462 
1463 			/* Set a temporary read-only mapping */
1464 			area_set_entry(pmem->area, pmem->pgidx, pa,
1465 				       attr & ~mask);
1466 			tlbi_mva_allasid(page_va);
1467 
1468 			/*
1469 			 * Doing these operations to LoUIS (Level of
1470 			 * unification, Inner Shareable) would be enough
1471 			 */
1472 			cache_op_inner(DCACHE_AREA_CLEAN, (void *)page_va,
1473 				       SMALL_PAGE_SIZE);
1474 			cache_op_inner(ICACHE_AREA_INVALIDATE, (void *)page_va,
1475 				       SMALL_PAGE_SIZE);
1476 
1477 			/* Set the final mapping */
1478 			area_set_entry(area, pmem->pgidx, pa, attr);
1479 			tlbi_mva_allasid(page_va);
1480 		} else {
1481 			area_set_entry(area, pmem->pgidx, pa, attr);
1482 			/*
1483 			 * No need to flush TLB for this entry, it was
1484 			 * invalid. We should use a barrier though, to make
1485 			 * sure that the change is visible.
1486 			 */
1487 			dsb_ishst();
1488 		}
1489 		pgt_inc_used_entries(area->pgt);
1490 
1491 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1492 
1493 	}
1494 
1495 	tee_pager_hide_pages();
1496 	ret = true;
1497 out:
1498 	pager_unlock(exceptions);
1499 	return ret;
1500 }
1501 
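/*
 * Makes npages physical pages, starting at vaddr, available to the pager.
 * With unmap set the pages are unmapped right away, otherwise they remain
 * mapped and are assigned to the pager area covering them.
 */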
1502 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1503 {
1504 	size_t n;
1505 
1506 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1507 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1508 
1509 	/* setup memory */
1510 	for (n = 0; n < npages; n++) {
1511 		struct core_mmu_table_info *ti;
1512 		struct tee_pager_pmem *pmem;
1513 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1514 		unsigned int pgidx;
1515 		paddr_t pa;
1516 		uint32_t attr;
1517 
1518 		ti = find_table_info(va);
1519 		pgidx = core_mmu_va2idx(ti, va);
1520 		/*
1521 		 * Note that we can only support adding pages in the
1522 		 * valid range of this table info, currently not a problem.
1523 		 */
1524 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1525 
1526 		/* Ignore unmapped pages/blocks */
1527 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1528 			continue;
1529 
1530 		pmem = malloc(sizeof(struct tee_pager_pmem));
1531 		if (!pmem)
1532 			panic("out of mem");
1533 
1534 		pmem->va_alias = pager_add_alias_page(pa);
1535 
1536 		if (unmap) {
1537 			pmem->area = NULL;
1538 			pmem->pgidx = INVALID_PGIDX;
1539 			core_mmu_set_entry(ti, pgidx, 0, 0);
1540 			pgt_dec_used_entries(find_core_pgt(va));
1541 		} else {
1542 			/*
1543 			 * The page is still mapped, let's assign the area
1544 			 * and update the protection bits accordingly.
1545 			 */
1546 			pmem->area = find_area(&tee_pager_area_head, va);
1547 			assert(pmem->area->pgt == find_core_pgt(va));
1548 			pmem->pgidx = pgidx;
1549 			assert(pa == get_pmem_pa(pmem));
1550 			area_set_entry(pmem->area, pgidx, pa,
1551 				       get_area_mattr(pmem->area->flags));
1552 		}
1553 
1554 		tee_pager_npages++;
1555 		incr_npages_all();
1556 		set_npages();
1557 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1558 	}
1559 
1560 	/*
1561 	 * As this is done at init, invalidate all TLBs once instead of
1562 	 * targeting only the modified entries.
1563 	 */
1564 	tlbi_all();
1565 }
1566 
1567 #ifdef CFG_PAGED_USER_TA
1568 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1569 {
1570 	struct pgt *p = pgt;
1571 
1572 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1573 		p = SLIST_NEXT(p, link);
1574 	return p;
1575 }
1576 
1577 void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
1578 {
1579 	struct tee_pager_area *area;
1580 	struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1581 
1582 	TAILQ_FOREACH(area, utc->areas, link) {
1583 		if (!area->pgt)
1584 			area->pgt = find_pgt(pgt, area->base);
1585 		else
1586 			assert(area->pgt == find_pgt(pgt, area->base));
1587 		if (!area->pgt)
1588 			panic();
1589 	}
1590 }
1591 
1592 static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
1593 {
1594 	uint32_t attr;
1595 
1596 	assert(pmem->area && pmem->area->pgt);
1597 
1598 	area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
1599 	area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1600 	tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1601 	tee_pager_save_page(pmem, attr);
1602 	assert(pmem->area->pgt->num_used_entries);
1603 	pmem->area->pgt->num_used_entries--;
1604 	pmem->pgidx = INVALID_PGIDX;
1605 	pmem->area = NULL;
1606 }
1607 
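/*
 * Saves and unmaps all pages currently mapped through pgt and detaches the
 * pgt from any areas using it, so that the page table can be reused.
 */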
1608 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1609 {
1610 	struct tee_pager_pmem *pmem;
1611 	struct tee_pager_area *area;
1612 	uint32_t exceptions = pager_lock_check_stack(2048);
1613 
1614 	if (!pgt->num_used_entries)
1615 		goto out;
1616 
1617 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1618 		if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
1619 			continue;
1620 		if (pmem->area->pgt == pgt)
1621 			pager_save_and_release_entry(pmem);
1622 	}
1623 	assert(!pgt->num_used_entries);
1624 
1625 out:
1626 	if (is_user_ta_ctx(pgt->ctx)) {
1627 		TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
1628 			if (area->pgt == pgt)
1629 				area->pgt = NULL;
1630 		}
1631 	}
1632 
1633 	pager_unlock(exceptions);
1634 }
1635 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1636 #endif /*CFG_PAGED_USER_TA*/
1637 
1638 void tee_pager_release_phys(void *addr, size_t size)
1639 {
1640 	bool unmapped = false;
1641 	vaddr_t va = (vaddr_t)addr;
1642 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1643 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1644 	struct tee_pager_area *area;
1645 	uint32_t exceptions;
1646 
1647 	if (end <= begin)
1648 		return;
1649 
1650 	exceptions = pager_lock_check_stack(128);
1651 
1652 	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
1653 		area = find_area(&tee_pager_area_head, va);
1654 		if (!area)
1655 			panic();
1656 		unmapped |= tee_pager_release_one_phys(area, va);
1657 	}
1658 
1659 	if (unmapped)
1660 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1661 
1662 	pager_unlock(exceptions);
1663 }
1664 KEEP_PAGER(tee_pager_release_phys);
1665 
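/*
 * Example for illustration only, sizing and error handling are up to the
 * caller: allocating a locked, pager-backed buffer and later releasing its
 * physical pages could look like
 *
 *   void *buf = tee_pager_alloc(4 * SMALL_PAGE_SIZE, TEE_MATTR_LOCKED);
 *
 *   if (!buf)
 *       panic();
 *   ...
 *   tee_pager_release_phys(buf, 4 * SMALL_PAGE_SIZE);
 */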
1666 void *tee_pager_alloc(size_t size, uint32_t flags)
1667 {
1668 	tee_mm_entry_t *mm;
1669 	uint32_t f = TEE_MATTR_PW | TEE_MATTR_PR | (flags & TEE_MATTR_LOCKED);
1670 
1671 	if (!size)
1672 		return NULL;
1673 
1674 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1675 	if (!mm)
1676 		return NULL;
1677 
1678 	tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
1679 				f, NULL, NULL);
1680 
1681 	return (void *)tee_mm_get_smem(mm);
1682 }
1683