xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision b97e9666f646ca681890b1f5c61b8d62f0160d34)
1 /*
2  * Copyright (c) 2016, Linaro Limited
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <arm.h>
30 #include <assert.h>
31 #include <crypto/crypto.h>
32 #include <crypto/internal_aes-gcm.h>
33 #include <io.h>
34 #include <keep.h>
35 #include <kernel/abort.h>
36 #include <kernel/asan.h>
37 #include <kernel/panic.h>
38 #include <kernel/spinlock.h>
39 #include <kernel/tee_misc.h>
40 #include <kernel/tee_ta_manager.h>
41 #include <kernel/thread.h>
42 #include <kernel/tlb_helpers.h>
43 #include <mm/core_memprot.h>
44 #include <mm/tee_mm.h>
45 #include <mm/tee_pager.h>
46 #include <stdlib.h>
47 #include <sys/queue.h>
48 #include <tee_api_defines.h>
49 #include <trace.h>
50 #include <types_ext.h>
51 #include <utee_defines.h>
52 #include <util.h>
53 
54 #define PAGER_AE_KEY_BITS	256
55 
56 struct pager_aes_gcm_iv {
57 	uint32_t iv[3];
58 };
59 
60 #define PAGER_AES_GCM_TAG_LEN	16
61 
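/*
 * struct pager_rw_pstate - Per-page state for paged out read/write pages
 * @iv	Counter used to construct the AES-GCM IV, 0 means the page has
 *	never been saved and is zero-initialized when loaded
 * @tag	AES-GCM authentication tag of the encrypted page in the store
 */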
62 struct pager_rw_pstate {
63 	uint64_t iv;
64 	uint8_t tag[PAGER_AES_GCM_TAG_LEN];
65 };
66 
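/*
 * enum area_type - How the content of a pager area is backed
 * @AREA_TYPE_RO	Read-only pages restored from a store and verified
 *			against SHA-256 hashes
 * @AREA_TYPE_RW	Read/write pages encrypted with AES-GCM when paged out
 * @AREA_TYPE_LOCK	Zero-initialized pages that stay mapped once paged in
 */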
67 enum area_type {
68 	AREA_TYPE_RO,
69 	AREA_TYPE_RW,
70 	AREA_TYPE_LOCK,
71 };
72 
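/*
 * struct tee_pager_area - Virtual address range managed by the pager
 * @u.hashes	SHA-256 hashes of the pages (read-only areas)
 * @u.rwp	Per-page crypto state (read/write areas)
 * @store	Backing store holding paged out content
 * @type	Kind of area, see enum area_type
 * @flags	TEE_MATTR_* flags of the mapping
 * @base	Virtual start address of the area
 * @size	Size of the area in bytes
 * @pgt		Page table used to map the area
 */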
73 struct tee_pager_area {
74 	union {
75 		const uint8_t *hashes;
76 		struct pager_rw_pstate *rwp;
77 	} u;
78 	uint8_t *store;
79 	enum area_type type;
80 	uint32_t flags;
81 	vaddr_t base;
82 	size_t size;
83 	struct pgt *pgt;
84 	TAILQ_ENTRY(tee_pager_area) link;
85 };
86 
87 TAILQ_HEAD(tee_pager_area_head, tee_pager_area);
88 
89 static struct tee_pager_area_head tee_pager_area_head =
90 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
91 
92 #define INVALID_PGIDX	UINT_MAX
93 
94 /*
95  * struct tee_pager_pmem - Represents a physical page used for paging.
96  *
97  * @pgidx	Index of the entry in the translation table of area->pgt
98  * @va_alias	Virtual address where the physical page is always aliased.
99  *		Used during remapping of the page when the content needs to
100  *		be updated before it's available at the new location.
101  * @area	Pointer to the pager area the page currently belongs to
102  */
103 struct tee_pager_pmem {
104 	unsigned pgidx;
105 	void *va_alias;
106 	struct tee_pager_area *area;
107 	TAILQ_ENTRY(tee_pager_pmem) link;
108 };
109 
110 /* The list of physical pages. The first page in the list is the oldest */
111 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
112 
113 static struct tee_pager_pmem_head tee_pager_pmem_head =
114 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
115 
116 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
117 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
118 
119 static struct internal_aes_gcm_key pager_ae_key;
120 
121 /* Number of pages to hide at a time */
122 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
123 
124 /* Number of registered physical pages, used when hiding pages. */
125 static size_t tee_pager_npages;
126 
127 #ifdef CFG_WITH_STATS
128 static struct tee_pager_stats pager_stats;
129 
130 static inline void incr_ro_hits(void)
131 {
132 	pager_stats.ro_hits++;
133 }
134 
135 static inline void incr_rw_hits(void)
136 {
137 	pager_stats.rw_hits++;
138 }
139 
140 static inline void incr_hidden_hits(void)
141 {
142 	pager_stats.hidden_hits++;
143 }
144 
145 static inline void incr_zi_released(void)
146 {
147 	pager_stats.zi_released++;
148 }
149 
150 static inline void incr_npages_all(void)
151 {
152 	pager_stats.npages_all++;
153 }
154 
155 static inline void set_npages(void)
156 {
157 	pager_stats.npages = tee_pager_npages;
158 }
159 
160 void tee_pager_get_stats(struct tee_pager_stats *stats)
161 {
162 	*stats = pager_stats;
163 
164 	pager_stats.hidden_hits = 0;
165 	pager_stats.ro_hits = 0;
166 	pager_stats.rw_hits = 0;
167 	pager_stats.zi_released = 0;
168 }
169 
170 #else /* CFG_WITH_STATS */
171 static inline void incr_ro_hits(void) { }
172 static inline void incr_rw_hits(void) { }
173 static inline void incr_hidden_hits(void) { }
174 static inline void incr_zi_released(void) { }
175 static inline void incr_npages_all(void) { }
176 static inline void set_npages(void) { }
177 
178 void tee_pager_get_stats(struct tee_pager_stats *stats)
179 {
180 	memset(stats, 0, sizeof(struct tee_pager_stats));
181 }
182 #endif /* CFG_WITH_STATS */
183 
184 #define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
185 #define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
186 #define TBL_SHIFT	SMALL_PAGE_SHIFT
187 
188 #define EFFECTIVE_VA_SIZE \
189 	(ROUNDUP(TEE_RAM_VA_START + CFG_TEE_RAM_VA_SIZE, \
190 		 CORE_MMU_PGDIR_SIZE) - \
191 	 ROUNDDOWN(TEE_RAM_VA_START, CORE_MMU_PGDIR_SIZE))
192 
193 static struct pager_table {
194 	struct pgt pgt;
195 	struct core_mmu_table_info tbl_info;
196 } pager_tables[EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE];
197 
198 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
199 
200 /* Defines the range of the alias area */
201 static tee_mm_entry_t *pager_alias_area;
202 /*
203  * Physical pages are added in a stack-like fashion to the alias area,
204  * @pager_alias_next_free gives the address of the next free entry if
205  * @pager_alias_next_free is != 0
206  */
207 static uintptr_t pager_alias_next_free;
208 
209 #ifdef CFG_TEE_CORE_DEBUG
210 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
211 
212 static uint32_t pager_lock_dldetect(const char *func, const int line,
213 				    struct abort_info *ai)
214 {
215 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
216 	unsigned int retries = 0;
217 	unsigned int reminder = 0;
218 
219 	while (!cpu_spin_trylock(&pager_spinlock)) {
220 		retries++;
221 		if (!retries) {
222 			/* wrapped, time to report */
223 			trace_printf(func, line, TRACE_ERROR, true,
224 				     "possible spinlock deadlock reminder %u",
225 				     reminder);
226 			if (reminder < UINT_MAX)
227 				reminder++;
228 			if (ai)
229 				abort_print(ai);
230 		}
231 	}
232 
233 	return exceptions;
234 }
235 #else
236 static uint32_t pager_lock(struct abort_info __unused *ai)
237 {
238 	return cpu_spin_lock_xsave(&pager_spinlock);
239 }
240 #endif
241 
242 static uint32_t pager_lock_check_stack(size_t stack_size)
243 {
244 	if (stack_size) {
245 		int8_t buf[stack_size];
246 		size_t n;
247 
248 		/*
249 		 * Make sure to touch all pages of the stack that we expect
250 		 * to use with this lock held. We need to take any potential
251 		 * page faults before the lock is taken or we'll deadlock
252 		 * the pager. The pages that are populated in this way will
253 		 * eventually be released at certain save transitions of
254 		 * the thread.
255 		 */
256 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
257 			write8(1, (vaddr_t)buf + n);
258 		write8(1, (vaddr_t)buf + stack_size - 1);
259 	}
260 
261 	return pager_lock(NULL);
262 }
263 
264 static void pager_unlock(uint32_t exceptions)
265 {
266 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
267 }
268 
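/*
 * Translate a physical address to the virtual address where the pager maps
 * it. The linear mapping is tried first; if that misses, all pager table
 * entries are scanned for a matching valid entry.
 */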
269 void *tee_pager_phys_to_virt(paddr_t pa)
270 {
271 	struct core_mmu_table_info ti;
272 	unsigned idx;
273 	uint32_t a;
274 	paddr_t p;
275 	vaddr_t v;
276 	size_t n;
277 
278 	/*
279 	 * Most addresses are mapped linearly, try that first if possible.
280 	 */
281 	if (!tee_pager_get_table_info(pa, &ti))
282 		return NULL; /* impossible pa */
283 	idx = core_mmu_va2idx(&ti, pa);
284 	core_mmu_get_entry(&ti, idx, &p, &a);
285 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
286 		return (void *)core_mmu_idx2va(&ti, idx);
287 
288 	n = 0;
289 	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
290 	while (true) {
291 		while (idx < TBL_NUM_ENTRIES) {
292 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
293 			if (v >= (TEE_RAM_VA_START + CFG_TEE_RAM_VA_SIZE))
294 				return NULL;
295 
296 			core_mmu_get_entry(&pager_tables[n].tbl_info,
297 					   idx, &p, &a);
298 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
299 				return (void *)v;
300 			idx++;
301 		}
302 
303 		n++;
304 		if (n >= ARRAY_SIZE(pager_tables))
305 			return NULL;
306 		idx = 0;
307 	}
308 
309 	return NULL;
310 }
311 
312 static struct pager_table *find_pager_table_may_fail(vaddr_t va)
313 {
314 	size_t n;
315 	const vaddr_t mask = CORE_MMU_PGDIR_MASK;
316 
317 	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
318 	    CORE_MMU_PGDIR_SHIFT;
319 	if (n >= ARRAY_SIZE(pager_tables))
320 		return NULL;
321 
322 	assert(va >= pager_tables[n].tbl_info.va_base &&
323 	       va <= (pager_tables[n].tbl_info.va_base | mask));
324 
325 	return pager_tables + n;
326 }
327 
328 static struct pager_table *find_pager_table(vaddr_t va)
329 {
330 	struct pager_table *pt = find_pager_table_may_fail(va);
331 
332 	assert(pt);
333 	return pt;
334 }
335 
336 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
337 {
338 	struct pager_table *pt = find_pager_table_may_fail(va);
339 
340 	if (!pt)
341 		return false;
342 
343 	*ti = pt->tbl_info;
344 	return true;
345 }
346 
347 static struct core_mmu_table_info *find_table_info(vaddr_t va)
348 {
349 	return &find_pager_table(va)->tbl_info;
350 }
351 
352 static struct pgt *find_core_pgt(vaddr_t va)
353 {
354 	return &find_pager_table(va)->pgt;
355 }
356 
357 static void set_alias_area(tee_mm_entry_t *mm)
358 {
359 	struct pager_table *pt;
360 	unsigned idx;
361 	vaddr_t smem = tee_mm_get_smem(mm);
362 	size_t nbytes = tee_mm_get_bytes(mm);
363 	vaddr_t v;
364 
365 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
366 
367 	assert(!pager_alias_area);
368 	pager_alias_area = mm;
369 	pager_alias_next_free = smem;
370 
371 	/* Clear all mappings in the alias area */
372 	pt = find_pager_table(smem);
373 	idx = core_mmu_va2idx(&pt->tbl_info, smem);
374 	while (pt <= (pager_tables + ARRAY_SIZE(pager_tables) - 1)) {
375 		while (idx < TBL_NUM_ENTRIES) {
376 			v = core_mmu_idx2va(&pt->tbl_info, idx);
377 			if (v >= (smem + nbytes))
378 				goto out;
379 
380 			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
381 			idx++;
382 		}
383 
384 		pt++;
385 		idx = 0;
386 	}
387 
388 out:
389 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
390 }
391 
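/* Generate the random AES-GCM key used to protect paged out RW pages */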
392 static void generate_ae_key(void)
393 {
394 	uint8_t key[PAGER_AE_KEY_BITS / 8];
395 
396 	if (rng_generate(key, sizeof(key)) != TEE_SUCCESS)
397 		panic("failed to generate random");
398 	if (internal_aes_gcm_expand_enc_key(key, sizeof(key),
399 					    &pager_ae_key))
400 		panic("failed to expand key");
401 }
402 
403 static size_t tbl_usage_count(struct core_mmu_table_info *ti)
404 {
405 	size_t n;
406 	paddr_t pa;
407 	size_t usage = 0;
408 
409 	for (n = 0; n < ti->num_entries; n++) {
410 		core_mmu_get_entry(ti, n, &pa, NULL);
411 		if (pa)
412 			usage++;
413 	}
414 	return usage;
415 }
416 
417 static void area_get_entry(struct tee_pager_area *area, size_t idx,
418 			   paddr_t *pa, uint32_t *attr)
419 {
420 	assert(area->pgt);
421 	assert(idx < TBL_NUM_ENTRIES);
422 	core_mmu_get_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
423 }
424 
425 static void area_set_entry(struct tee_pager_area *area, size_t idx,
426 			   paddr_t pa, uint32_t attr)
427 {
428 	assert(area->pgt);
429 	assert(idx < TBL_NUM_ENTRIES);
430 	core_mmu_set_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
431 }
432 
433 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
434 {
435 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
436 }
437 
438 static vaddr_t area_idx2va(struct tee_pager_area *area, size_t idx)
439 {
440 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
441 }
442 
443 void tee_pager_early_init(void)
444 {
445 	size_t n;
446 
447 	/*
448 	 * Note that this depends on add_pager_vaspace() adding vaspace
449 	 * after end of memory.
450 	 */
451 	for (n = 0; n < ARRAY_SIZE(pager_tables); n++) {
452 		if (!core_mmu_find_table(TEE_RAM_VA_START +
453 					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
454 					 &pager_tables[n].tbl_info))
455 			panic("can't find mmu tables");
456 
457 		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
458 			panic("Unsupported page size in translation table");
459 		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
460 		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);
461 
462 		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
463 		pgt_set_used_entries(&pager_tables[n].pgt,
464 				tbl_usage_count(&pager_tables[n].tbl_info));
465 	}
466 }
467 
468 void tee_pager_init(tee_mm_entry_t *mm_alias)
469 {
470 	set_alias_area(mm_alias);
471 	generate_ae_key();
472 }
473 
474 static void *pager_add_alias_page(paddr_t pa)
475 {
476 	unsigned idx;
477 	struct core_mmu_table_info *ti;
478 	/* Alias pages are mapped read-only; write permission is enabled at runtime when needed */
479 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
480 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
481 			TEE_MATTR_SECURE | TEE_MATTR_PR;
482 
483 	DMSG("0x%" PRIxPA, pa);
484 
485 	ti = find_table_info(pager_alias_next_free);
486 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
487 	core_mmu_set_entry(ti, idx, pa, attr);
488 	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
489 	pager_alias_next_free += SMALL_PAGE_SIZE;
490 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
491 				      tee_mm_get_bytes(pager_alias_area)))
492 		pager_alias_next_free = 0;
493 	return (void *)core_mmu_idx2va(ti, idx);
494 }
495 
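/*
 * Allocate and initialize a pager area covering [base, base + size).
 * Writable areas get a backing store in TA RAM and per-page crypto
 * state, locked areas need no store, and read-only areas reference the
 * supplied store and hashes directly.
 */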
496 static struct tee_pager_area *alloc_area(struct pgt *pgt,
497 					 vaddr_t base, size_t size,
498 					 uint32_t flags, const void *store,
499 					 const void *hashes)
500 {
501 	struct tee_pager_area *area = calloc(1, sizeof(*area));
502 	enum area_type at;
503 	tee_mm_entry_t *mm_store = NULL;
504 
505 	if (!area)
506 		return NULL;
507 
508 	if (flags & (TEE_MATTR_PW | TEE_MATTR_UW)) {
509 		if (flags & TEE_MATTR_LOCKED) {
510 			at = AREA_TYPE_LOCK;
511 			goto out;
512 		}
513 		mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
514 		if (!mm_store)
515 			goto bad;
516 		area->store = phys_to_virt(tee_mm_get_smem(mm_store),
517 					   MEM_AREA_TA_RAM);
518 		if (!area->store)
519 			goto bad;
520 		area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
521 				     sizeof(struct pager_rw_pstate));
522 		if (!area->u.rwp)
523 			goto bad;
524 		at = AREA_TYPE_RW;
525 	} else {
526 		area->store = (void *)store;
527 		area->u.hashes = hashes;
528 		at = AREA_TYPE_RO;
529 	}
530 out:
531 	area->pgt = pgt;
532 	area->base = base;
533 	area->size = size;
534 	area->flags = flags;
535 	area->type = at;
536 	return area;
537 bad:
538 	tee_mm_free(mm_store);
539 	free(area->u.rwp);
540 	free(area);
541 	return NULL;
542 }
543 
544 static void area_insert_tail(struct tee_pager_area *area)
545 {
546 	uint32_t exceptions = pager_lock_check_stack(8);
547 
548 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
549 
550 	pager_unlock(exceptions);
551 }
552 KEEP_PAGER(area_insert_tail);
553 
554 void tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
555 			     const void *store, const void *hashes)
556 {
557 	struct tee_pager_area *area;
558 	vaddr_t b = base;
559 	size_t s = size;
560 	size_t s2;
561 
562 
563 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
564 		base, base + size, flags, store, hashes);
565 
566 	if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) {
567 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);
568 		panic();
569 	}
570 
571 	if (!(flags & TEE_MATTR_PW) && (!store || !hashes))
572 		panic("non-writable pages must provide store and hashes");
573 
574 	if ((flags & TEE_MATTR_PW) && (store || hashes))
575 		panic("writable pages cannot provide store or hashes");
576 
577 	while (s) {
578 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
579 		area = alloc_area(find_core_pgt(b), b, s2, flags,
580 				  (const uint8_t *)store + b - base,
581 				  (const uint8_t *)hashes + (b - base) /
582 							SMALL_PAGE_SIZE *
583 							TEE_SHA256_HASH_SIZE);
584 		if (!area)
585 			panic("alloc_area");
586 		area_insert_tail(area);
587 		b += s2;
588 		s -= s2;
589 	}
590 }
591 
592 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
593 					vaddr_t va)
594 {
595 	struct tee_pager_area *area;
596 
597 	if (!areas)
598 		return NULL;
599 
600 	TAILQ_FOREACH(area, areas, link) {
601 		if (core_is_buffer_inside(va, 1, area->base, area->size))
602 			return area;
603 	}
604 	return NULL;
605 }
606 
607 #ifdef CFG_PAGED_USER_TA
608 static struct tee_pager_area *find_uta_area(vaddr_t va)
609 {
610 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
611 
612 	if (!ctx || !is_user_ta_ctx(ctx))
613 		return NULL;
614 	return find_area(to_user_ta_ctx(ctx)->areas, va);
615 }
616 #else
617 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
618 {
619 	return NULL;
620 }
621 #endif /*CFG_PAGED_USER_TA*/
622 
623 
624 static uint32_t get_area_mattr(uint32_t area_flags)
625 {
626 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
627 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
628 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
629 
630 	if (!(area_flags & (TEE_MATTR_UR | TEE_MATTR_UX | TEE_MATTR_UW)))
631 		attr |= TEE_MATTR_GLOBAL;
632 
633 	return attr;
634 }
635 
636 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
637 {
638 	struct core_mmu_table_info *ti;
639 	paddr_t pa;
640 	unsigned idx;
641 
642 	ti = find_table_info((vaddr_t)pmem->va_alias);
643 	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
644 	core_mmu_get_entry(ti, idx, &pa, NULL);
645 	return pa;
646 }
647 
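/*
 * decrypt_page() and encrypt_page() protect paged out RW pages with
 * AES-GCM. The IV combines the address of the per-page state with a
 * counter incremented on each save, and the tag authenticates the
 * stored page content.
 */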
648 static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
649 			void *dst)
650 {
651 	struct pager_aes_gcm_iv iv = {
652 		{ (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
653 	};
654 	size_t tag_len = sizeof(rwp->tag);
655 
656 	return !internal_aes_gcm_dec(&pager_ae_key, &iv, sizeof(iv),
657 				     NULL, 0, src, SMALL_PAGE_SIZE, dst,
658 				     rwp->tag, tag_len);
659 }
660 
661 static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
662 {
663 	struct pager_aes_gcm_iv iv;
664 	size_t tag_len = sizeof(rwp->tag);
665 
666 	assert((rwp->iv + 1) > rwp->iv);
667 	rwp->iv++;
668 	/*
669 	 * IV is constructed as recommended in section "8.2.1 Deterministic
670 	 * Construction" of "Recommendation for Block Cipher Modes of
671 	 * Operation: Galois/Counter Mode (GCM) and GMAC",
672 	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
673 	 */
674 	iv.iv[0] = (vaddr_t)rwp;
675 	iv.iv[1] = rwp->iv >> 32;
676 	iv.iv[2] = rwp->iv;
677 
678 	if (internal_aes_gcm_enc(&pager_ae_key, &iv, sizeof(iv), NULL, 0,
679 				 src, SMALL_PAGE_SIZE, dst, rwp->tag, &tag_len))
680 		panic("gcm failed");
681 }
682 
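/*
 * Populate the page at @va_alias with the content belonging to @page_va:
 * read-only pages are copied from the store and verified against their
 * SHA-256 hash, read/write pages are decrypted from the store (or
 * zero-initialized if never saved) and locked pages are zero-initialized.
 */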
683 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
684 			void *va_alias)
685 {
686 	size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
687 	const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;
688 	struct core_mmu_table_info *ti;
689 	uint32_t attr_alias;
690 	paddr_t pa_alias;
691 	unsigned int idx_alias;
692 
693 	/* Ensure we are allowed to write to the aliased virtual page */
694 	ti = find_table_info((vaddr_t)va_alias);
695 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
696 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
697 	if (!(attr_alias & TEE_MATTR_PW)) {
698 		attr_alias |= TEE_MATTR_PW;
699 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
700 		tlbi_mva_allasid((vaddr_t)va_alias);
701 	}
702 
703 	asan_tag_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
704 	switch (area->type) {
705 	case AREA_TYPE_RO:
706 		{
707 			const void *hash = area->u.hashes +
708 					   idx * TEE_SHA256_HASH_SIZE;
709 
710 			memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
711 			incr_ro_hits();
712 
713 			if (hash_sha256_check(hash, va_alias,
714 					      SMALL_PAGE_SIZE) != TEE_SUCCESS) {
715 				EMSG("PH 0x%" PRIxVA " failed", page_va);
716 				panic();
717 			}
718 		}
719 		/* Forbid write to aliases for read-only (maybe exec) pages */
720 		attr_alias &= ~TEE_MATTR_PW;
721 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
722 		tlbi_mva_allasid((vaddr_t)va_alias);
723 		break;
724 	case AREA_TYPE_RW:
725 		FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
726 			va_alias, page_va, area->u.rwp[idx].iv);
727 		if (!area->u.rwp[idx].iv)
728 			memset(va_alias, 0, SMALL_PAGE_SIZE);
729 		else if (!decrypt_page(&area->u.rwp[idx], stored_page,
730 				       va_alias)) {
731 			EMSG("PH 0x%" PRIxVA " failed", page_va);
732 			panic();
733 		}
734 		incr_rw_hits();
735 		break;
736 	case AREA_TYPE_LOCK:
737 		FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
738 		memset(va_alias, 0, SMALL_PAGE_SIZE);
739 		break;
740 	default:
741 		panic();
742 	}
743 	asan_tag_no_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
744 }
745 
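/*
 * Save the content of a page that is about to be unmapped. Only dirty
 * pages of read/write areas need saving; they are encrypted into the
 * area's backing store.
 */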
746 static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
747 {
748 	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
749 				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;
750 
751 	if (pmem->area->type == AREA_TYPE_RW && (attr & dirty_bits)) {
752 		size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
753 		size_t idx = pmem->pgidx - (offs >> SMALL_PAGE_SHIFT);
754 		void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;
755 
756 		assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW));
757 		asan_tag_access(pmem->va_alias,
758 				(uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
759 		encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
760 			     stored_page);
761 		asan_tag_no_access(pmem->va_alias,
762 				   (uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
763 		FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
764 			pmem->area->base + idx * SMALL_PAGE_SIZE,
765 			pmem->area->u.rwp[idx].iv);
766 	}
767 }
768 
769 #ifdef CFG_PAGED_USER_TA
770 static void free_area(struct tee_pager_area *area)
771 {
772 	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr,
773 				virt_to_phys(area->store)));
774 	if (area->type == AREA_TYPE_RW)
775 		free(area->u.rwp);
776 	free(area);
777 }
778 
779 static bool pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
780 			       size_t size)
781 {
782 	struct tee_pager_area *area;
783 	uint32_t flags;
784 	vaddr_t b = base;
785 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
786 
787 	if (!utc->areas) {
788 		utc->areas = malloc(sizeof(*utc->areas));
789 		if (!utc->areas)
790 			return false;
791 		TAILQ_INIT(utc->areas);
792 	}
793 
794 	flags = TEE_MATTR_PRW | TEE_MATTR_URWX;
795 
796 	while (s) {
797 		size_t s2;
798 
799 		if (find_area(utc->areas, b))
800 			return false;
801 
802 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
803 
804 		/* Table info will be set when the context is activated. */
805 		area = alloc_area(NULL, b, s2, flags, NULL, NULL);
806 		if (!area)
807 			return false;
808 		TAILQ_INSERT_TAIL(utc->areas, area, link);
809 		b += s2;
810 		s -= s2;
811 	}
812 
813 	return true;
814 }
815 
816 bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size)
817 {
818 	struct thread_specific_data *tsd = thread_get_tsd();
819 	struct tee_pager_area *area;
820 	struct core_mmu_table_info dir_info = { NULL };
821 
822 	if (&utc->ctx != tsd->ctx) {
823 		/*
824 		 * Changes are to a utc that isn't active. Just add the
825 		 * areas; page tables will be dealt with later.
826 		 */
827 		return pager_add_uta_area(utc, base, size);
828 	}
829 
830 	/*
831 	 * Assign page tables before adding areas to be able to tell which
832 	 * are newly added and should be removed in case of failure.
833 	 */
834 	tee_pager_assign_uta_tables(utc);
835 	if (!pager_add_uta_area(utc, base, size)) {
836 		struct tee_pager_area *next_a;
837 
838 		/* Remove all added areas */
839 		TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
840 			if (!area->pgt) {
841 				TAILQ_REMOVE(utc->areas, area, link);
842 				free_area(area);
843 			}
844 		}
845 		return false;
846 	}
847 
848 	/*
849 	 * Assign page tables to the new areas and make sure that the page
850 	 * tables are registered in the upper table.
851 	 */
852 	tee_pager_assign_uta_tables(utc);
853 	core_mmu_get_user_pgdir(&dir_info);
854 	TAILQ_FOREACH(area, utc->areas, link) {
855 		paddr_t pa;
856 		size_t idx;
857 		uint32_t attr;
858 
859 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
860 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
861 
862 		/*
863 		 * Check if the page table is already in use; if it is, it's
864 		 * already registered.
865 		 */
866 		if (area->pgt->num_used_entries) {
867 			assert(attr & TEE_MATTR_TABLE);
868 			assert(pa == virt_to_phys(area->pgt->tbl));
869 			continue;
870 		}
871 
872 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
873 		pa = virt_to_phys(area->pgt->tbl);
874 		assert(pa);
875 		/*
876 		 * Note that the update of the table entry is guaranteed to
877 		 * be atomic.
878 		 */
879 		core_mmu_set_entry(&dir_info, idx, pa, attr);
880 	}
881 
882 	return true;
883 }
884 
885 static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
886 				   struct pgt *pgt)
887 {
888 	assert(pgt);
889 	ti->table = pgt->tbl;
890 	ti->va_base = pgt->vabase;
891 	ti->level = TBL_LEVEL;
892 	ti->shift = TBL_SHIFT;
893 	ti->num_entries = TBL_NUM_ENTRIES;
894 }
895 
896 static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
897 			   vaddr_t new_base)
898 {
899 	uint32_t exceptions = pager_lock_check_stack(64);
900 
901 	/*
902 	 * If there's no pgt assigned to the old area there are no pages to
903 	 * deal with either, just update with a new pgt and base.
904 	 */
905 	if (area->pgt) {
906 		struct core_mmu_table_info old_ti;
907 		struct core_mmu_table_info new_ti;
908 		struct tee_pager_pmem *pmem;
909 
910 		init_tbl_info_from_pgt(&old_ti, area->pgt);
911 		init_tbl_info_from_pgt(&new_ti, new_pgt);
912 
913 
914 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
915 			vaddr_t va;
916 			paddr_t pa;
917 			uint32_t attr;
918 
919 			if (pmem->area != area)
920 				continue;
921 			core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr);
922 			core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0);
923 
924 			assert(pa == get_pmem_pa(pmem));
925 			assert(attr);
926 			assert(area->pgt->num_used_entries);
927 			area->pgt->num_used_entries--;
928 
929 			va = core_mmu_idx2va(&old_ti, pmem->pgidx);
930 			va = va - area->base + new_base;
931 			pmem->pgidx = core_mmu_va2idx(&new_ti, va);
932 			core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr);
933 			new_pgt->num_used_entries++;
934 		}
935 	}
936 
937 	area->pgt = new_pgt;
938 	area->base = new_base;
939 	pager_unlock(exceptions);
940 }
941 KEEP_PAGER(transpose_area);
942 
943 void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
944 				   vaddr_t src_base,
945 				   struct user_ta_ctx *dst_utc,
946 				   vaddr_t dst_base, struct pgt **dst_pgt,
947 				   size_t size)
948 {
949 	struct tee_pager_area *area;
950 	struct tee_pager_area *next_a;
951 
952 	TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
953 		vaddr_t new_area_base;
954 		size_t new_idx;
955 
956 		if (!core_is_buffer_inside(area->base, area->size,
957 					  src_base, size))
958 			continue;
959 
960 		TAILQ_REMOVE(src_utc->areas, area, link);
961 
962 		new_area_base = dst_base + (src_base - area->base);
963 		new_idx = (new_area_base - dst_pgt[0]->vabase) /
964 			  CORE_MMU_PGDIR_SIZE;
965 		assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
966 		       dst_pgt[new_idx]->vabase);
967 		transpose_area(area, dst_pgt[new_idx], new_area_base);
968 
969 		/*
970 		 * Assert that this will not cause any conflicts in the new
971 		 * utc.  This should already be guaranteed, but a bug here
972 		 * could be tricky to find.
973 		 */
974 		assert(!find_area(dst_utc->areas, area->base));
975 		TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
976 	}
977 }
978 
979 static void rem_area(struct tee_pager_area_head *area_head,
980 		     struct tee_pager_area *area)
981 {
982 	struct tee_pager_pmem *pmem;
983 	uint32_t exceptions;
984 
985 	exceptions = pager_lock_check_stack(64);
986 
987 	TAILQ_REMOVE(area_head, area, link);
988 
989 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
990 		if (pmem->area == area) {
991 			area_set_entry(area, pmem->pgidx, 0, 0);
992 			tlbi_mva_allasid(area_idx2va(area, pmem->pgidx));
993 			pgt_dec_used_entries(area->pgt);
994 			pmem->area = NULL;
995 			pmem->pgidx = INVALID_PGIDX;
996 		}
997 	}
998 
999 	pager_unlock(exceptions);
1000 	free_area(area);
1001 }
1002 KEEP_PAGER(rem_area);
1003 
1004 void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
1005 			      size_t size)
1006 {
1007 	struct tee_pager_area *area;
1008 	struct tee_pager_area *next_a;
1009 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
1010 
1011 	TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
1012 		if (core_is_buffer_inside(area->base, area->size, base, s))
1013 			rem_area(utc->areas, area);
1014 	}
1015 }
1016 
1017 void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
1018 {
1019 	struct tee_pager_area *area;
1020 
1021 	if (!utc->areas)
1022 		return;
1023 
1024 	while (true) {
1025 		area = TAILQ_FIRST(utc->areas);
1026 		if (!area)
1027 			break;
1028 		TAILQ_REMOVE(utc->areas, area, link);
1029 		free_area(area);
1030 	}
1031 
1032 	free(utc->areas);
1033 }
1034 
1035 bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
1036 				 size_t size, uint32_t flags)
1037 {
1038 	bool ret;
1039 	vaddr_t b = base;
1040 	size_t s = size;
1041 	size_t s2;
1042 	struct tee_pager_area *area = find_area(utc->areas, b);
1043 	uint32_t exceptions;
1044 	struct tee_pager_pmem *pmem;
1045 	paddr_t pa;
1046 	uint32_t a;
1047 	uint32_t f;
1048 
1049 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
1050 	if (f & TEE_MATTR_UW)
1051 		f |= TEE_MATTR_PW;
1052 	f = get_area_mattr(f);
1053 
1054 	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1055 
1056 	while (s) {
1057 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
1058 		if (!area || area->base != b || area->size != s2) {
1059 			ret = false;
1060 			goto out;
1061 		}
1062 		b += s2;
1063 		s -= s2;
1064 
1065 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1066 			if (pmem->area != area)
1067 				continue;
1068 			area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
1069 			if (a & TEE_MATTR_VALID_BLOCK)
1070 				assert(pa == get_pmem_pa(pmem));
1071 			else
1072 				pa = get_pmem_pa(pmem);
1073 			if (a == f)
1074 				continue;
1075 			area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1076 			tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1077 			if (!(flags & TEE_MATTR_UW))
1078 				tee_pager_save_page(pmem, a);
1079 
1080 			area_set_entry(pmem->area, pmem->pgidx, pa, f);
1081 			/*
1082 			 * Make sure the table update is visible before
1083 			 * continuing.
1084 			 */
1085 			dsb_ishst();
1086 
1087 			if (flags & TEE_MATTR_UX) {
1088 				void *va = (void *)area_idx2va(pmem->area,
1089 							       pmem->pgidx);
1090 
1091 				cache_op_inner(DCACHE_AREA_CLEAN, va,
1092 						SMALL_PAGE_SIZE);
1093 				cache_op_inner(ICACHE_AREA_INVALIDATE, va,
1094 						SMALL_PAGE_SIZE);
1095 			}
1096 		}
1097 
1098 		area->flags = f;
1099 		area = TAILQ_NEXT(area, link);
1100 	}
1101 
1102 	ret = true;
1103 out:
1104 	pager_unlock(exceptions);
1105 	return ret;
1106 }
1107 KEEP_PAGER(tee_pager_set_uta_area_attr);
1108 #endif /*CFG_PAGED_USER_TA*/
1109 
1110 static bool tee_pager_unhide_page(vaddr_t page_va)
1111 {
1112 	struct tee_pager_pmem *pmem;
1113 
1114 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1115 		paddr_t pa;
1116 		uint32_t attr;
1117 
1118 		if (pmem->pgidx == INVALID_PGIDX)
1119 			continue;
1120 
1121 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
1122 
1123 		if (!(attr &
1124 		     (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
1125 			continue;
1126 
1127 		if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
1128 			uint32_t a = get_area_mattr(pmem->area->flags);
1129 
1130 			/* page is hidden, show and move to back */
1131 			if (pa != get_pmem_pa(pmem))
1132 				panic("unexpected pa");
1133 
1134 			/*
1135 			 * If it's not a dirty block, then it should be
1136 			 * read only.
1137 			 */
1138 			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
1139 				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1140 			else
1141 				FMSG("Unhide %#" PRIxVA, page_va);
1142 
1143 			if (page_va == 0x8000a000)
1144 				FMSG("unhide %#" PRIxVA " a %#" PRIX32,
1145 					page_va, a);
1146 			area_set_entry(pmem->area, pmem->pgidx, pa, a);
1147 			/*
1148 			 * Note that TLB invalidation isn't needed since
1149 			 * there wasn't a valid mapping before. We should
1150 			 * use a barrier though, to make sure that the
1151 			 * change is visible.
1152 			 */
1153 			dsb_ishst();
1154 
1155 			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1156 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1157 			incr_hidden_hits();
1158 			return true;
1159 		}
1160 	}
1161 
1162 	return false;
1163 }
1164 
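/*
 * Hide a batch of the oldest mapped pages by making their entries invalid.
 * The next access faults and tee_pager_unhide_page() moves the page to the
 * back of the list, giving an approximate LRU ordering of the pmem list.
 */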
1165 static void tee_pager_hide_pages(void)
1166 {
1167 	struct tee_pager_pmem *pmem;
1168 	size_t n = 0;
1169 
1170 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1171 		paddr_t pa;
1172 		uint32_t attr;
1173 		uint32_t a;
1174 
1175 		if (n >= TEE_PAGER_NHIDE)
1176 			break;
1177 		n++;
1178 
1179 		/* we cannot hide pages when pmem->area is not defined. */
1180 		if (!pmem->area)
1181 			continue;
1182 
1183 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
1184 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1185 			continue;
1186 
1187 		assert(pa == get_pmem_pa(pmem));
1188 		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)){
1189 			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
1190 			FMSG("Hide %#" PRIxVA,
1191 			     area_idx2va(pmem->area, pmem->pgidx));
1192 		} else
1193 			a = TEE_MATTR_HIDDEN_BLOCK;
1194 
1195 		area_set_entry(pmem->area, pmem->pgidx, pa, a);
1196 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1197 	}
1198 }
1199 
1200 /*
1201  * Find the mapped pmem, unmap it and move it back to the pageable pmem list.
1202  * Return false if the page was not mapped, and true if it was mapped.
1203  */
1204 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1205 				       vaddr_t page_va)
1206 {
1207 	struct tee_pager_pmem *pmem;
1208 	unsigned pgidx;
1209 	paddr_t pa;
1210 	uint32_t attr;
1211 
1212 	pgidx = area_va2idx(area, page_va);
1213 	area_get_entry(area, pgidx, &pa, &attr);
1214 
1215 	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
1216 
1217 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1218 		if (pmem->area != area || pmem->pgidx != pgidx)
1219 			continue;
1220 
1221 		assert(pa == get_pmem_pa(pmem));
1222 		area_set_entry(area, pgidx, 0, 0);
1223 		pgt_dec_used_entries(area->pgt);
1224 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1225 		pmem->area = NULL;
1226 		pmem->pgidx = INVALID_PGIDX;
1227 		tee_pager_npages++;
1228 		set_npages();
1229 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1230 		incr_zi_released();
1231 		return true;
1232 	}
1233 
1234 	return false;
1235 }
1236 
1237 /* Finds the oldest page and unmaps it from its old virtual address */
1238 static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
1239 {
1240 	struct tee_pager_pmem *pmem;
1241 
1242 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1243 	if (!pmem) {
1244 		EMSG("No pmem entries");
1245 		return NULL;
1246 	}
1247 	if (pmem->pgidx != INVALID_PGIDX) {
1248 		uint32_t a;
1249 
1250 		assert(pmem->area && pmem->area->pgt);
1251 		area_get_entry(pmem->area, pmem->pgidx, NULL, &a);
1252 		area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1253 		pgt_dec_used_entries(pmem->area->pgt);
1254 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1255 		tee_pager_save_page(pmem, a);
1256 	}
1257 
1258 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1259 	pmem->pgidx = INVALID_PGIDX;
1260 	pmem->area = NULL;
1261 	if (area->type == AREA_TYPE_LOCK) {
1262 		/* Move page to lock list */
1263 		if (tee_pager_npages <= 0)
1264 			panic("running out of page");
1265 		tee_pager_npages--;
1266 		set_npages();
1267 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1268 	} else {
1269 		/* move page to back */
1270 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1271 	}
1272 
1273 	return pmem;
1274 }
1275 
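/*
 * Try to resolve an abort on an already mapped page by updating the
 * permissions, for instance making a clean page of a writable area
 * writable (dirty). Returns false if the page isn't mapped and has to be
 * paged in, otherwise true with *handled set to false only for user mode
 * access violations.
 */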
1276 static bool pager_update_permissions(struct tee_pager_area *area,
1277 			struct abort_info *ai, bool *handled)
1278 {
1279 	unsigned int pgidx = area_va2idx(area, ai->va);
1280 	uint32_t attr;
1281 	paddr_t pa;
1282 
1283 	*handled = false;
1284 
1285 	area_get_entry(area, pgidx, &pa, &attr);
1286 
1287 	/* Not mapped */
1288 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1289 		return false;
1290 
1291 	/* Not readable, should not happen */
1292 	if (abort_is_user_exception(ai)) {
1293 		if (!(attr & TEE_MATTR_UR))
1294 			return true;
1295 	} else {
1296 		if (!(attr & TEE_MATTR_PR)) {
1297 			abort_print_error(ai);
1298 			panic();
1299 		}
1300 	}
1301 
1302 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1303 	case CORE_MMU_FAULT_TRANSLATION:
1304 	case CORE_MMU_FAULT_READ_PERMISSION:
1305 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1306 			/* Check if attempting to execute from a non-executable page */
1307 			if (abort_is_user_exception(ai)) {
1308 				if (!(attr & TEE_MATTR_UX))
1309 					return true;
1310 			} else {
1311 				if (!(attr & TEE_MATTR_PX)) {
1312 					abort_print_error(ai);
1313 					panic();
1314 				}
1315 			}
1316 		}
1317 		/* Since the page is mapped now it's OK */
1318 		break;
1319 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1320 		/* Check if attempting to write to a read-only page */
1321 		if (abort_is_user_exception(ai)) {
1322 			if (!(area->flags & TEE_MATTR_UW))
1323 				return true;
1324 			if (!(attr & TEE_MATTR_UW)) {
1325 				FMSG("Dirty %p",
1326 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1327 				area_set_entry(area, pgidx, pa,
1328 					       get_area_mattr(area->flags));
1329 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1330 			}
1331 
1332 		} else {
1333 			if (!(area->flags & TEE_MATTR_PW)) {
1334 				abort_print_error(ai);
1335 				panic();
1336 			}
1337 			if (!(attr & TEE_MATTR_PW)) {
1338 				FMSG("Dirty %p",
1339 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1340 				area_set_entry(area, pgidx, pa,
1341 					       get_area_mattr(area->flags));
1342 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1343 			}
1344 		}
1345 		/* Since the permissions have been updated it's now OK */
1346 		break;
1347 	default:
1348 		/* Some fault we can't deal with */
1349 		if (abort_is_user_exception(ai))
1350 			return true;
1351 		abort_print_error(ai);
1352 		panic();
1353 	}
1354 	*handled = true;
1355 	return true;
1356 }
1357 
1358 #ifdef CFG_TEE_CORE_DEBUG
1359 static void stat_handle_fault(void)
1360 {
1361 	static size_t num_faults;
1362 	static size_t min_npages = SIZE_MAX;
1363 	static size_t total_min_npages = SIZE_MAX;
1364 
1365 	num_faults++;
1366 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1367 		DMSG("nfaults %zu npages %zu (min %zu)",
1368 		     num_faults, tee_pager_npages, min_npages);
1369 		min_npages = tee_pager_npages; /* reset */
1370 	}
1371 	if (tee_pager_npages < min_npages)
1372 		min_npages = tee_pager_npages;
1373 	if (tee_pager_npages < total_min_npages)
1374 		total_min_npages = tee_pager_npages;
1375 }
1376 #else
1377 static void stat_handle_fault(void)
1378 {
1379 }
1380 #endif
1381 
1382 bool tee_pager_handle_fault(struct abort_info *ai)
1383 {
1384 	struct tee_pager_area *area;
1385 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1386 	uint32_t exceptions;
1387 	bool ret;
1388 
1389 #ifdef TEE_PAGER_DEBUG_PRINT
1390 	abort_print(ai);
1391 #endif
1392 
1393 	/*
1394 	 * We're updating pages that can affect several active CPUs at a
1395 	 * time below. We end up here because a thread tries to access some
1396 	 * memory that isn't available. We have to be careful when making
1397 	 * that memory available as other threads may succeed in accessing
1398 	 * that address the moment after we've made it available.
1399 	 *
1400 	 * That means that we can't just map the memory and populate the
1401 	 * page, instead we use the aliased mapping to populate the page
1402 	 * and once everything is ready we map it.
1403 	 */
1404 	exceptions = pager_lock(ai);
1405 
1406 	stat_handle_fault();
1407 
1408 	/* check if the access is valid */
1409 	if (abort_is_user_exception(ai)) {
1410 		area = find_uta_area(ai->va);
1411 
1412 	} else {
1413 		area = find_area(&tee_pager_area_head, ai->va);
1414 		if (!area)
1415 			area = find_uta_area(ai->va);
1416 	}
1417 	if (!area || !area->pgt) {
1418 		ret = false;
1419 		goto out;
1420 	}
1421 
1422 	if (!tee_pager_unhide_page(page_va)) {
1423 		struct tee_pager_pmem *pmem = NULL;
1424 		uint32_t attr;
1425 		paddr_t pa;
1426 
1427 		/*
1428 		 * The page wasn't hidden, but some other core may have
1429 		 * updated the table entry before we got here or we need
1430 		 * to make a read-only page read-write (dirty).
1431 		 */
1432 		if (pager_update_permissions(area, ai, &ret)) {
1433 			/*
1434 			 * Nothing more to do with the abort. The problem
1435 			 * could already have been dealt with from another
1436 			 * core, or if ret is false the TA will be panicked.
1437 			 */
1438 			goto out;
1439 		}
1440 
1441 		pmem = tee_pager_get_page(area);
1442 		if (!pmem) {
1443 			abort_print(ai);
1444 			panic();
1445 		}
1446 
1447 		/* load page code & data */
1448 		tee_pager_load_page(area, page_va, pmem->va_alias);
1449 
1450 
1451 		pmem->area = area;
1452 		pmem->pgidx = area_va2idx(area, ai->va);
1453 		attr = get_area_mattr(area->flags) &
1454 			~(TEE_MATTR_PW | TEE_MATTR_UW);
1455 		pa = get_pmem_pa(pmem);
1456 
1457 		/*
1458 		 * We've updated the page using the aliased mapping and
1459 		 * some cache maintenance is now needed if it's an
1460 		 * executable page.
1461 		 *
1462 		 * Since the d-cache is a Physically-indexed,
1463 		 * physically-tagged (PIPT) cache we can clean either the
1464 		 * aliased address or the real virtual address. In this
1465 		 * case we choose the real virtual address.
1466 		 *
1467 		 * The i-cache can also be PIPT, but may be something else
1468 		 * too like VIPT. The current code requires the caches to
1469 		 * implement the IVIPT extension, that is:
1470 		 * "instruction cache maintenance is required only after
1471 		 * writing new data to a physical address that holds an
1472 		 * instruction."
1473 		 *
1474 		 * To portably invalidate the icache the page has to
1475 		 * be mapped at the final virtual address but not
1476 		 * executable.
1477 		 */
1478 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1479 			uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1480 					TEE_MATTR_PW | TEE_MATTR_UW;
1481 
1482 			/* Set a temporary read-only mapping */
1483 			area_set_entry(pmem->area, pmem->pgidx, pa,
1484 				       attr & ~mask);
1485 			tlbi_mva_allasid(page_va);
1486 
1487 			/*
1488 			 * Doing these operations to LoUIS (Level of
1489 			 * unification, Inner Shareable) would be enough
1490 			 */
1491 			cache_op_inner(DCACHE_AREA_CLEAN, (void *)page_va,
1492 				       SMALL_PAGE_SIZE);
1493 			cache_op_inner(ICACHE_AREA_INVALIDATE, (void *)page_va,
1494 				       SMALL_PAGE_SIZE);
1495 
1496 			/* Set the final mapping */
1497 			area_set_entry(area, pmem->pgidx, pa, attr);
1498 			tlbi_mva_allasid(page_va);
1499 		} else {
1500 			area_set_entry(area, pmem->pgidx, pa, attr);
1501 			/*
1502 			 * No need to flush TLB for this entry, it was
1503 			 * invalid. We should use a barrier though, to make
1504 			 * sure that the change is visible.
1505 			 */
1506 			dsb_ishst();
1507 		}
1508 		pgt_inc_used_entries(area->pgt);
1509 
1510 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1511 
1512 	}
1513 
1514 	tee_pager_hide_pages();
1515 	ret = true;
1516 out:
1517 	pager_unlock(exceptions);
1518 	return ret;
1519 }
1520 
1521 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1522 {
1523 	size_t n;
1524 
1525 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1526 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1527 
1528 	/* setup memory */
1529 	for (n = 0; n < npages; n++) {
1530 		struct core_mmu_table_info *ti;
1531 		struct tee_pager_pmem *pmem;
1532 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1533 		unsigned int pgidx;
1534 		paddr_t pa;
1535 		uint32_t attr;
1536 
1537 		ti = find_table_info(va);
1538 		pgidx = core_mmu_va2idx(ti, va);
1539 		/*
1540 		 * Note that we can only support adding pages in the
1541 		 * valid range of this table info, currently not a problem.
1542 		 */
1543 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1544 
1545 		/* Ignore unmapped pages/blocks */
1546 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1547 			continue;
1548 
1549 		pmem = malloc(sizeof(struct tee_pager_pmem));
1550 		if (!pmem)
1551 			panic("out of mem");
1552 
1553 		pmem->va_alias = pager_add_alias_page(pa);
1554 
1555 		if (unmap) {
1556 			pmem->area = NULL;
1557 			pmem->pgidx = INVALID_PGIDX;
1558 			core_mmu_set_entry(ti, pgidx, 0, 0);
1559 			pgt_dec_used_entries(find_core_pgt(va));
1560 		} else {
1561 			/*
1562 			 * The page is still mapped, let's assign the area
1563 			 * and update the protection bits accordingly.
1564 			 */
1565 			pmem->area = find_area(&tee_pager_area_head, va);
1566 			assert(pmem->area->pgt == find_core_pgt(va));
1567 			pmem->pgidx = pgidx;
1568 			assert(pa == get_pmem_pa(pmem));
1569 			area_set_entry(pmem->area, pgidx, pa,
1570 				       get_area_mattr(pmem->area->flags));
1571 		}
1572 
1573 		tee_pager_npages++;
1574 		incr_npages_all();
1575 		set_npages();
1576 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1577 	}
1578 
1579 	/*
1580 	 * As this is done at init, invalidate all TLBs once instead of
1581 	 * targeting only the modified entries.
1582 	 */
1583 	tlbi_all();
1584 }
1585 
1586 #ifdef CFG_PAGED_USER_TA
1587 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1588 {
1589 	struct pgt *p = pgt;
1590 
1591 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1592 		p = SLIST_NEXT(p, link);
1593 	return p;
1594 }
1595 
1596 void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
1597 {
1598 	struct tee_pager_area *area;
1599 	struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1600 
1601 	TAILQ_FOREACH(area, utc->areas, link) {
1602 		if (!area->pgt)
1603 			area->pgt = find_pgt(pgt, area->base);
1604 		else
1605 			assert(area->pgt == find_pgt(pgt, area->base));
1606 		if (!area->pgt)
1607 			panic();
1608 	}
1609 }
1610 
1611 static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
1612 {
1613 	uint32_t attr;
1614 
1615 	assert(pmem->area && pmem->area->pgt);
1616 
1617 	area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
1618 	area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1619 	tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1620 	tee_pager_save_page(pmem, attr);
1621 	assert(pmem->area->pgt->num_used_entries);
1622 	pmem->area->pgt->num_used_entries--;
1623 	pmem->pgidx = INVALID_PGIDX;
1624 	pmem->area = NULL;
1625 }
1626 
1627 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1628 {
1629 	struct tee_pager_pmem *pmem;
1630 	struct tee_pager_area *area;
1631 	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1632 
1633 	if (!pgt->num_used_entries)
1634 		goto out;
1635 
1636 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1637 		if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
1638 			continue;
1639 		if (pmem->area->pgt == pgt)
1640 			pager_save_and_release_entry(pmem);
1641 	}
1642 	assert(!pgt->num_used_entries);
1643 
1644 out:
1645 	if (is_user_ta_ctx(pgt->ctx)) {
1646 		TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
1647 			if (area->pgt == pgt)
1648 				area->pgt = NULL;
1649 		}
1650 	}
1651 
1652 	pager_unlock(exceptions);
1653 }
1654 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1655 #endif /*CFG_PAGED_USER_TA*/
1656 
1657 void tee_pager_release_phys(void *addr, size_t size)
1658 {
1659 	bool unmapped = false;
1660 	vaddr_t va = (vaddr_t)addr;
1661 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1662 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1663 	struct tee_pager_area *area;
1664 	uint32_t exceptions;
1665 
1666 	if (end <= begin)
1667 		return;
1668 
1669 	exceptions = pager_lock_check_stack(128);
1670 
1671 	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
1672 		area = find_area(&tee_pager_area_head, va);
1673 		if (!area)
1674 			panic();
1675 		unmapped |= tee_pager_release_one_phys(area, va);
1676 	}
1677 
1678 	if (unmapped)
1679 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1680 
1681 	pager_unlock(exceptions);
1682 }
1683 KEEP_PAGER(tee_pager_release_phys);
1684 
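/*
 * Allocate pager backed memory for core use. The memory is mapped
 * read/write for the core and, if TEE_MATTR_LOCKED is set in @flags, the
 * pages stay mapped once paged in until tee_pager_release_phys() is
 * called. A minimal usage sketch (illustrative only):
 *
 *	void *buf = tee_pager_alloc(4 * SMALL_PAGE_SIZE, TEE_MATTR_LOCKED);
 *
 *	if (!buf)
 *		panic();
 */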
1685 void *tee_pager_alloc(size_t size, uint32_t flags)
1686 {
1687 	tee_mm_entry_t *mm;
1688 	uint32_t f = TEE_MATTR_PW | TEE_MATTR_PR | (flags & TEE_MATTR_LOCKED);
1689 	uint8_t *smem;
1690 	size_t bytes;
1691 
1692 	if (!size)
1693 		return NULL;
1694 
1695 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1696 	if (!mm)
1697 		return NULL;
1698 
1699 	bytes = tee_mm_get_bytes(mm);
1700 	smem = (uint8_t *)tee_mm_get_smem(mm);
1701 	tee_pager_add_core_area((vaddr_t)smem, bytes, f, NULL, NULL);
1702 	asan_tag_access(smem, smem + bytes);
1703 
1704 	return smem;
1705 }
1706