xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 1bb929836182ecb96d2d9d268daa807c67596396)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  * this list of conditions and the following disclaimer.
12  *
13  * 2. Redistributions in binary form must reproduce the above copyright notice,
14  * this list of conditions and the following disclaimer in the documentation
15  * and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
21  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27  * POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #include <arm.h>
31 #include <assert.h>
32 #include <crypto/crypto.h>
33 #include <crypto/internal_aes-gcm.h>
34 #include <io.h>
35 #include <keep.h>
36 #include <kernel/abort.h>
37 #include <kernel/asan.h>
38 #include <kernel/panic.h>
39 #include <kernel/spinlock.h>
40 #include <kernel/tee_misc.h>
41 #include <kernel/tee_ta_manager.h>
42 #include <kernel/thread.h>
43 #include <kernel/tlb_helpers.h>
44 #include <mm/core_memprot.h>
45 #include <mm/tee_mm.h>
46 #include <mm/tee_pager.h>
47 #include <stdlib.h>
48 #include <sys/queue.h>
49 #include <tee_api_defines.h>
50 #include <trace.h>
51 #include <types_ext.h>
52 #include <utee_defines.h>
53 #include <util.h>
54 
55 #define PAGER_AE_KEY_BITS	256
56 
57 struct pager_aes_gcm_iv {
58 	uint32_t iv[3];
59 };
60 
61 #define PAGER_AES_GCM_TAG_LEN	16
62 
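/*
 * struct pager_rw_pstate - per-page state for a paged read/write page
 * @iv		counter used to build the AES-GCM IV; incremented on every
 *		save. 0 means the page has never been saved and is still
 *		all zeros.
 * @tag		AES-GCM authentication tag of the latest saved (encrypted)
 *		copy of the page.
 */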
63 struct pager_rw_pstate {
64 	uint64_t iv;
65 	uint8_t tag[PAGER_AES_GCM_TAG_LEN];
66 };
67 
68 enum area_type {
69 	AREA_TYPE_RO,
70 	AREA_TYPE_RW,
71 	AREA_TYPE_LOCK,
72 };
73 
74 struct tee_pager_area {
75 	union {
76 		const uint8_t *hashes;
77 		struct pager_rw_pstate *rwp;
78 	} u;
79 	uint8_t *store;
80 	enum area_type type;
81 	uint32_t flags;
82 	vaddr_t base;
83 	size_t size;
84 	struct pgt *pgt;
85 	TAILQ_ENTRY(tee_pager_area) link;
86 };
87 
88 TAILQ_HEAD(tee_pager_area_head, tee_pager_area);
89 
90 static struct tee_pager_area_head tee_pager_area_head =
91 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
92 
93 #define INVALID_PGIDX	UINT_MAX
94 
95 /*
96  * struct tee_pager_pmem - Represents a physical page used for paging.
97  *
98  * @pgidx	an index of the entry in the translation table of the area
99  * @va_alias	Virtual address where the physical page always is aliased.
100  *		Used during remapping of the page when the content needs to
101  *		be updated before it's available at the new location.
102  * @area	a pointer to the pager area
103  */
104 struct tee_pager_pmem {
105 	unsigned pgidx;
106 	void *va_alias;
107 	struct tee_pager_area *area;
108 	TAILQ_ENTRY(tee_pager_pmem) link;
109 };
110 
111 /* The list of physical pages. The first page in the list is the oldest */
112 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
113 
114 static struct tee_pager_pmem_head tee_pager_pmem_head =
115 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
116 
117 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
118 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
119 
120 static struct internal_aes_gcm_key pager_ae_key;
121 
122 /* Number of pages to hide at a time */
123 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
124 
125 /* Number of registered physical pages, used when hiding pages. */
126 static size_t tee_pager_npages;
127 
128 #ifdef CFG_WITH_STATS
129 static struct tee_pager_stats pager_stats;
130 
131 static inline void incr_ro_hits(void)
132 {
133 	pager_stats.ro_hits++;
134 }
135 
136 static inline void incr_rw_hits(void)
137 {
138 	pager_stats.rw_hits++;
139 }
140 
141 static inline void incr_hidden_hits(void)
142 {
143 	pager_stats.hidden_hits++;
144 }
145 
146 static inline void incr_zi_released(void)
147 {
148 	pager_stats.zi_released++;
149 }
150 
151 static inline void incr_npages_all(void)
152 {
153 	pager_stats.npages_all++;
154 }
155 
156 static inline void set_npages(void)
157 {
158 	pager_stats.npages = tee_pager_npages;
159 }
160 
161 void tee_pager_get_stats(struct tee_pager_stats *stats)
162 {
163 	*stats = pager_stats;
164 
165 	pager_stats.hidden_hits = 0;
166 	pager_stats.ro_hits = 0;
167 	pager_stats.rw_hits = 0;
168 	pager_stats.zi_released = 0;
169 }
170 
171 #else /* CFG_WITH_STATS */
172 static inline void incr_ro_hits(void) { }
173 static inline void incr_rw_hits(void) { }
174 static inline void incr_hidden_hits(void) { }
175 static inline void incr_zi_released(void) { }
176 static inline void incr_npages_all(void) { }
177 static inline void set_npages(void) { }
178 
179 void tee_pager_get_stats(struct tee_pager_stats *stats)
180 {
181 	memset(stats, 0, sizeof(struct tee_pager_stats));
182 }
183 #endif /* CFG_WITH_STATS */
184 
185 #define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
186 #define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
187 #define TBL_SHIFT	SMALL_PAGE_SHIFT
188 
189 #define EFFECTIVE_VA_SIZE \
190 	(ROUNDUP(TEE_RAM_VA_START + CFG_TEE_RAM_VA_SIZE, \
191 		 CORE_MMU_PGDIR_SIZE) - \
192 	 ROUNDDOWN(TEE_RAM_VA_START, CORE_MMU_PGDIR_SIZE))
193 
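/*
 * One struct pager_table covers one pgdir-sized translation table in the
 * paged TEE VA range. Illustrative numbers (configuration dependent): with
 * the LPAE format, CORE_MMU_PGDIR_SIZE is 2 MiB and SMALL_PAGE_SIZE is
 * 4 KiB, so TBL_NUM_ENTRIES is 512 and EFFECTIVE_VA_SIZE rounds the TEE VA
 * range out to whole 2 MiB tables.
 */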
194 static struct pager_table {
195 	struct pgt pgt;
196 	struct core_mmu_table_info tbl_info;
197 } pager_tables[EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE];
198 
199 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
200 
201 /* Defines the range of the alias area */
202 static tee_mm_entry_t *pager_alias_area;
203 /*
204  * Physical pages are added in a stack-like fashion to the alias area.
205  * @pager_alias_next_free gives the address of the next free entry as
206  * long as it is non-zero.
207  */
208 static uintptr_t pager_alias_next_free;
209 
210 #ifdef CFG_TEE_CORE_DEBUG
211 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
212 
213 static uint32_t pager_lock_dldetect(const char *func, const int line,
214 				    struct abort_info *ai)
215 {
216 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
217 	unsigned int retries = 0;
218 	unsigned int reminder = 0;
219 
220 	while (!cpu_spin_trylock(&pager_spinlock)) {
221 		retries++;
222 		if (!retries) {
223 			/* wrapped, time to report */
224 			trace_printf(func, line, TRACE_ERROR, true,
225 				     "possible spinlock deadlock reminder %u",
226 				     reminder);
227 			if (reminder < UINT_MAX)
228 				reminder++;
229 			if (ai)
230 				abort_print(ai);
231 		}
232 	}
233 
234 	return exceptions;
235 }
236 #else
237 static uint32_t pager_lock(struct abort_info __unused *ai)
238 {
239 	return cpu_spin_lock_xsave(&pager_spinlock);
240 }
241 #endif
242 
243 static uint32_t pager_lock_check_stack(size_t stack_size)
244 {
245 	if (stack_size) {
246 		int8_t buf[stack_size];
247 		size_t n;
248 
249 		/*
250 		 * Make sure to touch all pages of the stack that we expect
251 		 * to use with this lock held. We need to take any page
252 		 * faults now, before the lock is taken, or we'll deadlock
253 		 * the pager. The pages that are populated in this way will
254 		 * eventually be released at certain save transitions of
255 		 * the thread.
256 		 */
257 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
258 			write8(1, (vaddr_t)buf + n);
259 		write8(1, (vaddr_t)buf + stack_size - 1);
260 	}
261 
262 	return pager_lock(NULL);
263 }
264 
265 static void pager_unlock(uint32_t exceptions)
266 {
267 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
268 }
269 
270 void *tee_pager_phys_to_virt(paddr_t pa)
271 {
272 	struct core_mmu_table_info ti;
273 	unsigned idx;
274 	uint32_t a;
275 	paddr_t p;
276 	vaddr_t v;
277 	size_t n;
278 
279 	/*
280 	 * Most addresses are mapped linearly, try that first if possible.
281 	 */
282 	if (!tee_pager_get_table_info(pa, &ti))
283 		return NULL; /* impossible pa */
284 	idx = core_mmu_va2idx(&ti, pa);
285 	core_mmu_get_entry(&ti, idx, &p, &a);
286 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
287 		return (void *)core_mmu_idx2va(&ti, idx);
288 
289 	n = 0;
290 	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
291 	while (true) {
292 		while (idx < TBL_NUM_ENTRIES) {
293 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
294 			if (v >= (TEE_RAM_VA_START + CFG_TEE_RAM_VA_SIZE))
295 				return NULL;
296 
297 			core_mmu_get_entry(&pager_tables[n].tbl_info,
298 					   idx, &p, &a);
299 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
300 				return (void *)v;
301 			idx++;
302 		}
303 
304 		n++;
305 		if (n >= ARRAY_SIZE(pager_tables))
306 			return NULL;
307 		idx = 0;
308 	}
309 
310 	return NULL;
311 }
312 
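/*
 * Returns the pager_table covering @va, or NULL if @va is outside the
 * range covered by the pager.
 */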
313 static struct pager_table *find_pager_table_may_fail(vaddr_t va)
314 {
315 	size_t n;
316 	const vaddr_t mask = CORE_MMU_PGDIR_MASK;
317 
318 	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
319 	    CORE_MMU_PGDIR_SHIFT;
320 	if (n >= ARRAY_SIZE(pager_tables))
321 		return NULL;
322 
323 	assert(va >= pager_tables[n].tbl_info.va_base &&
324 	       va <= (pager_tables[n].tbl_info.va_base | mask));
325 
326 	return pager_tables + n;
327 }
328 
329 static struct pager_table *find_pager_table(vaddr_t va)
330 {
331 	struct pager_table *pt = find_pager_table_may_fail(va);
332 
333 	assert(pt);
334 	return pt;
335 }
336 
337 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
338 {
339 	struct pager_table *pt = find_pager_table_may_fail(va);
340 
341 	if (!pt)
342 		return false;
343 
344 	*ti = pt->tbl_info;
345 	return true;
346 }
347 
348 static struct core_mmu_table_info *find_table_info(vaddr_t va)
349 {
350 	return &find_pager_table(va)->tbl_info;
351 }
352 
353 static struct pgt *find_core_pgt(vaddr_t va)
354 {
355 	return &find_pager_table(va)->pgt;
356 }
357 
358 static void set_alias_area(tee_mm_entry_t *mm)
359 {
360 	struct pager_table *pt;
361 	unsigned idx;
362 	vaddr_t smem = tee_mm_get_smem(mm);
363 	size_t nbytes = tee_mm_get_bytes(mm);
364 	vaddr_t v;
365 
366 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
367 
368 	assert(!pager_alias_area);
369 	pager_alias_area = mm;
370 	pager_alias_next_free = smem;
371 
372 	/* Clear all mappings in the alias area */
373 	pt = find_pager_table(smem);
374 	idx = core_mmu_va2idx(&pt->tbl_info, smem);
375 	while (pt <= (pager_tables + ARRAY_SIZE(pager_tables) - 1)) {
376 		while (idx < TBL_NUM_ENTRIES) {
377 			v = core_mmu_idx2va(&pt->tbl_info, idx);
378 			if (v >= (smem + nbytes))
379 				goto out;
380 
381 			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
382 			idx++;
383 		}
384 
385 		pt++;
386 		idx = 0;
387 	}
388 
389 out:
390 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
391 }
392 
393 static void generate_ae_key(void)
394 {
395 	uint8_t key[PAGER_AE_KEY_BITS / 8];
396 
397 	if (rng_generate(key, sizeof(key)) != TEE_SUCCESS)
398 		panic("failed to generate random");
399 	if (internal_aes_gcm_expand_enc_key(key, sizeof(key),
400 					    &pager_ae_key))
401 		panic("failed to expand key");
402 }
403 
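/* Counts the number of non-empty entries in a translation table */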
404 static size_t tbl_usage_count(struct core_mmu_table_info *ti)
405 {
406 	size_t n;
407 	paddr_t pa;
408 	size_t usage = 0;
409 
410 	for (n = 0; n < ti->num_entries; n++) {
411 		core_mmu_get_entry(ti, n, &pa, NULL);
412 		if (pa)
413 			usage++;
414 	}
415 	return usage;
416 }
417 
418 static void area_get_entry(struct tee_pager_area *area, size_t idx,
419 			   paddr_t *pa, uint32_t *attr)
420 {
421 	assert(area->pgt);
422 	assert(idx < TBL_NUM_ENTRIES);
423 	core_mmu_get_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
424 }
425 
426 static void area_set_entry(struct tee_pager_area *area, size_t idx,
427 			   paddr_t pa, uint32_t attr)
428 {
429 	assert(area->pgt);
430 	assert(idx < TBL_NUM_ENTRIES);
431 	core_mmu_set_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
432 }
433 
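/*
 * Translate between a virtual address and the index of its entry in the
 * translation table of the pgdir that the area lives in. The index is
 * relative to the pgdir base, not to area->base. For example (illustrative,
 * assuming 4 KiB pages and a pgdir starting at 0x40200000): va 0x40203000
 * gives idx 3, and idx 3 gives va 0x40203000 back.
 */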
434 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
435 {
436 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
437 }
438 
439 static vaddr_t area_idx2va(struct tee_pager_area *area, size_t idx)
440 {
441 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
442 }
443 
444 void tee_pager_early_init(void)
445 {
446 	size_t n;
447 
448 	/*
449 	 * Note that this depends on add_pager_vaspace() adding vaspace
450 	 * after end of memory.
451 	 */
452 	for (n = 0; n < ARRAY_SIZE(pager_tables); n++) {
453 		if (!core_mmu_find_table(TEE_RAM_VA_START +
454 					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
455 					 &pager_tables[n].tbl_info))
456 			panic("can't find mmu tables");
457 
458 		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
459 			panic("Unsupported page size in translation table");
460 		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
461 		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);
462 
463 		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
464 		pgt_set_used_entries(&pager_tables[n].pgt,
465 				tbl_usage_count(&pager_tables[n].tbl_info));
466 	}
467 }
468 
469 void tee_pager_init(tee_mm_entry_t *mm_alias)
470 {
471 	set_alias_area(mm_alias);
472 	generate_ae_key();
473 }
474 
475 static void *pager_add_alias_page(paddr_t pa)
476 {
477 	unsigned idx;
478 	struct core_mmu_table_info *ti;
479 	/* Alias pages are mapped without write permission: the pager enables writes at runtime when needed */
480 	uint32_t attr = TEE_MATTR_VALID_BLOCK |
481 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
482 			TEE_MATTR_SECURE | TEE_MATTR_PR;
483 
484 	DMSG("0x%" PRIxPA, pa);
485 
486 	ti = find_table_info(pager_alias_next_free);
487 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
488 	core_mmu_set_entry(ti, idx, pa, attr);
489 	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
490 	pager_alias_next_free += SMALL_PAGE_SIZE;
491 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
492 				      tee_mm_get_bytes(pager_alias_area)))
493 		pager_alias_next_free = 0;
494 	return (void *)core_mmu_idx2va(ti, idx);
495 }
496 
497 static struct tee_pager_area *alloc_area(struct pgt *pgt,
498 					 vaddr_t base, size_t size,
499 					 uint32_t flags, const void *store,
500 					 const void *hashes)
501 {
502 	struct tee_pager_area *area = calloc(1, sizeof(*area));
503 	enum area_type at;
504 	tee_mm_entry_t *mm_store = NULL;
505 
506 	if (!area)
507 		return NULL;
508 
509 	if (flags & (TEE_MATTR_PW | TEE_MATTR_UW)) {
510 		if (flags & TEE_MATTR_LOCKED) {
511 			at = AREA_TYPE_LOCK;
512 			goto out;
513 		}
514 		mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
515 		if (!mm_store)
516 			goto bad;
517 		area->store = phys_to_virt(tee_mm_get_smem(mm_store),
518 					   MEM_AREA_TA_RAM);
519 		if (!area->store)
520 			goto bad;
521 		area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
522 				     sizeof(struct pager_rw_pstate));
523 		if (!area->u.rwp)
524 			goto bad;
525 		at = AREA_TYPE_RW;
526 	} else {
527 		area->store = (void *)store;
528 		area->u.hashes = hashes;
529 		at = AREA_TYPE_RO;
530 	}
531 out:
532 	area->pgt = pgt;
533 	area->base = base;
534 	area->size = size;
535 	area->flags = flags;
536 	area->type = at;
537 	return area;
538 bad:
539 	tee_mm_free(mm_store);
540 	free(area->u.rwp);
541 	free(area);
542 	return NULL;
543 }
544 
545 static void area_insert_tail(struct tee_pager_area *area)
546 {
547 	uint32_t exceptions = pager_lock_check_stack(8);
548 
549 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
550 
551 	pager_unlock(exceptions);
552 }
553 KEEP_PAGER(area_insert_tail);
554 
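/*
 * Registers the range [base, base + size) as paged core memory. Read-only
 * ranges must supply a backing store and a SHA-256 hash per page; writable
 * (non-locked) ranges get a store allocated from tee_mm_sec_ddr and are
 * protected with authenticated encryption instead.
 */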
555 void tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
556 			     const void *store, const void *hashes)
557 {
558 	struct tee_pager_area *area;
559 	vaddr_t b = base;
560 	size_t s = size;
561 	size_t s2;
562 
563 
564 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
565 		base, base + size, flags, store, hashes);
566 
567 	if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) {
568 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);
569 		panic();
570 	}
571 
572 	if (!(flags & TEE_MATTR_PW) && (!store || !hashes))
573 		panic("non-write pages must provide store and hashes");
574 
575 	if ((flags & TEE_MATTR_PW) && (store || hashes))
576 		panic("write pages cannot provide store or hashes");
577 
578 	while (s) {
579 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
580 		area = alloc_area(find_core_pgt(b), b, s2, flags,
581 				  (const uint8_t *)store + b - base,
582 				  (const uint8_t *)hashes + (b - base) /
583 							SMALL_PAGE_SIZE *
584 							TEE_SHA256_HASH_SIZE);
585 		if (!area)
586 			panic("alloc_area");
587 		area_insert_tail(area);
588 		b += s2;
589 		s -= s2;
590 	}
591 }
592 
593 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
594 					vaddr_t va)
595 {
596 	struct tee_pager_area *area;
597 
598 	if (!areas)
599 		return NULL;
600 
601 	TAILQ_FOREACH(area, areas, link) {
602 		if (core_is_buffer_inside(va, 1, area->base, area->size))
603 			return area;
604 	}
605 	return NULL;
606 }
607 
608 #ifdef CFG_PAGED_USER_TA
609 static struct tee_pager_area *find_uta_area(vaddr_t va)
610 {
611 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
612 
613 	if (!ctx || !is_user_ta_ctx(ctx))
614 		return NULL;
615 	return find_area(to_user_ta_ctx(ctx)->areas, va);
616 }
617 #else
618 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
619 {
620 	return NULL;
621 }
622 #endif /*CFG_PAGED_USER_TA*/
623 
624 
625 static uint32_t get_area_mattr(uint32_t area_flags)
626 {
627 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
628 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
629 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
630 
631 	return attr;
632 }
633 
634 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
635 {
636 	struct core_mmu_table_info *ti;
637 	paddr_t pa;
638 	unsigned idx;
639 
640 	ti = find_table_info((vaddr_t)pmem->va_alias);
641 	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
642 	core_mmu_get_entry(ti, idx, &pa, NULL);
643 	return pa;
644 }
645 
646 static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
647 			void *dst)
648 {
649 	struct pager_aes_gcm_iv iv = {
650 		{ (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
651 	};
652 	size_t tag_len = sizeof(rwp->tag);
653 
654 	return !internal_aes_gcm_dec(&pager_ae_key, &iv, sizeof(iv),
655 				     NULL, 0, src, SMALL_PAGE_SIZE, dst,
656 				     rwp->tag, tag_len);
657 }
658 
659 static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
660 {
661 	struct pager_aes_gcm_iv iv;
662 	size_t tag_len = sizeof(rwp->tag);
663 
664 	assert((rwp->iv + 1) > rwp->iv);
665 	rwp->iv++;
666 	/*
667 	 * IV is constructed as recommended in section "8.2.1 Deterministic
668 	 * Construction" of "Recommendation for Block Cipher Modes of
669 	 * Operation: Galois/Counter Mode (GCM) and GMAC",
670 	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
671 	 */
672 	iv.iv[0] = (vaddr_t)rwp;
673 	iv.iv[1] = rwp->iv >> 32;
674 	iv.iv[2] = rwp->iv;
675 
676 	if (internal_aes_gcm_enc(&pager_ae_key, &iv, sizeof(iv), NULL, 0,
677 				 src, SMALL_PAGE_SIZE, dst, rwp->tag, &tag_len))
678 		panic("gcm failed");
679 }
680 
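/*
 * Fills the aliased physical page with the content belonging to @page_va
 * in @area: copy and hash-check for read-only areas, decrypt (or zero-fill
 * if never saved) for read/write areas, plain zero-fill for locked areas.
 * The alias mapping is made temporarily writable while the page is filled.
 */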
681 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
682 			void *va_alias)
683 {
684 	size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
685 	const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;
686 	struct core_mmu_table_info *ti;
687 	uint32_t attr_alias;
688 	paddr_t pa_alias;
689 	unsigned int idx_alias;
690 
691 	/* Ensure we are allowed to write to the aliased virtual page */
692 	ti = find_table_info((vaddr_t)va_alias);
693 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
694 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
695 	if (!(attr_alias & TEE_MATTR_PW)) {
696 		attr_alias |= TEE_MATTR_PW;
697 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
698 		tlbi_mva_allasid((vaddr_t)va_alias);
699 	}
700 
701 	asan_tag_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
702 	switch (area->type) {
703 	case AREA_TYPE_RO:
704 		{
705 			const void *hash = area->u.hashes +
706 					   idx * TEE_SHA256_HASH_SIZE;
707 
708 			memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
709 			incr_ro_hits();
710 
711 			if (hash_sha256_check(hash, va_alias,
712 					      SMALL_PAGE_SIZE) != TEE_SUCCESS) {
713 				EMSG("PH 0x%" PRIxVA " failed", page_va);
714 				panic();
715 			}
716 		}
717 		/* Forbid write to aliases for read-only (maybe exec) pages */
718 		attr_alias &= ~TEE_MATTR_PW;
719 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
720 		tlbi_mva_allasid((vaddr_t)va_alias);
721 		break;
722 	case AREA_TYPE_RW:
723 		FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
724 			va_alias, page_va, area->u.rwp[idx].iv);
725 		if (!area->u.rwp[idx].iv)
726 			memset(va_alias, 0, SMALL_PAGE_SIZE);
727 		else if (!decrypt_page(&area->u.rwp[idx], stored_page,
728 				       va_alias)) {
729 			EMSG("PH 0x%" PRIxVA " failed", page_va);
730 			panic();
731 		}
732 		incr_rw_hits();
733 		break;
734 	case AREA_TYPE_LOCK:
735 		FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
736 		memset(va_alias, 0, SMALL_PAGE_SIZE);
737 		break;
738 	default:
739 		panic();
740 	}
741 	asan_tag_no_access(va_alias, (uint8_t *)va_alias + SMALL_PAGE_SIZE);
742 }
743 
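/*
 * If the page belongs to a read/write area and is dirty, encrypt it with
 * the pager key and write the ciphertext (and tag) back to the area's
 * store. Clean and read-only pages need no saving since they can be
 * reloaded and verified from their original store.
 */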
744 static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
745 {
746 	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
747 				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;
748 
749 	if (pmem->area->type == AREA_TYPE_RW && (attr & dirty_bits)) {
750 		size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
751 		size_t idx = pmem->pgidx - (offs >> SMALL_PAGE_SHIFT);
752 		void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;
753 
754 		assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW));
755 		asan_tag_access(pmem->va_alias,
756 				(uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
757 		encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
758 			     stored_page);
759 		asan_tag_no_access(pmem->va_alias,
760 				   (uint8_t *)pmem->va_alias + SMALL_PAGE_SIZE);
761 		FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
762 			pmem->area->base + idx * SMALL_PAGE_SIZE,
763 			pmem->area->u.rwp[idx].iv);
764 	}
765 }
766 
767 #ifdef CFG_PAGED_USER_TA
768 static void free_area(struct tee_pager_area *area)
769 {
770 	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr,
771 				virt_to_phys(area->store)));
772 	if (area->type == AREA_TYPE_RW)
773 		free(area->u.rwp);
774 	free(area);
775 }
776 
777 static bool pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
778 			       size_t size)
779 {
780 	struct tee_pager_area *area;
781 	uint32_t flags;
782 	vaddr_t b = base;
783 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
784 
785 	if (!utc->areas) {
786 		utc->areas = malloc(sizeof(*utc->areas));
787 		if (!utc->areas)
788 			return false;
789 		TAILQ_INIT(utc->areas);
790 	}
791 
792 	flags = TEE_MATTR_PRW | TEE_MATTR_URWX;
793 
794 	while (s) {
795 		size_t s2;
796 
797 		if (find_area(utc->areas, b))
798 			return false;
799 
800 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
801 
802 		/* Table info will be set when the context is activated. */
803 		area = alloc_area(NULL, b, s2, flags, NULL, NULL);
804 		if (!area)
805 			return false;
806 		TAILQ_INSERT_TAIL(utc->areas, area, link);
807 		b += s2;
808 		s -= s2;
809 	}
810 
811 	return true;
812 }
813 
814 bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size)
815 {
816 	struct thread_specific_data *tsd = thread_get_tsd();
817 	struct tee_pager_area *area;
818 	struct core_mmu_table_info dir_info = { NULL };
819 
820 	if (&utc->ctx != tsd->ctx) {
821 		/*
822 		 * Changes are to a utc that isn't active. Just add the
823 		 * areas; page tables will be dealt with later.
824 		 */
825 		return pager_add_uta_area(utc, base, size);
826 	}
827 
828 	/*
829 	 * Assign page tables before adding areas to be able to tell which
830 	 * are newly added and should be removed in case of failure.
831 	 */
832 	tee_pager_assign_uta_tables(utc);
833 	if (!pager_add_uta_area(utc, base, size)) {
834 		struct tee_pager_area *next_a;
835 
836 		/* Remove all added areas */
837 		TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
838 			if (!area->pgt) {
839 				TAILQ_REMOVE(utc->areas, area, link);
840 				free_area(area);
841 			}
842 		}
843 		return false;
844 	}
845 
846 	/*
847 	 * Assign page tables to the new areas and make sure that the page
848 	 * tables are registered in the upper table.
849 	 */
850 	tee_pager_assign_uta_tables(utc);
851 	core_mmu_get_user_pgdir(&dir_info);
852 	TAILQ_FOREACH(area, utc->areas, link) {
853 		paddr_t pa;
854 		size_t idx;
855 		uint32_t attr;
856 
857 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
858 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
859 
860 		/*
861 		 * Check if the page table is already in use; if it is, it's
862 		 * already registered.
863 		 */
864 		if (area->pgt->num_used_entries) {
865 			assert(attr & TEE_MATTR_TABLE);
866 			assert(pa == virt_to_phys(area->pgt->tbl));
867 			continue;
868 		}
869 
870 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
871 		pa = virt_to_phys(area->pgt->tbl);
872 		assert(pa);
873 		/*
874 		 * Note that the update of the table entry is guaranteed to
875 		 * be atomic.
876 		 */
877 		core_mmu_set_entry(&dir_info, idx, pa, attr);
878 	}
879 
880 	return true;
881 }
882 
883 static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
884 				   struct pgt *pgt)
885 {
886 	assert(pgt);
887 	ti->table = pgt->tbl;
888 	ti->va_base = pgt->vabase;
889 	ti->level = TBL_LEVEL;
890 	ti->shift = TBL_SHIFT;
891 	ti->num_entries = TBL_NUM_ENTRIES;
892 }
893 
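/*
 * Moves an area, including any resident pages, from its current page table
 * and base address to @new_pgt and @new_base. Used when a user TA region
 * is transferred to another context.
 */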
894 static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
895 			   vaddr_t new_base)
896 {
897 	uint32_t exceptions = pager_lock_check_stack(64);
898 
899 	/*
900 	 * If there's no pgt assigned to the old area there's no pages to
901 	 * deal with either, just update with a new pgt and base.
902 	 */
903 	if (area->pgt) {
904 		struct core_mmu_table_info old_ti;
905 		struct core_mmu_table_info new_ti;
906 		struct tee_pager_pmem *pmem;
907 
908 		init_tbl_info_from_pgt(&old_ti, area->pgt);
909 		init_tbl_info_from_pgt(&new_ti, new_pgt);
910 
911 
912 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
913 			vaddr_t va;
914 			paddr_t pa;
915 			uint32_t attr;
916 
917 			if (pmem->area != area)
918 				continue;
919 			core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr);
920 			core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0);
921 
922 			assert(pa == get_pmem_pa(pmem));
923 			assert(attr);
924 			assert(area->pgt->num_used_entries);
925 			area->pgt->num_used_entries--;
926 
927 			va = core_mmu_idx2va(&old_ti, pmem->pgidx);
928 			va = va - area->base + new_base;
929 			pmem->pgidx = core_mmu_va2idx(&new_ti, va);
930 			core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr);
931 			new_pgt->num_used_entries++;
932 		}
933 	}
934 
935 	area->pgt = new_pgt;
936 	area->base = new_base;
937 	pager_unlock(exceptions);
938 }
939 KEEP_PAGER(transpose_area);
940 
941 void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
942 				   vaddr_t src_base,
943 				   struct user_ta_ctx *dst_utc,
944 				   vaddr_t dst_base, struct pgt **dst_pgt,
945 				   size_t size)
946 {
947 	struct tee_pager_area *area;
948 	struct tee_pager_area *next_a;
949 
950 	TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
951 		vaddr_t new_area_base;
952 		size_t new_idx;
953 
954 		if (!core_is_buffer_inside(area->base, area->size,
955 					  src_base, size))
956 			continue;
957 
958 		TAILQ_REMOVE(src_utc->areas, area, link);
959 
960 		new_area_base = dst_base + (src_base - area->base);
961 		new_idx = (new_area_base - dst_pgt[0]->vabase) /
962 			  CORE_MMU_PGDIR_SIZE;
963 		assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
964 		       dst_pgt[new_idx]->vabase);
965 		transpose_area(area, dst_pgt[new_idx], new_area_base);
966 
967 		/*
968 		 * Assert that this will not cause any conflicts in the new
969 		 * utc.  This should already be guaranteed, but a bug here
970 		 * could be tricky to find.
971 		 */
972 		assert(!find_area(dst_utc->areas, area->base));
973 		TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
974 	}
975 }
976 
977 static void rem_area(struct tee_pager_area_head *area_head,
978 		     struct tee_pager_area *area)
979 {
980 	struct tee_pager_pmem *pmem;
981 	uint32_t exceptions;
982 
983 	exceptions = pager_lock_check_stack(64);
984 
985 	TAILQ_REMOVE(area_head, area, link);
986 
987 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
988 		if (pmem->area == area) {
989 			area_set_entry(area, pmem->pgidx, 0, 0);
990 			tlbi_mva_allasid(area_idx2va(area, pmem->pgidx));
991 			pgt_dec_used_entries(area->pgt);
992 			pmem->area = NULL;
993 			pmem->pgidx = INVALID_PGIDX;
994 		}
995 	}
996 
997 	pager_unlock(exceptions);
998 	free_area(area);
999 }
1000 KEEP_PAGER(rem_area);
1001 
1002 void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
1003 			      size_t size)
1004 {
1005 	struct tee_pager_area *area;
1006 	struct tee_pager_area *next_a;
1007 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
1008 
1009 	TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
1010 		if (core_is_buffer_inside(area->base, area->size, base, s))
1011 			rem_area(utc->areas, area);
1012 	}
1013 }
1014 
1015 void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
1016 {
1017 	struct tee_pager_area *area;
1018 
1019 	if (!utc->areas)
1020 		return;
1021 
1022 	while (true) {
1023 		area = TAILQ_FIRST(utc->areas);
1024 		if (!area)
1025 			break;
1026 		TAILQ_REMOVE(utc->areas, area, link);
1027 		free_area(area);
1028 	}
1029 
1030 	free(utc->areas);
1031 }
1032 
1033 bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
1034 				 size_t size, uint32_t flags)
1035 {
1036 	bool ret;
1037 	vaddr_t b = base;
1038 	size_t s = size;
1039 	size_t s2;
1040 	struct tee_pager_area *area = find_area(utc->areas, b);
1041 	uint32_t exceptions;
1042 	struct tee_pager_pmem *pmem;
1043 	paddr_t pa;
1044 	uint32_t a;
1045 	uint32_t f;
1046 
1047 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
1048 	if (f & TEE_MATTR_UW)
1049 		f |= TEE_MATTR_PW;
1050 	f = get_area_mattr(f);
1051 
1052 	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1053 
1054 	while (s) {
1055 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
1056 		if (!area || area->base != b || area->size != s2) {
1057 			ret = false;
1058 			goto out;
1059 		}
1060 		b += s2;
1061 		s -= s2;
1062 
1063 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1064 			if (pmem->area != area)
1065 				continue;
1066 			area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
1067 			if (a & TEE_MATTR_VALID_BLOCK)
1068 				assert(pa == get_pmem_pa(pmem));
1069 			else
1070 				pa = get_pmem_pa(pmem);
1071 			if (a == f)
1072 				continue;
1073 			area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1074 			tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1075 			if (!(flags & TEE_MATTR_UW))
1076 				tee_pager_save_page(pmem, a);
1077 
1078 			area_set_entry(pmem->area, pmem->pgidx, pa, f);
1079 			/*
1080 			 * Make sure the table update is visible before
1081 			 * continuing.
1082 			 */
1083 			dsb_ishst();
1084 
1085 			if (flags & TEE_MATTR_UX) {
1086 				void *va = (void *)area_idx2va(pmem->area,
1087 							       pmem->pgidx);
1088 
1089 				cache_op_inner(DCACHE_AREA_CLEAN, va,
1090 						SMALL_PAGE_SIZE);
1091 				cache_op_inner(ICACHE_AREA_INVALIDATE, va,
1092 						SMALL_PAGE_SIZE);
1093 			}
1094 		}
1095 
1096 		area->flags = f;
1097 		area = TAILQ_NEXT(area, link);
1098 	}
1099 
1100 	ret = true;
1101 out:
1102 	pager_unlock(exceptions);
1103 	return ret;
1104 }
1105 KEEP_PAGER(tee_pager_set_uta_area_attr);
1106 #endif /*CFG_PAGED_USER_TA*/
1107 
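/*
 * If @page_va hits a hidden but still resident physical page, restore its
 * mapping attributes and move it to the tail of the pmem list so it counts
 * as recently used. Returns true if such a page was found.
 */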
1108 static bool tee_pager_unhide_page(vaddr_t page_va)
1109 {
1110 	struct tee_pager_pmem *pmem;
1111 
1112 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1113 		paddr_t pa;
1114 		uint32_t attr;
1115 
1116 		if (pmem->pgidx == INVALID_PGIDX)
1117 			continue;
1118 
1119 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
1120 
1121 		if (!(attr &
1122 		     (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
1123 			continue;
1124 
1125 		if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
1126 			uint32_t a = get_area_mattr(pmem->area->flags);
1127 
1128 			/* page is hidden, show and move to back */
1129 			if (pa != get_pmem_pa(pmem))
1130 				panic("unexpected pa");
1131 
1132 			/*
1133 			 * If it's not a dirty block, then it should be
1134 			 * read only.
1135 			 */
1136 			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
1137 				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1138 			else
1139 				FMSG("Unhide %#" PRIxVA, page_va);
1140 
1141 			if (page_va == 0x8000a000)
1142 				FMSG("unhide %#" PRIxVA " a %#" PRIX32,
1143 					page_va, a);
1144 			area_set_entry(pmem->area, pmem->pgidx, pa, a);
1145 			/*
1146 			 * Note that TLB invalidation isn't needed since
1147 			 * there wasn't a valid mapping before. We should
1148 			 * use a barrier though, to make sure that the
1149 			 * change is visible.
1150 			 */
1151 			dsb_ishst();
1152 
1153 			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1154 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1155 			incr_hidden_hits();
1156 			return true;
1157 		}
1158 	}
1159 
1160 	return false;
1161 }
1162 
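/*
 * Temporarily hides up to TEE_PAGER_NHIDE of the oldest resident pages by
 * unmapping them while keeping their content. The next access faults and
 * goes through tee_pager_unhide_page(), which is how the pager tracks which
 * pages are actually in use so that eviction picks cold pages.
 */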
1163 static void tee_pager_hide_pages(void)
1164 {
1165 	struct tee_pager_pmem *pmem;
1166 	size_t n = 0;
1167 
1168 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1169 		paddr_t pa;
1170 		uint32_t attr;
1171 		uint32_t a;
1172 
1173 		if (n >= TEE_PAGER_NHIDE)
1174 			break;
1175 		n++;
1176 
1177 		/* we cannot hide pages when pmem->area is not defined. */
1178 		if (!pmem->area)
1179 			continue;
1180 
1181 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
1182 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1183 			continue;
1184 
1185 		assert(pa == get_pmem_pa(pmem));
1186 		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)){
1187 			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
1188 			FMSG("Hide %#" PRIxVA,
1189 			     area_idx2va(pmem->area, pmem->pgidx));
1190 		} else
1191 			a = TEE_MATTR_HIDDEN_BLOCK;
1192 
1193 		area_set_entry(pmem->area, pmem->pgidx, pa, a);
1194 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1195 	}
1196 }
1197 
1198 /*
1199  * Find the mapped pmem, unmap it and move it back to the pageable pmem list.
1200  * Return false if the page was not mapped, and true if it was.
1201  */
1202 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1203 				       vaddr_t page_va)
1204 {
1205 	struct tee_pager_pmem *pmem;
1206 	unsigned pgidx;
1207 	paddr_t pa;
1208 	uint32_t attr;
1209 
1210 	pgidx = area_va2idx(area, page_va);
1211 	area_get_entry(area, pgidx, &pa, &attr);
1212 
1213 	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
1214 
1215 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1216 		if (pmem->area != area || pmem->pgidx != pgidx)
1217 			continue;
1218 
1219 		assert(pa == get_pmem_pa(pmem));
1220 		area_set_entry(area, pgidx, 0, 0);
1221 		pgt_dec_used_entries(area->pgt);
1222 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1223 		pmem->area = NULL;
1224 		pmem->pgidx = INVALID_PGIDX;
1225 		tee_pager_npages++;
1226 		set_npages();
1227 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1228 		incr_zi_released();
1229 		return true;
1230 	}
1231 
1232 	return false;
1233 }
1234 
1235 /* Finds the oldest page and unmaps it from its old virtual address */
1236 static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
1237 {
1238 	struct tee_pager_pmem *pmem;
1239 
1240 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1241 	if (!pmem) {
1242 		EMSG("No pmem entries");
1243 		return NULL;
1244 	}
1245 	if (pmem->pgidx != INVALID_PGIDX) {
1246 		uint32_t a;
1247 
1248 		assert(pmem->area && pmem->area->pgt);
1249 		area_get_entry(pmem->area, pmem->pgidx, NULL, &a);
1250 		area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1251 		pgt_dec_used_entries(pmem->area->pgt);
1252 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1253 		tee_pager_save_page(pmem, a);
1254 	}
1255 
1256 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1257 	pmem->pgidx = INVALID_PGIDX;
1258 	pmem->area = NULL;
1259 	if (area->type == AREA_TYPE_LOCK) {
1260 		/* Move page to lock list */
1261 		if (tee_pager_npages <= 0)
1262 			panic("running out of page");
1263 		tee_pager_npages--;
1264 		set_npages();
1265 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1266 	} else {
1267 		/* move page to back */
1268 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1269 	}
1270 
1271 	return pmem;
1272 }
1273 
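/*
 * Handles a fault on a page that is already mapped: either another core
 * mapped it first or the access needs a permission upgrade, e.g. turning a
 * clean read-only page into a writable (dirty) one. Returns true when the
 * abort needs no further paging work; *handled tells whether the access is
 * expected to succeed when retried.
 */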
1274 static bool pager_update_permissions(struct tee_pager_area *area,
1275 			struct abort_info *ai, bool *handled)
1276 {
1277 	unsigned int pgidx = area_va2idx(area, ai->va);
1278 	uint32_t attr;
1279 	paddr_t pa;
1280 
1281 	*handled = false;
1282 
1283 	area_get_entry(area, pgidx, &pa, &attr);
1284 
1285 	/* Not mapped */
1286 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1287 		return false;
1288 
1289 	/* Not readable, should not happen */
1290 	if (abort_is_user_exception(ai)) {
1291 		if (!(attr & TEE_MATTR_UR))
1292 			return true;
1293 	} else {
1294 		if (!(attr & TEE_MATTR_PR)) {
1295 			abort_print_error(ai);
1296 			panic();
1297 		}
1298 	}
1299 
1300 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1301 	case CORE_MMU_FAULT_TRANSLATION:
1302 	case CORE_MMU_FAULT_READ_PERMISSION:
1303 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1304 			/* Check attempting to execute from an NOX page */
1305 			if (abort_is_user_exception(ai)) {
1306 				if (!(attr & TEE_MATTR_UX))
1307 					return true;
1308 			} else {
1309 				if (!(attr & TEE_MATTR_PX)) {
1310 					abort_print_error(ai);
1311 					panic();
1312 				}
1313 			}
1314 		}
1315 		/* Since the page is mapped now it's OK */
1316 		break;
1317 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1318 		/* Check attempting to write to an RO page */
1319 		if (abort_is_user_exception(ai)) {
1320 			if (!(area->flags & TEE_MATTR_UW))
1321 				return true;
1322 			if (!(attr & TEE_MATTR_UW)) {
1323 				FMSG("Dirty %p",
1324 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1325 				area_set_entry(area, pgidx, pa,
1326 					       get_area_mattr(area->flags));
1327 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1328 			}
1329 
1330 		} else {
1331 			if (!(area->flags & TEE_MATTR_PW)) {
1332 				abort_print_error(ai);
1333 				panic();
1334 			}
1335 			if (!(attr & TEE_MATTR_PW)) {
1336 				FMSG("Dirty %p",
1337 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1338 				area_set_entry(area, pgidx, pa,
1339 					       get_area_mattr(area->flags));
1340 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1341 			}
1342 		}
1343 		/* Since permissions have been updated now it's OK */
1344 		break;
1345 	default:
1346 		/* Some fault we can't deal with */
1347 		if (abort_is_user_exception(ai))
1348 			return true;
1349 		abort_print_error(ai);
1350 		panic();
1351 	}
1352 	*handled = true;
1353 	return true;
1354 }
1355 
1356 #ifdef CFG_TEE_CORE_DEBUG
1357 static void stat_handle_fault(void)
1358 {
1359 	static size_t num_faults;
1360 	static size_t min_npages = SIZE_MAX;
1361 	static size_t total_min_npages = SIZE_MAX;
1362 
1363 	num_faults++;
1364 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1365 		DMSG("nfaults %zu npages %zu (min %zu)",
1366 		     num_faults, tee_pager_npages, min_npages);
1367 		min_npages = tee_pager_npages; /* reset */
1368 	}
1369 	if (tee_pager_npages < min_npages)
1370 		min_npages = tee_pager_npages;
1371 	if (tee_pager_npages < total_min_npages)
1372 		total_min_npages = tee_pager_npages;
1373 }
1374 #else
1375 static void stat_handle_fault(void)
1376 {
1377 }
1378 #endif
1379 
1380 bool tee_pager_handle_fault(struct abort_info *ai)
1381 {
1382 	struct tee_pager_area *area;
1383 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1384 	uint32_t exceptions;
1385 	bool ret;
1386 
1387 #ifdef TEE_PAGER_DEBUG_PRINT
1388 	abort_print(ai);
1389 #endif
1390 
1391 	/*
1392 	 * We're updating pages that can affect several active CPUs at a
1393 	 * time below. We end up here because a thread tries to access some
1394 	 * memory that isn't available. We have to be careful when making
1395 	 * that memory available as other threads may succeed in accessing
1396 	 * that address the moment after we've made it available.
1397 	 *
1398 	 * That means that we can't just map the memory and populate the
1399 	 * page, instead we use the aliased mapping to populate the page
1400 	 * and once everything is ready we map it.
1401 	 */
1402 	exceptions = pager_lock(ai);
1403 
1404 	stat_handle_fault();
1405 
1406 	/* check if the access is valid */
1407 	if (abort_is_user_exception(ai)) {
1408 		area = find_uta_area(ai->va);
1409 
1410 	} else {
1411 		area = find_area(&tee_pager_area_head, ai->va);
1412 		if (!area)
1413 			area = find_uta_area(ai->va);
1414 	}
1415 	if (!area || !area->pgt) {
1416 		ret = false;
1417 		goto out;
1418 	}
1419 
1420 	if (!tee_pager_unhide_page(page_va)) {
1421 		struct tee_pager_pmem *pmem = NULL;
1422 		uint32_t attr;
1423 		paddr_t pa;
1424 
1425 		/*
1426 		 * The page wasn't hidden, but some other core may have
1427 		 * updated the table entry before we got here or we need
1428 		 * to make a read-only page read-write (dirty).
1429 		 */
1430 		if (pager_update_permissions(area, ai, &ret)) {
1431 			/*
1432 			 * Nothing more to do with the abort. The problem
1433 			 * could already have been dealt with from another
1434 			 * core, or if ret is false the TA will be panicked.
1435 			 */
1436 			goto out;
1437 		}
1438 
1439 		pmem = tee_pager_get_page(area);
1440 		if (!pmem) {
1441 			abort_print(ai);
1442 			panic();
1443 		}
1444 
1445 		/* load page code & data */
1446 		tee_pager_load_page(area, page_va, pmem->va_alias);
1447 
1448 
1449 		pmem->area = area;
1450 		pmem->pgidx = area_va2idx(area, ai->va);
1451 		attr = get_area_mattr(area->flags) &
1452 			~(TEE_MATTR_PW | TEE_MATTR_UW);
1453 		pa = get_pmem_pa(pmem);
1454 
1455 		/*
1456 		 * We've updated the page using the aliased mapping and
1457 		 * some cache maintenance is now needed if it's an
1458 		 * executable page.
1459 		 *
1460 		 * Since the d-cache is a Physically-indexed,
1461 		 * physically-tagged (PIPT) cache we can clean either the
1462 		 * aliased address or the real virtual address. In this
1463 		 * case we choose the real virtual address.
1464 		 *
1465 		 * The i-cache can also be PIPT, but may be something else
1466 		 * too like VIPT. The current code requires the caches to
1467 		 * implement the IVIPT extension, that is:
1468 		 * "instruction cache maintenance is required only after
1469 		 * writing new data to a physical address that holds an
1470 		 * instruction."
1471 		 *
1472 		 * To portably invalidate the icache the page has to
1473 		 * be mapped at the final virtual address but not
1474 		 * executable.
1475 		 */
1476 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1477 			uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1478 					TEE_MATTR_PW | TEE_MATTR_UW;
1479 
1480 			/* Set a temporary read-only mapping */
1481 			area_set_entry(pmem->area, pmem->pgidx, pa,
1482 				       attr & ~mask);
1483 			tlbi_mva_allasid(page_va);
1484 
1485 			/*
1486 			 * Doing these operations to LoUIS (Level of
1487 			 * unification, Inner Shareable) would be enough
1488 			 */
1489 			cache_op_inner(DCACHE_AREA_CLEAN, (void *)page_va,
1490 				       SMALL_PAGE_SIZE);
1491 			cache_op_inner(ICACHE_AREA_INVALIDATE, (void *)page_va,
1492 				       SMALL_PAGE_SIZE);
1493 
1494 			/* Set the final mapping */
1495 			area_set_entry(area, pmem->pgidx, pa, attr);
1496 			tlbi_mva_allasid(page_va);
1497 		} else {
1498 			area_set_entry(area, pmem->pgidx, pa, attr);
1499 			/*
1500 			 * No need to flush TLB for this entry, it was
1501 			 * invalid. We should use a barrier though, to make
1502 			 * sure that the change is visible.
1503 			 */
1504 			dsb_ishst();
1505 		}
1506 		pgt_inc_used_entries(area->pgt);
1507 
1508 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1509 
1510 	}
1511 
1512 	tee_pager_hide_pages();
1513 	ret = true;
1514 out:
1515 	pager_unlock(exceptions);
1516 	return ret;
1517 }
1518 
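/*
 * Registers @npages physical pages starting at @vaddr with the pager. With
 * @unmap set the pages are unmapped and become free pageable memory,
 * otherwise they stay mapped and are adopted by the pager area covering
 * them.
 */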
1519 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1520 {
1521 	size_t n;
1522 
1523 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1524 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1525 
1526 	/* setup memory */
1527 	for (n = 0; n < npages; n++) {
1528 		struct core_mmu_table_info *ti;
1529 		struct tee_pager_pmem *pmem;
1530 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1531 		unsigned int pgidx;
1532 		paddr_t pa;
1533 		uint32_t attr;
1534 
1535 		ti = find_table_info(va);
1536 		pgidx = core_mmu_va2idx(ti, va);
1537 		/*
1538 		 * Note that we can only support adding pages in the
1539 		 * valid range of this table info, currently not a problem.
1540 		 */
1541 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1542 
1543 		/* Ignore unmapped pages/blocks */
1544 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1545 			continue;
1546 
1547 		pmem = malloc(sizeof(struct tee_pager_pmem));
1548 		if (!pmem)
1549 			panic("out of mem");
1550 
1551 		pmem->va_alias = pager_add_alias_page(pa);
1552 
1553 		if (unmap) {
1554 			pmem->area = NULL;
1555 			pmem->pgidx = INVALID_PGIDX;
1556 			core_mmu_set_entry(ti, pgidx, 0, 0);
1557 			pgt_dec_used_entries(find_core_pgt(va));
1558 		} else {
1559 			/*
1560 			 * The page is still mapped, let's assign the area
1561 			 * and update the protection bits accordingly.
1562 			 */
1563 			pmem->area = find_area(&tee_pager_area_head, va);
1564 			assert(pmem->area->pgt == find_core_pgt(va));
1565 			pmem->pgidx = pgidx;
1566 			assert(pa == get_pmem_pa(pmem));
1567 			area_set_entry(pmem->area, pgidx, pa,
1568 				       get_area_mattr(pmem->area->flags));
1569 		}
1570 
1571 		tee_pager_npages++;
1572 		incr_npages_all();
1573 		set_npages();
1574 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1575 	}
1576 
1577 	/*
1578 	 * As this is done at init, invalidate all TLBs once instead of
1579 	 * targeting only the modified entries.
1580 	 */
1581 	tlbi_all();
1582 }
1583 
1584 #ifdef CFG_PAGED_USER_TA
1585 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1586 {
1587 	struct pgt *p = pgt;
1588 
1589 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1590 		p = SLIST_NEXT(p, link);
1591 	return p;
1592 }
1593 
1594 void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
1595 {
1596 	struct tee_pager_area *area;
1597 	struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1598 
1599 	TAILQ_FOREACH(area, utc->areas, link) {
1600 		if (!area->pgt)
1601 			area->pgt = find_pgt(pgt, area->base);
1602 		else
1603 			assert(area->pgt == find_pgt(pgt, area->base));
1604 		if (!area->pgt)
1605 			panic();
1606 	}
1607 }
1608 
1609 static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
1610 {
1611 	uint32_t attr;
1612 
1613 	assert(pmem->area && pmem->area->pgt);
1614 
1615 	area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
1616 	area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1617 	tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1618 	tee_pager_save_page(pmem, attr);
1619 	assert(pmem->area->pgt->num_used_entries);
1620 	pmem->area->pgt->num_used_entries--;
1621 	pmem->pgidx = INVALID_PGIDX;
1622 	pmem->area = NULL;
1623 }
1624 
1625 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1626 {
1627 	struct tee_pager_pmem *pmem;
1628 	struct tee_pager_area *area;
1629 	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1630 
1631 	if (!pgt->num_used_entries)
1632 		goto out;
1633 
1634 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1635 		if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
1636 			continue;
1637 		if (pmem->area->pgt == pgt)
1638 			pager_save_and_release_entry(pmem);
1639 	}
1640 	assert(!pgt->num_used_entries);
1641 
1642 out:
1643 	if (is_user_ta_ctx(pgt->ctx)) {
1644 		TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
1645 			if (area->pgt == pgt)
1646 				area->pgt = NULL;
1647 		}
1648 	}
1649 
1650 	pager_unlock(exceptions);
1651 }
1652 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1653 #endif /*CFG_PAGED_USER_TA*/
1654 
1655 void tee_pager_release_phys(void *addr, size_t size)
1656 {
1657 	bool unmapped = false;
1658 	vaddr_t va = (vaddr_t)addr;
1659 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1660 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1661 	struct tee_pager_area *area;
1662 	uint32_t exceptions;
1663 
1664 	if (end <= begin)
1665 		return;
1666 
1667 	exceptions = pager_lock_check_stack(128);
1668 
1669 	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
1670 		area = find_area(&tee_pager_area_head, va);
1671 		if (!area)
1672 			panic();
1673 		unmapped |= tee_pager_release_one_phys(area, va);
1674 	}
1675 
1676 	if (unmapped)
1677 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1678 
1679 	pager_unlock(exceptions);
1680 }
1681 KEEP_PAGER(tee_pager_release_phys);
1682 
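/*
 * Allocates paged read/write core memory from the tee_mm_vcore space,
 * rounded up to whole pages. Passing TEE_MATTR_LOCKED in @flags gives
 * memory that stays resident once touched, until released with
 * tee_pager_release_phys(). Illustrative use:
 *
 *	void *buf = tee_pager_alloc(2 * SMALL_PAGE_SIZE, 0);
 */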
1683 void *tee_pager_alloc(size_t size, uint32_t flags)
1684 {
1685 	tee_mm_entry_t *mm;
1686 	uint32_t f = TEE_MATTR_PW | TEE_MATTR_PR | (flags & TEE_MATTR_LOCKED);
1687 	uint8_t *smem;
1688 	size_t bytes;
1689 
1690 	if (!size)
1691 		return NULL;
1692 
1693 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1694 	if (!mm)
1695 		return NULL;
1696 
1697 	bytes = tee_mm_get_bytes(mm);
1698 	smem = (uint8_t *)tee_mm_get_smem(mm);
1699 	tee_pager_add_core_area((vaddr_t)smem, bytes, f, NULL, NULL);
1700 	asan_tag_access(smem, smem + bytes);
1701 
1702 	return smem;
1703 }
1704