xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 53dcd8f7c7e70bef1cf646dd128a9817feabacd4)
1 /*
2  * Copyright (c) 2016, Linaro Limited
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <arm.h>
30 #include <assert.h>
31 #include <io.h>
32 #include <keep.h>
33 #include <kernel/abort.h>
34 #include <kernel/panic.h>
35 #include <kernel/spinlock.h>
36 #include <kernel/tee_misc.h>
37 #include <kernel/tee_ta_manager.h>
38 #include <kernel/thread.h>
39 #include <kernel/tlb_helpers.h>
40 #include <mm/core_memprot.h>
41 #include <mm/tee_mm.h>
42 #include <mm/tee_pager.h>
43 #include <stdlib.h>
44 #include <sys/queue.h>
45 #include <tee_api_defines.h>
46 #include <tee/tee_cryp_provider.h>
47 #include <trace.h>
48 #include <types_ext.h>
49 #include <utee_defines.h>
50 #include <util.h>
51 
52 #include "pager_private.h"
53 
54 #define PAGER_AE_KEY_BITS	256
55 
56 struct pager_rw_pstate {
57 	uint64_t iv;
58 	uint8_t tag[PAGER_AES_GCM_TAG_LEN];
59 };
60 
61 enum area_type {
62 	AREA_TYPE_RO,
63 	AREA_TYPE_RW,
64 	AREA_TYPE_LOCK,
65 };
66 
67 struct tee_pager_area {
68 	union {
69 		const uint8_t *hashes;
70 		struct pager_rw_pstate *rwp;
71 	} u;
72 	uint8_t *store;
73 	enum area_type type;
74 	uint32_t flags;
75 	vaddr_t base;
76 	size_t size;
77 	struct pgt *pgt;
78 	TAILQ_ENTRY(tee_pager_area) link;
79 };
80 
81 TAILQ_HEAD(tee_pager_area_head, tee_pager_area);
82 
83 static struct tee_pager_area_head tee_pager_area_head =
84 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
85 
86 #define INVALID_PGIDX	UINT_MAX
87 
88 /*
89  * struct tee_pager_pmem - Represents a physical page used for paging.
90  *
91  * @pgidx	an index of the entry in area->ti.
92  * @va_alias	Virtual address where the physical page always is aliased.
93  *		Used during remapping of the page when the content need to
94  *		be updated before it's available at the new location.
95  * @area	a pointer to the pager area
96  */
97 struct tee_pager_pmem {
98 	unsigned pgidx;
99 	void *va_alias;
100 	struct tee_pager_area *area;
101 	TAILQ_ENTRY(tee_pager_pmem) link;
102 };
103 
104 /* The list of physical pages. The first page in the list is the oldest */
105 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
106 
107 static struct tee_pager_pmem_head tee_pager_pmem_head =
108 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
109 
110 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
111 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
112 
113 static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8];
114 
115 /* number of pages hidden */
116 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
117 
118 /* Number of registered physical pages, used hiding pages. */
119 static size_t tee_pager_npages;
120 
121 #ifdef CFG_WITH_STATS
122 static struct tee_pager_stats pager_stats;
123 
124 static inline void incr_ro_hits(void)
125 {
126 	pager_stats.ro_hits++;
127 }
128 
129 static inline void incr_rw_hits(void)
130 {
131 	pager_stats.rw_hits++;
132 }
133 
134 static inline void incr_hidden_hits(void)
135 {
136 	pager_stats.hidden_hits++;
137 }
138 
139 static inline void incr_zi_released(void)
140 {
141 	pager_stats.zi_released++;
142 }
143 
144 static inline void incr_npages_all(void)
145 {
146 	pager_stats.npages_all++;
147 }
148 
149 static inline void set_npages(void)
150 {
151 	pager_stats.npages = tee_pager_npages;
152 }
153 
154 void tee_pager_get_stats(struct tee_pager_stats *stats)
155 {
156 	*stats = pager_stats;
157 
158 	pager_stats.hidden_hits = 0;
159 	pager_stats.ro_hits = 0;
160 	pager_stats.rw_hits = 0;
161 	pager_stats.zi_released = 0;
162 }
163 
164 #else /* CFG_WITH_STATS */
165 static inline void incr_ro_hits(void) { }
166 static inline void incr_rw_hits(void) { }
167 static inline void incr_hidden_hits(void) { }
168 static inline void incr_zi_released(void) { }
169 static inline void incr_npages_all(void) { }
170 static inline void set_npages(void) { }
171 
172 void tee_pager_get_stats(struct tee_pager_stats *stats)
173 {
174 	memset(stats, 0, sizeof(struct tee_pager_stats));
175 }
176 #endif /* CFG_WITH_STATS */
177 
178 static struct pgt pager_core_pgt;
179 struct core_mmu_table_info tee_pager_tbl_info;
180 static struct core_mmu_table_info pager_alias_tbl_info;
181 
182 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
183 
184 /* Defines the range of the alias area */
185 static tee_mm_entry_t *pager_alias_area;
186 /*
187  * Physical pages are added in a stack like fashion to the alias area,
188  * @pager_alias_next_free gives the address of next free entry if
189  * @pager_alias_next_free is != 0
190  */
191 static uintptr_t pager_alias_next_free;
192 
193 #ifdef CFG_TEE_CORE_DEBUG
194 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
195 
196 static uint32_t pager_lock_dldetect(const char *func, const int line,
197 				    struct abort_info *ai)
198 {
199 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
200 	unsigned int retries = 0;
201 	unsigned int reminder = 0;
202 
203 	while (!cpu_spin_trylock(&pager_spinlock)) {
204 		retries++;
205 		if (!retries) {
206 			/* wrapped, time to report */
207 			trace_printf(func, line, TRACE_ERROR, true,
208 				     "possible spinlock deadlock reminder %u",
209 				     reminder);
210 			if (reminder < UINT_MAX)
211 				reminder++;
212 			if (ai)
213 				abort_print(ai);
214 		}
215 	}
216 
217 	return exceptions;
218 }
219 #else
220 static uint32_t pager_lock(struct abort_info __unused *ai)
221 {
222 	return cpu_spin_lock_xsave(&pager_spinlock);
223 }
224 #endif
225 
226 static uint32_t pager_lock_check_stack(size_t stack_size)
227 {
228 	if (stack_size) {
229 		int8_t buf[stack_size];
230 		size_t n;
231 
232 		/*
233 		 * Make sure to touch all pages of the stack that we expect
234 		 * to use with this lock held. We need to take eventual
235 		 * page faults before the lock is taken or we'll deadlock
236 		 * the pager. The pages that are populated in this way will
237 		 * eventually be released at certain save transitions of
238 		 * the thread.
239 		 */
240 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
241 			write8(1, (vaddr_t)buf + n);
242 		write8(1, (vaddr_t)buf + stack_size - 1);
243 	}
244 
245 	return pager_lock(NULL);
246 }
247 
248 static void pager_unlock(uint32_t exceptions)
249 {
250 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
251 }
252 
253 void *tee_pager_phys_to_virt(paddr_t pa)
254 {
255 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
256 	unsigned idx;
257 	unsigned end_idx;
258 	uint32_t a;
259 	paddr_t p;
260 
261 	end_idx = core_mmu_va2idx(ti, CFG_TEE_RAM_START +
262 				      CFG_TEE_RAM_VA_SIZE);
263 	/* Most addresses are mapped lineary, try that first if possible. */
264 	idx = core_mmu_va2idx(ti, pa);
265 	if (idx >= core_mmu_va2idx(ti, CFG_TEE_RAM_START) &&
266 	    idx < end_idx) {
267 		core_mmu_get_entry(ti, idx, &p, &a);
268 		if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
269 			return (void *)core_mmu_idx2va(ti, idx);
270 	}
271 
272 	for (idx = core_mmu_va2idx(ti, CFG_TEE_RAM_START);
273 	     idx < end_idx; idx++) {
274 		core_mmu_get_entry(ti, idx, &p, &a);
275 		if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
276 			return (void *)core_mmu_idx2va(ti, idx);
277 	}
278 
279 	return NULL;
280 }
281 
282 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
283 {
284 	if (va >= (CFG_TEE_LOAD_ADDR & ~CORE_MMU_PGDIR_MASK) &&
285 	    va <= (CFG_TEE_LOAD_ADDR | CORE_MMU_PGDIR_MASK)) {
286 		*ti = tee_pager_tbl_info;
287 		return true;
288 	}
289 
290 	return false;
291 }
292 
293 static void set_alias_area(tee_mm_entry_t *mm)
294 {
295 	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
296 	size_t tbl_va_size;
297 	unsigned idx;
298 	unsigned last_idx;
299 	vaddr_t smem = tee_mm_get_smem(mm);
300 	size_t nbytes = tee_mm_get_bytes(mm);
301 
302 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
303 
304 	if (pager_alias_area)
305 		panic("null pager_alias_area");
306 
307 	if (!ti->num_entries && !core_mmu_find_table(smem, UINT_MAX, ti))
308 		panic("Can't find translation table");
309 
310 	if ((1 << ti->shift) != SMALL_PAGE_SIZE)
311 		panic("Unsupported page size in translation table");
312 
313 	tbl_va_size = (1 << ti->shift) * ti->num_entries;
314 	if (!core_is_buffer_inside(smem, nbytes,
315 				   ti->va_base, tbl_va_size)) {
316 		EMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit it translation table 0x%" PRIxVA " len 0x%zx",
317 		     smem, nbytes, ti->va_base, tbl_va_size);
318 		panic();
319 	}
320 
321 	if (smem & SMALL_PAGE_MASK || nbytes & SMALL_PAGE_MASK)
322 		panic("invalid area alignment");
323 
324 	pager_alias_area = mm;
325 	pager_alias_next_free = smem;
326 
327 	/* Clear all mapping in the alias area */
328 	idx = core_mmu_va2idx(ti, smem);
329 	last_idx = core_mmu_va2idx(ti, smem + nbytes);
330 	for (; idx < last_idx; idx++)
331 		core_mmu_set_entry(ti, idx, 0, 0);
332 
333 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
334 }
335 
336 static void generate_ae_key(void)
337 {
338 	if (rng_generate(pager_ae_key, sizeof(pager_ae_key)) != TEE_SUCCESS)
339 		panic("failed to generate random");
340 }
341 
342 void tee_pager_early_init(void)
343 {
344 	if (!core_mmu_find_table(CFG_TEE_RAM_START, UINT_MAX,
345 				 &tee_pager_tbl_info))
346 		panic("can't find mmu tables");
347 
348 	if (tee_pager_tbl_info.shift != SMALL_PAGE_SHIFT)
349 		panic("Unsupported page size in translation table");
350 }
351 
352 void tee_pager_init(tee_mm_entry_t *mm_alias)
353 {
354 	set_alias_area(mm_alias);
355 	generate_ae_key();
356 }
357 
358 static void *pager_add_alias_page(paddr_t pa)
359 {
360 	unsigned idx;
361 	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
362 	/* Alias pages mapped without write permission: runtime will care */
363 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
364 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
365 			TEE_MATTR_SECURE | TEE_MATTR_PR;
366 
367 	DMSG("0x%" PRIxPA, pa);
368 
369 	if (!pager_alias_next_free || !ti->num_entries)
370 		panic("invalid alias entry");
371 
372 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
373 	core_mmu_set_entry(ti, idx, pa, attr);
374 	pgt_inc_used_entries(&pager_core_pgt);
375 	pager_alias_next_free += SMALL_PAGE_SIZE;
376 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
377 				      tee_mm_get_bytes(pager_alias_area)))
378 		pager_alias_next_free = 0;
379 	return (void *)core_mmu_idx2va(ti, idx);
380 }
381 
382 static struct tee_pager_area *alloc_area(struct pgt *pgt,
383 					 vaddr_t base, size_t size,
384 					 uint32_t flags, const void *store,
385 					 const void *hashes)
386 {
387 	struct tee_pager_area *area = calloc(1, sizeof(*area));
388 	enum area_type at;
389 	tee_mm_entry_t *mm_store = NULL;
390 
391 	if (!area)
392 		return NULL;
393 
394 	if (flags & (TEE_MATTR_PW | TEE_MATTR_UW)) {
395 		if (flags & TEE_MATTR_LOCKED) {
396 			at = AREA_TYPE_LOCK;
397 			goto out;
398 		}
399 		mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
400 		if (!mm_store)
401 			goto bad;
402 		area->store = phys_to_virt(tee_mm_get_smem(mm_store),
403 					   MEM_AREA_TA_RAM);
404 		if (!area->store)
405 			goto bad;
406 		area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
407 				     sizeof(struct pager_rw_pstate));
408 		if (!area->u.rwp)
409 			goto bad;
410 		at = AREA_TYPE_RW;
411 	} else {
412 		area->store = (void *)store;
413 		area->u.hashes = hashes;
414 		at = AREA_TYPE_RO;
415 	}
416 out:
417 	area->pgt = pgt;
418 	area->base = base;
419 	area->size = size;
420 	area->flags = flags;
421 	area->type = at;
422 	return area;
423 bad:
424 	tee_mm_free(mm_store);
425 	free(area->u.rwp);
426 	free(area);
427 	return NULL;
428 }
429 
430 static void area_insert_tail(struct tee_pager_area *area)
431 {
432 	uint32_t exceptions = pager_lock_check_stack(8);
433 
434 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
435 
436 	pager_unlock(exceptions);
437 }
438 KEEP_PAGER(area_insert_tail);
439 
440 static size_t tbl_usage_count(struct pgt *pgt)
441 {
442 	size_t n;
443 	paddr_t pa;
444 	size_t usage = 0;
445 
446 	for (n = 0; n < tee_pager_tbl_info.num_entries; n++) {
447 		core_mmu_get_entry_primitive(pgt->tbl, tee_pager_tbl_info.level,
448 					     n, &pa, NULL);
449 		if (pa)
450 			usage++;
451 	}
452 	return usage;
453 }
454 
455 bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
456 			const void *store, const void *hashes)
457 {
458 	struct tee_pager_area *area;
459 	size_t tbl_va_size;
460 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
461 
462 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
463 		base, base + size, flags, store, hashes);
464 
465 	if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) {
466 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);
467 		panic();
468 	}
469 
470 	if (!(flags & TEE_MATTR_PW) && (!store || !hashes))
471 		panic("write pages cannot provide store or hashes");
472 
473 	if ((flags & TEE_MATTR_PW) && (store || hashes))
474 		panic("non-write pages must provide store and hashes");
475 
476 	if (!pager_core_pgt.tbl) {
477 		pager_core_pgt.tbl = ti->table;
478 		pgt_set_used_entries(&pager_core_pgt,
479 				     tbl_usage_count(&pager_core_pgt));
480 	}
481 
482 	tbl_va_size = (1 << ti->shift) * ti->num_entries;
483 	if (!core_is_buffer_inside(base, size, ti->va_base, tbl_va_size)) {
484 		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit it translation table 0x%" PRIxVA " len 0x%zx",
485 			base, size, ti->va_base, tbl_va_size);
486 		return false;
487 	}
488 
489 	area = alloc_area(&pager_core_pgt, base, size, flags, store, hashes);
490 	if (!area)
491 		return false;
492 
493 	area_insert_tail(area);
494 	return true;
495 }
496 
497 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
498 					vaddr_t va)
499 {
500 	struct tee_pager_area *area;
501 
502 	if (!areas)
503 		return NULL;
504 
505 	TAILQ_FOREACH(area, areas, link) {
506 		if (core_is_buffer_inside(va, 1, area->base, area->size))
507 			return area;
508 	}
509 	return NULL;
510 }
511 
512 #ifdef CFG_PAGED_USER_TA
513 static struct tee_pager_area *find_uta_area(vaddr_t va)
514 {
515 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
516 
517 	if (!ctx || !is_user_ta_ctx(ctx))
518 		return NULL;
519 	return find_area(to_user_ta_ctx(ctx)->areas, va);
520 }
521 #else
522 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
523 {
524 	return NULL;
525 }
526 #endif /*CFG_PAGED_USER_TA*/
527 
528 
529 static uint32_t get_area_mattr(uint32_t area_flags)
530 {
531 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
532 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
533 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
534 
535 	if (!(area_flags & (TEE_MATTR_UR | TEE_MATTR_UX | TEE_MATTR_UW)))
536 		attr |= TEE_MATTR_GLOBAL;
537 
538 	return attr;
539 }
540 
541 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
542 {
543 	paddr_t pa;
544 	unsigned idx;
545 
546 	idx = core_mmu_va2idx(&pager_alias_tbl_info, (vaddr_t)pmem->va_alias);
547 	core_mmu_get_entry(&pager_alias_tbl_info, idx, &pa, NULL);
548 	return pa;
549 }
550 
551 static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
552 			void *dst)
553 {
554 	struct pager_aes_gcm_iv iv = {
555 		{ (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
556 	};
557 
558 	return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key),
559 				     &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE);
560 }
561 
562 static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
563 {
564 	struct pager_aes_gcm_iv iv;
565 
566 	assert((rwp->iv + 1) > rwp->iv);
567 	rwp->iv++;
568 	/*
569 	 * IV is constructed as recommended in section "8.2.1 Deterministic
570 	 * Construction" of "Recommendation for Block Cipher Modes of
571 	 * Operation: Galois/Counter Mode (GCM) and GMAC",
572 	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
573 	 */
574 	iv.iv[0] = (vaddr_t)rwp;
575 	iv.iv[1] = rwp->iv >> 32;
576 	iv.iv[2] = rwp->iv;
577 
578 	if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key),
579 				   &iv, rwp->tag,
580 				   src, dst, SMALL_PAGE_SIZE))
581 		panic("gcm failed");
582 }
583 
584 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
585 			void *va_alias)
586 {
587 	size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
588 	const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;
589 	struct core_mmu_table_info *ti;
590 	uint32_t attr_alias;
591 	paddr_t pa_alias;
592 	unsigned int idx_alias;
593 
594 	/* Insure we are allowed to write to aliased virtual page */
595 	ti = &pager_alias_tbl_info;
596 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
597 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
598 	if (!(attr_alias & TEE_MATTR_PW)) {
599 		attr_alias |= TEE_MATTR_PW;
600 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
601 		tlbi_mva_allasid((vaddr_t)va_alias);
602 	}
603 
604 	switch (area->type) {
605 	case AREA_TYPE_RO:
606 		{
607 			const void *hash = area->u.hashes +
608 					   idx * TEE_SHA256_HASH_SIZE;
609 
610 			memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
611 			incr_ro_hits();
612 
613 			if (hash_sha256_check(hash, va_alias,
614 					      SMALL_PAGE_SIZE) != TEE_SUCCESS) {
615 				EMSG("PH 0x%" PRIxVA " failed", page_va);
616 				panic();
617 			}
618 		}
619 		/* Forbid write to aliases for read-only (maybe exec) pages */
620 		attr_alias &= ~TEE_MATTR_PW;
621 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
622 		tlbi_mva_allasid((vaddr_t)va_alias);
623 		break;
624 	case AREA_TYPE_RW:
625 		FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
626 			va_alias, page_va, area->u.rwp[idx].iv);
627 		if (!area->u.rwp[idx].iv)
628 			memset(va_alias, 0, SMALL_PAGE_SIZE);
629 		else if (!decrypt_page(&area->u.rwp[idx], stored_page,
630 				       va_alias)) {
631 			EMSG("PH 0x%" PRIxVA " failed", page_va);
632 			panic();
633 		}
634 		incr_rw_hits();
635 		break;
636 	case AREA_TYPE_LOCK:
637 		FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
638 		memset(va_alias, 0, SMALL_PAGE_SIZE);
639 		break;
640 	default:
641 		panic();
642 	}
643 }
644 
645 static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
646 {
647 	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
648 				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;
649 
650 	if (pmem->area->type == AREA_TYPE_RW && (attr & dirty_bits)) {
651 		size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
652 		size_t idx = pmem->pgidx - (offs >> SMALL_PAGE_SHIFT);
653 		void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;
654 
655 		assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW));
656 		encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
657 			     stored_page);
658 		FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
659 			pmem->area->base + idx * SMALL_PAGE_SIZE,
660 			pmem->area->u.rwp[idx].iv);
661 	}
662 }
663 
664 static void area_get_entry(struct tee_pager_area *area, size_t idx,
665 			   paddr_t *pa, uint32_t *attr)
666 {
667 	assert(area->pgt);
668 	assert(idx < tee_pager_tbl_info.num_entries);
669 	core_mmu_get_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
670 				     idx, pa, attr);
671 }
672 
673 static void area_set_entry(struct tee_pager_area *area, size_t idx,
674 			   paddr_t pa, uint32_t attr)
675 {
676 	assert(area->pgt);
677 	assert(idx < tee_pager_tbl_info.num_entries);
678 	core_mmu_set_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
679 				     idx, pa, attr);
680 }
681 
682 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
683 {
684 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
685 }
686 
687 static vaddr_t __maybe_unused area_idx2va(struct tee_pager_area *area,
688 					 size_t idx)
689 {
690 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
691 }
692 
693 #ifdef CFG_PAGED_USER_TA
694 static void free_area(struct tee_pager_area *area)
695 {
696 	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr,
697 				virt_to_phys(area->store)));
698 	if (area->type == AREA_TYPE_RW)
699 		free(area->u.rwp);
700 	free(area);
701 }
702 
703 static bool pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
704 			       size_t size)
705 {
706 	struct tee_pager_area *area;
707 	uint32_t flags;
708 	vaddr_t b = base;
709 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
710 
711 	if (!utc->areas) {
712 		utc->areas = malloc(sizeof(*utc->areas));
713 		if (!utc->areas)
714 			return false;
715 		TAILQ_INIT(utc->areas);
716 	}
717 
718 	flags = TEE_MATTR_PRW | TEE_MATTR_URWX;
719 
720 	while (s) {
721 		size_t s2;
722 
723 		if (find_area(utc->areas, b))
724 			return false;
725 
726 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
727 
728 		/* Table info will be set when the context is activated. */
729 		area = alloc_area(NULL, b, s2, flags, NULL, NULL);
730 		if (!area)
731 			return false;
732 		TAILQ_INSERT_TAIL(utc->areas, area, link);
733 		b += s2;
734 		s -= s2;
735 	}
736 
737 	return true;
738 }
739 
740 bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size)
741 {
742 	struct thread_specific_data *tsd = thread_get_tsd();
743 	struct tee_pager_area *area;
744 	struct core_mmu_table_info dir_info = { NULL };
745 
746 	if (&utc->ctx != tsd->ctx) {
747 		/*
748 		 * Changes are to an utc that isn't active. Just add the
749 		 * areas page tables will be dealt with later.
750 		 */
751 		return pager_add_uta_area(utc, base, size);
752 	}
753 
754 	/*
755 	 * Assign page tables before adding areas to be able to tell which
756 	 * are newly added and should be removed in case of failure.
757 	 */
758 	tee_pager_assign_uta_tables(utc);
759 	if (!pager_add_uta_area(utc, base, size)) {
760 		struct tee_pager_area *next_a;
761 
762 		/* Remove all added areas */
763 		TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
764 			if (!area->pgt) {
765 				TAILQ_REMOVE(utc->areas, area, link);
766 				free_area(area);
767 			}
768 		}
769 		return false;
770 	}
771 
772 	/*
773 	 * Assign page tables to the new areas and make sure that the page
774 	 * tables are registered in the upper table.
775 	 */
776 	tee_pager_assign_uta_tables(utc);
777 	core_mmu_get_user_pgdir(&dir_info);
778 	TAILQ_FOREACH(area, utc->areas, link) {
779 		paddr_t pa;
780 		size_t idx;
781 		uint32_t attr;
782 
783 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
784 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
785 
786 		/*
787 		 * Check if the page table already is used, if it is, it's
788 		 * already registered.
789 		 */
790 		if (area->pgt->num_used_entries) {
791 			assert(attr & TEE_MATTR_TABLE);
792 			assert(pa == virt_to_phys(area->pgt->tbl));
793 			continue;
794 		}
795 
796 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
797 		pa = virt_to_phys(area->pgt->tbl);
798 		assert(pa);
799 		/*
800 		 * Note that the update of the table entry is guaranteed to
801 		 * be atomic.
802 		 */
803 		core_mmu_set_entry(&dir_info, idx, pa, attr);
804 	}
805 
806 	return true;
807 }
808 
809 static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
810 				   struct pgt *pgt)
811 {
812 	assert(pgt);
813 	ti->table = pgt->tbl;
814 	ti->va_base = pgt->vabase;
815 	ti->level = tee_pager_tbl_info.level;
816 	ti->shift = tee_pager_tbl_info.shift;
817 	ti->num_entries = tee_pager_tbl_info.num_entries;
818 }
819 
820 static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
821 			   vaddr_t new_base)
822 {
823 	uint32_t exceptions = pager_lock_check_stack(64);
824 
825 	/*
826 	 * If there's no pgt assigned to the old area there's no pages to
827 	 * deal with either, just update with a new pgt and base.
828 	 */
829 	if (area->pgt) {
830 		struct core_mmu_table_info old_ti;
831 		struct core_mmu_table_info new_ti;
832 		struct tee_pager_pmem *pmem;
833 
834 		init_tbl_info_from_pgt(&old_ti, area->pgt);
835 		init_tbl_info_from_pgt(&new_ti, new_pgt);
836 
837 
838 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
839 			vaddr_t va;
840 			paddr_t pa;
841 			uint32_t attr;
842 
843 			if (pmem->area != area)
844 				continue;
845 			core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr);
846 			core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0);
847 
848 			assert(pa == get_pmem_pa(pmem));
849 			assert(attr);
850 			assert(area->pgt->num_used_entries);
851 			area->pgt->num_used_entries--;
852 
853 			va = core_mmu_idx2va(&old_ti, pmem->pgidx);
854 			va = va - area->base + new_base;
855 			pmem->pgidx = core_mmu_va2idx(&new_ti, va);
856 			core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr);
857 			new_pgt->num_used_entries++;
858 		}
859 	}
860 
861 	area->pgt = new_pgt;
862 	area->base = new_base;
863 	pager_unlock(exceptions);
864 }
865 KEEP_PAGER(transpose_area);
866 
867 void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
868 				   vaddr_t src_base,
869 				   struct user_ta_ctx *dst_utc,
870 				   vaddr_t dst_base, struct pgt **dst_pgt,
871 				   size_t size)
872 {
873 	struct tee_pager_area *area;
874 	struct tee_pager_area *next_a;
875 
876 	TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
877 		vaddr_t new_area_base;
878 		size_t new_idx;
879 
880 		if (!core_is_buffer_inside(area->base, area->size,
881 					  src_base, size))
882 			continue;
883 
884 		TAILQ_REMOVE(src_utc->areas, area, link);
885 
886 		new_area_base = dst_base + (src_base - area->base);
887 		new_idx = (new_area_base - dst_pgt[0]->vabase) /
888 			  CORE_MMU_PGDIR_SIZE;
889 		assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
890 		       dst_pgt[new_idx]->vabase);
891 		transpose_area(area, dst_pgt[new_idx], new_area_base);
892 
893 		/*
894 		 * Assert that this will not cause any conflicts in the new
895 		 * utc.  This should already be guaranteed, but a bug here
896 		 * could be tricky to find.
897 		 */
898 		assert(!find_area(dst_utc->areas, area->base));
899 		TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
900 	}
901 }
902 
903 static void rem_area(struct tee_pager_area_head *area_head,
904 		     struct tee_pager_area *area)
905 {
906 	struct tee_pager_pmem *pmem;
907 	uint32_t exceptions;
908 
909 	exceptions = pager_lock_check_stack(64);
910 
911 	TAILQ_REMOVE(area_head, area, link);
912 
913 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
914 		if (pmem->area == area) {
915 			area_set_entry(area, pmem->pgidx, 0, 0);
916 			tlbi_mva_allasid(area_idx2va(area, pmem->pgidx));
917 			pgt_dec_used_entries(area->pgt);
918 			pmem->area = NULL;
919 			pmem->pgidx = INVALID_PGIDX;
920 		}
921 	}
922 
923 	pager_unlock(exceptions);
924 	free_area(area);
925 }
926 KEEP_PAGER(rem_area);
927 
928 void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
929 			      size_t size)
930 {
931 	struct tee_pager_area *area;
932 	struct tee_pager_area *next_a;
933 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
934 
935 	TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
936 		if (core_is_buffer_inside(area->base, area->size, base, s))
937 			rem_area(utc->areas, area);
938 	}
939 }
940 
941 void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
942 {
943 	struct tee_pager_area *area;
944 
945 	if (!utc->areas)
946 		return;
947 
948 	while (true) {
949 		area = TAILQ_FIRST(utc->areas);
950 		if (!area)
951 			break;
952 		TAILQ_REMOVE(utc->areas, area, link);
953 		free_area(area);
954 	}
955 
956 	free(utc->areas);
957 }
958 
959 bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
960 				 size_t size, uint32_t flags)
961 {
962 	bool ret;
963 	vaddr_t b = base;
964 	size_t s = size;
965 	size_t s2;
966 	struct tee_pager_area *area = find_area(utc->areas, b);
967 	uint32_t exceptions;
968 	struct tee_pager_pmem *pmem;
969 	paddr_t pa;
970 	uint32_t a;
971 	uint32_t f;
972 
973 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
974 	if (f & TEE_MATTR_UW)
975 		f |= TEE_MATTR_PW;
976 	f = get_area_mattr(f);
977 
978 	exceptions = pager_lock_check_stack(64);
979 
980 	while (s) {
981 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
982 		if (!area || area->base != b || area->size != s2) {
983 			ret = false;
984 			goto out;
985 		}
986 		b += s2;
987 		s -= s2;
988 
989 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
990 			if (pmem->area != area)
991 				continue;
992 			area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
993 			if (a & TEE_MATTR_VALID_BLOCK)
994 				assert(pa == get_pmem_pa(pmem));
995 			else
996 				pa = get_pmem_pa(pmem);
997 			if (a == f)
998 				continue;
999 			area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1000 			tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1001 			if (!(flags & TEE_MATTR_UW))
1002 				tee_pager_save_page(pmem, a);
1003 
1004 			area_set_entry(pmem->area, pmem->pgidx, pa, f);
1005 			/*
1006 			 * Make sure the table update is visible before
1007 			 * continuing.
1008 			 */
1009 			dsb_ishst();
1010 
1011 			if (flags & TEE_MATTR_UX) {
1012 				void *va = (void *)area_idx2va(pmem->area,
1013 							       pmem->pgidx);
1014 
1015 				cache_op_inner(DCACHE_AREA_CLEAN, va,
1016 						SMALL_PAGE_SIZE);
1017 				cache_op_inner(ICACHE_AREA_INVALIDATE, va,
1018 						SMALL_PAGE_SIZE);
1019 			}
1020 		}
1021 
1022 		area->flags = f;
1023 		area = TAILQ_NEXT(area, link);
1024 	}
1025 
1026 	ret = true;
1027 out:
1028 	pager_unlock(exceptions);
1029 	return ret;
1030 }
1031 KEEP_PAGER(tee_pager_set_uta_area_attr);
1032 #endif /*CFG_PAGED_USER_TA*/
1033 
1034 static bool tee_pager_unhide_page(vaddr_t page_va)
1035 {
1036 	struct tee_pager_pmem *pmem;
1037 
1038 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1039 		paddr_t pa;
1040 		uint32_t attr;
1041 
1042 		if (pmem->pgidx == INVALID_PGIDX)
1043 			continue;
1044 
1045 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
1046 
1047 		if (!(attr &
1048 		     (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
1049 			continue;
1050 
1051 		if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
1052 			uint32_t a = get_area_mattr(pmem->area->flags);
1053 
1054 			/* page is hidden, show and move to back */
1055 			if (pa != get_pmem_pa(pmem))
1056 				panic("unexpected pa");
1057 
1058 			/*
1059 			 * If it's not a dirty block, then it should be
1060 			 * read only.
1061 			 */
1062 			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
1063 				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1064 			else
1065 				FMSG("Unhide %#" PRIxVA, page_va);
1066 
1067 			if (page_va == 0x8000a000)
1068 				FMSG("unhide %#" PRIxVA " a %#" PRIX32,
1069 					page_va, a);
1070 			area_set_entry(pmem->area, pmem->pgidx, pa, a);
1071 			/*
1072 			 * Note that TLB invalidation isn't needed since
1073 			 * there wasn't a valid mapping before. We should
1074 			 * use a barrier though, to make sure that the
1075 			 * change is visible.
1076 			 */
1077 			dsb_ishst();
1078 
1079 			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1080 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1081 			incr_hidden_hits();
1082 			return true;
1083 		}
1084 	}
1085 
1086 	return false;
1087 }
1088 
1089 static void tee_pager_hide_pages(void)
1090 {
1091 	struct tee_pager_pmem *pmem;
1092 	size_t n = 0;
1093 
1094 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1095 		paddr_t pa;
1096 		uint32_t attr;
1097 		uint32_t a;
1098 
1099 		if (n >= TEE_PAGER_NHIDE)
1100 			break;
1101 		n++;
1102 
1103 		/* we cannot hide pages when pmem->area is not defined. */
1104 		if (!pmem->area)
1105 			continue;
1106 
1107 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
1108 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1109 			continue;
1110 
1111 		assert(pa == get_pmem_pa(pmem));
1112 		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)){
1113 			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
1114 			FMSG("Hide %#" PRIxVA,
1115 			     area_idx2va(pmem->area, pmem->pgidx));
1116 		} else
1117 			a = TEE_MATTR_HIDDEN_BLOCK;
1118 
1119 		area_set_entry(pmem->area, pmem->pgidx, pa, a);
1120 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1121 	}
1122 }
1123 
1124 /*
1125  * Find mapped pmem, hide and move to pageble pmem.
1126  * Return false if page was not mapped, and true if page was mapped.
1127  */
1128 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1129 				       vaddr_t page_va)
1130 {
1131 	struct tee_pager_pmem *pmem;
1132 	unsigned pgidx;
1133 	paddr_t pa;
1134 	uint32_t attr;
1135 
1136 	pgidx = area_va2idx(area, page_va);
1137 	area_get_entry(area, pgidx, &pa, &attr);
1138 
1139 	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
1140 
1141 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1142 		if (pmem->area != area || pmem->pgidx != pgidx)
1143 			continue;
1144 
1145 		assert(pa == get_pmem_pa(pmem));
1146 		area_set_entry(area, pgidx, 0, 0);
1147 		pgt_dec_used_entries(area->pgt);
1148 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1149 		pmem->area = NULL;
1150 		pmem->pgidx = INVALID_PGIDX;
1151 		tee_pager_npages++;
1152 		set_npages();
1153 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1154 		incr_zi_released();
1155 		return true;
1156 	}
1157 
1158 	return false;
1159 }
1160 
1161 /* Finds the oldest page and unmats it from its old virtual address */
1162 static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
1163 {
1164 	struct tee_pager_pmem *pmem;
1165 
1166 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1167 	if (!pmem) {
1168 		EMSG("No pmem entries");
1169 		return NULL;
1170 	}
1171 	if (pmem->pgidx != INVALID_PGIDX) {
1172 		uint32_t a;
1173 
1174 		assert(pmem->area && pmem->area->pgt);
1175 		area_get_entry(pmem->area, pmem->pgidx, NULL, &a);
1176 		area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1177 		pgt_dec_used_entries(pmem->area->pgt);
1178 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1179 		tee_pager_save_page(pmem, a);
1180 	}
1181 
1182 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1183 	pmem->pgidx = INVALID_PGIDX;
1184 	pmem->area = NULL;
1185 	if (area->type == AREA_TYPE_LOCK) {
1186 		/* Move page to lock list */
1187 		if (tee_pager_npages <= 0)
1188 			panic("running out of page");
1189 		tee_pager_npages--;
1190 		set_npages();
1191 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1192 	} else {
1193 		/* move page to back */
1194 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1195 	}
1196 
1197 	return pmem;
1198 }
1199 
1200 static bool pager_update_permissions(struct tee_pager_area *area,
1201 			struct abort_info *ai, bool *handled)
1202 {
1203 	unsigned int pgidx = area_va2idx(area, ai->va);
1204 	uint32_t attr;
1205 	paddr_t pa;
1206 
1207 	*handled = false;
1208 
1209 	area_get_entry(area, pgidx, &pa, &attr);
1210 
1211 	/* Not mapped */
1212 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1213 		return false;
1214 
1215 	/* Not readable, should not happen */
1216 	if (abort_is_user_exception(ai)) {
1217 		if (!(attr & TEE_MATTR_UR))
1218 			return true;
1219 	} else {
1220 		if (!(attr & TEE_MATTR_PR)) {
1221 			abort_print_error(ai);
1222 			panic();
1223 		}
1224 	}
1225 
1226 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1227 	case CORE_MMU_FAULT_TRANSLATION:
1228 	case CORE_MMU_FAULT_READ_PERMISSION:
1229 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1230 			/* Check attempting to execute from an NOX page */
1231 			if (abort_is_user_exception(ai)) {
1232 				if (!(attr & TEE_MATTR_UX))
1233 					return true;
1234 			} else {
1235 				if (!(attr & TEE_MATTR_PX)) {
1236 					abort_print_error(ai);
1237 					panic();
1238 				}
1239 			}
1240 		}
1241 		/* Since the page is mapped now it's OK */
1242 		break;
1243 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1244 		/* Check attempting to write to an RO page */
1245 		if (abort_is_user_exception(ai)) {
1246 			if (!(area->flags & TEE_MATTR_UW))
1247 				return true;
1248 			if (!(attr & TEE_MATTR_UW)) {
1249 				FMSG("Dirty %p",
1250 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1251 				area_set_entry(area, pgidx, pa,
1252 					       get_area_mattr(area->flags));
1253 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1254 			}
1255 
1256 		} else {
1257 			if (!(area->flags & TEE_MATTR_PW)) {
1258 				abort_print_error(ai);
1259 				panic();
1260 			}
1261 			if (!(attr & TEE_MATTR_PW)) {
1262 				FMSG("Dirty %p",
1263 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1264 				area_set_entry(area, pgidx, pa,
1265 					       get_area_mattr(area->flags));
1266 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1267 			}
1268 		}
1269 		/* Since permissions has been updated now it's OK */
1270 		break;
1271 	default:
1272 		/* Some fault we can't deal with */
1273 		if (abort_is_user_exception(ai))
1274 			return true;
1275 		abort_print_error(ai);
1276 		panic();
1277 	}
1278 	*handled = true;
1279 	return true;
1280 }
1281 
1282 #ifdef CFG_TEE_CORE_DEBUG
1283 static void stat_handle_fault(void)
1284 {
1285 	static size_t num_faults;
1286 	static size_t min_npages = SIZE_MAX;
1287 	static size_t total_min_npages = SIZE_MAX;
1288 
1289 	num_faults++;
1290 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1291 		DMSG("nfaults %zu npages %zu (min %zu)",
1292 		     num_faults, tee_pager_npages, min_npages);
1293 		min_npages = tee_pager_npages; /* reset */
1294 	}
1295 	if (tee_pager_npages < min_npages)
1296 		min_npages = tee_pager_npages;
1297 	if (tee_pager_npages < total_min_npages)
1298 		total_min_npages = tee_pager_npages;
1299 }
1300 #else
1301 static void stat_handle_fault(void)
1302 {
1303 }
1304 #endif
1305 
1306 bool tee_pager_handle_fault(struct abort_info *ai)
1307 {
1308 	struct tee_pager_area *area;
1309 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1310 	uint32_t exceptions;
1311 	bool ret;
1312 
1313 #ifdef TEE_PAGER_DEBUG_PRINT
1314 	abort_print(ai);
1315 #endif
1316 
1317 	/*
1318 	 * We're updating pages that can affect several active CPUs at a
1319 	 * time below. We end up here because a thread tries to access some
1320 	 * memory that isn't available. We have to be careful when making
1321 	 * that memory available as other threads may succeed in accessing
1322 	 * that address the moment after we've made it available.
1323 	 *
1324 	 * That means that we can't just map the memory and populate the
1325 	 * page, instead we use the aliased mapping to populate the page
1326 	 * and once everything is ready we map it.
1327 	 */
1328 	exceptions = pager_lock(ai);
1329 
1330 	stat_handle_fault();
1331 
1332 	/* check if the access is valid */
1333 	if (abort_is_user_exception(ai)) {
1334 		area = find_uta_area(ai->va);
1335 
1336 	} else {
1337 		area = find_area(&tee_pager_area_head, ai->va);
1338 		if (!area)
1339 			area = find_uta_area(ai->va);
1340 	}
1341 	if (!area || !area->pgt) {
1342 		ret = false;
1343 		goto out;
1344 	}
1345 
1346 	if (!tee_pager_unhide_page(page_va)) {
1347 		struct tee_pager_pmem *pmem = NULL;
1348 		uint32_t attr;
1349 		paddr_t pa;
1350 
1351 		/*
1352 		 * The page wasn't hidden, but some other core may have
1353 		 * updated the table entry before we got here or we need
1354 		 * to make a read-only page read-write (dirty).
1355 		 */
1356 		if (pager_update_permissions(area, ai, &ret)) {
1357 			/*
1358 			 * Nothing more to do with the abort. The problem
1359 			 * could already have been dealt with from another
1360 			 * core or if ret is false the TA will be paniced.
1361 			 */
1362 			goto out;
1363 		}
1364 
1365 		pmem = tee_pager_get_page(area);
1366 		if (!pmem) {
1367 			abort_print(ai);
1368 			panic();
1369 		}
1370 
1371 		/* load page code & data */
1372 		tee_pager_load_page(area, page_va, pmem->va_alias);
1373 
1374 
1375 		pmem->area = area;
1376 		pmem->pgidx = area_va2idx(area, ai->va);
1377 		attr = get_area_mattr(area->flags) &
1378 			~(TEE_MATTR_PW | TEE_MATTR_UW);
1379 		pa = get_pmem_pa(pmem);
1380 
1381 		/*
1382 		 * We've updated the page using the aliased mapping and
1383 		 * some cache maintenence is now needed if it's an
1384 		 * executable page.
1385 		 *
1386 		 * Since the d-cache is a Physically-indexed,
1387 		 * physically-tagged (PIPT) cache we can clean either the
1388 		 * aliased address or the real virtual address. In this
1389 		 * case we choose the real virtual address.
1390 		 *
1391 		 * The i-cache can also be PIPT, but may be something else
1392 		 * too like VIPT. The current code requires the caches to
1393 		 * implement the IVIPT extension, that is:
1394 		 * "instruction cache maintenance is required only after
1395 		 * writing new data to a physical address that holds an
1396 		 * instruction."
1397 		 *
1398 		 * To portably invalidate the icache the page has to
1399 		 * be mapped at the final virtual address but not
1400 		 * executable.
1401 		 */
1402 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1403 			uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1404 					TEE_MATTR_PW | TEE_MATTR_UW;
1405 
1406 			/* Set a temporary read-only mapping */
1407 			area_set_entry(pmem->area, pmem->pgidx, pa,
1408 				       attr & ~mask);
1409 			tlbi_mva_allasid(page_va);
1410 
1411 			/*
1412 			 * Doing these operations to LoUIS (Level of
1413 			 * unification, Inner Shareable) would be enough
1414 			 */
1415 			cache_op_inner(DCACHE_AREA_CLEAN, (void *)page_va,
1416 				       SMALL_PAGE_SIZE);
1417 			cache_op_inner(ICACHE_AREA_INVALIDATE, (void *)page_va,
1418 				       SMALL_PAGE_SIZE);
1419 
1420 			/* Set the final mapping */
1421 			area_set_entry(area, pmem->pgidx, pa, attr);
1422 			tlbi_mva_allasid(page_va);
1423 		} else {
1424 			area_set_entry(area, pmem->pgidx, pa, attr);
1425 			/*
1426 			 * No need to flush TLB for this entry, it was
1427 			 * invalid. We should use a barrier though, to make
1428 			 * sure that the change is visible.
1429 			 */
1430 			dsb_ishst();
1431 		}
1432 		pgt_inc_used_entries(area->pgt);
1433 
1434 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1435 
1436 	}
1437 
1438 	tee_pager_hide_pages();
1439 	ret = true;
1440 out:
1441 	pager_unlock(exceptions);
1442 	return ret;
1443 }
1444 
1445 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1446 {
1447 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
1448 	size_t n;
1449 
1450 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1451 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1452 
1453 	/* setup memory */
1454 	for (n = 0; n < npages; n++) {
1455 		struct tee_pager_pmem *pmem;
1456 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1457 		unsigned pgidx = core_mmu_va2idx(ti, va);
1458 		paddr_t pa;
1459 		uint32_t attr;
1460 
1461 		/*
1462 		 * Note that we can only support adding pages in the
1463 		 * valid range of this table info, currently not a problem.
1464 		 */
1465 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1466 
1467 		/* Ignore unmapped pages/blocks */
1468 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1469 			continue;
1470 
1471 		pmem = malloc(sizeof(struct tee_pager_pmem));
1472 		if (!pmem)
1473 			panic("out of mem");
1474 
1475 		pmem->va_alias = pager_add_alias_page(pa);
1476 
1477 		if (unmap) {
1478 			pmem->area = NULL;
1479 			pmem->pgidx = INVALID_PGIDX;
1480 			core_mmu_set_entry(ti, pgidx, 0, 0);
1481 			pgt_dec_used_entries(&pager_core_pgt);
1482 		} else {
1483 			/*
1484 			 * The page is still mapped, let's assign the area
1485 			 * and update the protection bits accordingly.
1486 			 */
1487 			pmem->area = find_area(&tee_pager_area_head, va);
1488 			assert(pmem->area->pgt == &pager_core_pgt);
1489 			pmem->pgidx = pgidx;
1490 			assert(pa == get_pmem_pa(pmem));
1491 			area_set_entry(pmem->area, pgidx, pa,
1492 				       get_area_mattr(pmem->area->flags));
1493 		}
1494 
1495 		tee_pager_npages++;
1496 		incr_npages_all();
1497 		set_npages();
1498 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1499 	}
1500 
1501 	/*
1502 	 * As this is done at inits, invalidate all TLBs once instead of
1503 	 * targeting only the modified entries.
1504 	 */
1505 	tlbi_all();
1506 }
1507 
1508 #ifdef CFG_PAGED_USER_TA
1509 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1510 {
1511 	struct pgt *p = pgt;
1512 
1513 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1514 		p = SLIST_NEXT(p, link);
1515 	return p;
1516 }
1517 
1518 void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
1519 {
1520 	struct tee_pager_area *area;
1521 	struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1522 
1523 	TAILQ_FOREACH(area, utc->areas, link) {
1524 		if (!area->pgt)
1525 			area->pgt = find_pgt(pgt, area->base);
1526 		else
1527 			assert(area->pgt == find_pgt(pgt, area->base));
1528 		if (!area->pgt)
1529 			panic();
1530 	}
1531 }
1532 
1533 static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
1534 {
1535 	uint32_t attr;
1536 
1537 	assert(pmem->area && pmem->area->pgt);
1538 
1539 	area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
1540 	area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1541 	tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1542 	tee_pager_save_page(pmem, attr);
1543 	assert(pmem->area->pgt->num_used_entries);
1544 	pmem->area->pgt->num_used_entries--;
1545 	pmem->pgidx = INVALID_PGIDX;
1546 	pmem->area = NULL;
1547 }
1548 
1549 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1550 {
1551 	struct tee_pager_pmem *pmem;
1552 	struct tee_pager_area *area;
1553 	uint32_t exceptions = pager_lock_check_stack(2048);
1554 
1555 	if (!pgt->num_used_entries)
1556 		goto out;
1557 
1558 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1559 		if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
1560 			continue;
1561 		if (pmem->area->pgt == pgt)
1562 			pager_save_and_release_entry(pmem);
1563 	}
1564 	assert(!pgt->num_used_entries);
1565 
1566 out:
1567 	if (is_user_ta_ctx(pgt->ctx)) {
1568 		TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
1569 			if (area->pgt == pgt)
1570 				area->pgt = NULL;
1571 		}
1572 	}
1573 
1574 	pager_unlock(exceptions);
1575 }
1576 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1577 #endif /*CFG_PAGED_USER_TA*/
1578 
1579 void tee_pager_release_phys(void *addr, size_t size)
1580 {
1581 	bool unmaped = false;
1582 	vaddr_t va = (vaddr_t)addr;
1583 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1584 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1585 	struct tee_pager_area *area;
1586 	uint32_t exceptions;
1587 
1588 	if (end <= begin)
1589 		return;
1590 
1591 	area = find_area(&tee_pager_area_head, begin);
1592 	if (!area ||
1593 	    area != find_area(&tee_pager_area_head, end - SMALL_PAGE_SIZE))
1594 		panic();
1595 
1596 	exceptions = pager_lock_check_stack(128);
1597 
1598 	for (va = begin; va < end; va += SMALL_PAGE_SIZE)
1599 		unmaped |= tee_pager_release_one_phys(area, va);
1600 
1601 	if (unmaped)
1602 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1603 
1604 	pager_unlock(exceptions);
1605 }
1606 KEEP_PAGER(tee_pager_release_phys);
1607 
1608 void *tee_pager_alloc(size_t size, uint32_t flags)
1609 {
1610 	tee_mm_entry_t *mm;
1611 	uint32_t f = TEE_MATTR_PW | TEE_MATTR_PR | (flags & TEE_MATTR_LOCKED);
1612 
1613 	if (!size)
1614 		return NULL;
1615 
1616 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1617 	if (!mm)
1618 		return NULL;
1619 
1620 	tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
1621 				f, NULL, NULL);
1622 
1623 	return (void *)tee_mm_get_smem(mm);
1624 }
1625